Leaked source code of Windows Server 2003

  1. /**************************************************************************\
  2. *
  3. * Copyright (c) 2000 Microsoft Corporation
  4. *
  5. * Module Name:
  6. *
  7. * AARasterizer.cpp
  8. *
  9. * Abstract:
  10. *
  11. * Contains all the code for rasterizing the fill of a path.
  12. *
  13. * Created:
  14. *
  15. * 03/25/2000 andrewgo
  16. *
  17. \**************************************************************************/
  18. #include "precomp.hpp"
  19. #include <limits.h>
  20. #if DBG
  21. #define ASSERTACTIVELIST(list, y) AssertActiveList(list, y)
  22. #define ASSERTACTIVELISTORDER(list) AssertActiveListOrder(list)
  23. #define ASSERTINACTIVEARRAY(list, count) AssertInactiveArray(list, count)
  24. #define ASSERTPATH(path) AssertPath(path)
  25. #else
  26. #define ASSERTACTIVELIST(list, y)
  27. #define ASSERTACTIVELISTORDER(list)
  28. #define ASSERTINACTIVEARRAY(list, count)
  29. #define ASSERTPATH(path)
  30. #endif
  31. // Define our on-stack storage use. The 'free' versions are nicely tuned
  32. // to avoid allocations in most common scenarios, while at the same time
  33. // not chewing up toooo much stack space.
  34. //
  35. // We make the debug versions small so that we hit the 'grow' cases more
  36. // frequently, for better testing:
  37. #if DBG
  38. #define EDGE_STORE_STACK_NUMBER 10
  39. #define EDGE_STORE_ALLOCATION_NUMBER 11
  40. #define INACTIVE_LIST_NUMBER 12
  41. #define ENUMERATE_BUFFER_NUMBER 15
  42. #define INTERVAL_BUFFER_NUMBER 8 // Must be at least 4
  43. #define NOMINAL_FILL_POINT_NUMBER 4 // Must be at least 4
  44. #else
  45. #define EDGE_STORE_STACK_NUMBER (1600 / sizeof(EpEdge))
  46. #define EDGE_STORE_ALLOCATION_NUMBER (4032 / sizeof(EpEdge))
  47. #define INACTIVE_LIST_NUMBER EDGE_STORE_STACK_NUMBER
  48. #define ENUMERATE_BUFFER_NUMBER 32
  49. #define INTERVAL_BUFFER_NUMBER 32
  50. #define NOMINAL_FILL_POINT_NUMBER 32
  51. #endif
  52. class EpEdgeStore;
  53. // 'EpEdge' is our classic data structure for tracking an edge:
  54. struct EpEdge
  55. {
  56. EpEdge *Next; // Next active edge (don't check for NULL,
  57. // look for tail sentinel instead)
  58. INT X; // Current X location
  59. INT Dx; // X increment
  60. INT Error; // Current DDA error
  61. INT ErrorUp; // Error increment
  62. INT ErrorDown; // Error decrement when the error rolls over
  63. INT StartY; // Y-row start
  64. INT EndY; // Y-row end
  65. INT WindingDirection; // -1 or 1
  66. };
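// Illustrative only, not part of the original rasterizer: the fields above
// drive a classic integer DDA. Assuming the usual convention where 'Error'
// is kept negative and rolls over at zero (see the renormalization at the
// end of 'ClipEdge' below), advancing an edge by one scan would look
// roughly like this:
inline VOID AdvanceEdgeDdaSketch(EpEdge *edge)
{
    edge->X += edge->Dx;                // Whole-pixel x step per scan
    edge->Error += edge->ErrorUp;       // Accumulate the fractional step
    if (edge->Error >= 0)
    {
        edge->Error -= edge->ErrorDown; // The error rolled over...
        edge->X++;                      // ...so take one extra x step
    }
}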
  67. // We keep the inactive-array separate from the edge allocations so that
  68. // we can more easily do in-place sorts on it:
  69. struct EpInactiveEdge
  70. {
  71. EpEdge *Edge; // Associated edge
  72. LONGLONG Yx; // Sorting key, StartY and X packed into an lword
  73. };
  74. // We allocate room for our edge data structures in batches:
  75. struct EpEdgeAllocation
  76. {
  77. EpEdgeAllocation *Next; // Next allocation batch (may be NULL)
  78. INT Count;
  79. EpEdge EdgeArray[EDGE_STORE_STACK_NUMBER];
  80. };
  81. // The following is effectively the parameter list for 'InitializeEdges',
  82. // which takes a run of points and sets up the initial edge list:
  83. struct EpInitializeEdgesContext
  84. {
  85. INT MaxY; // Maximum 'y' found, should be INT_MIN on
  86. // first call to 'InitializeEdges'
  87. RECT* ClipRect; // Bounding clip rectangle in 28.4 format
  88. EpEdgeStore *Store; // Where to stick the edges
  89. BOOL IsAntialias; // The edges are going to be rendered
  90. // using antialiasing super-sampling
  91. };
  92. // Interval coverage descriptor for our antialiased filler:
  93. struct EpInterval
  94. {
  95. INT X; // Interval's left edge (Next->X is the
  96. // right edge)
  97. INT Depth; // Number of layers that this interval has
  98. // been covered
  99. EpInterval *Next; // Next interval (look for sentinel, not NULL)
  100. };
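// For illustration (not original commentary): with the antialiased filler
// below, a coverage list such as
//
//     { X = INT_MIN, Depth = 0 } -> { X = 40, Depth = 3 } ->
//     { X = 72, Depth = 0 } -> { X = INT_MAX } (tail sentinel)
//
// says that the super-sampled x range [40, 72) has so far been covered by 3
// of the sub-scans, and everything outside it by none.
// 'GenerateOutputAndClearCoverage' later turns runs like this into per-pixel
// alpha values.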
  101. // Allocator structure for the antialiased fill interval data:
  102. struct EpIntervalBuffer
  103. {
  104. EpIntervalBuffer *Next;
  105. EpInterval Interval[INTERVAL_BUFFER_NUMBER];
  106. };
  107. /**************************************************************************\
  108. *
  109. * Class Description:
  110. *
  111. * 'EpEdgeStore' is used by 'InitializeEdges' as its repository for
  112. * all the edge data:
  113. *
  114. * Created:
  115. *
  116. * 03/25/2000 andrewgo
  117. *
  118. \**************************************************************************/
  119. class EpEdgeStore
  120. {
  121. private:
  122. INT TotalCount; // Total edge count in store
  123. INT CurrentRemaining; // How much room remains in current buffer
  124. EpEdgeAllocation *CurrentBuffer;// Current buffer
  125. EpEdge *CurrentEdge; // Current edge in current buffer
  126. EpEdgeAllocation *Enumerator; // For enumerating all the edges
  127. EpEdgeAllocation EdgeHead; // Our built-in allocation
  128. public:
  129. EpEdgeStore()
  130. {
  131. TotalCount = 0;
  132. CurrentBuffer = &EdgeHead;
  133. CurrentEdge = &EdgeHead.EdgeArray[0];
  134. CurrentRemaining = EDGE_STORE_STACK_NUMBER;
  135. EdgeHead.Count = EDGE_STORE_STACK_NUMBER;
  136. EdgeHead.Next = NULL;
  137. }
  138. ~EpEdgeStore()
  139. {
  140. // Free our allocation list, skipping the head, which is not
  141. // dynamically allocated:
  142. EpEdgeAllocation *allocation = EdgeHead.Next;
  143. while (allocation != NULL)
  144. {
  145. EpEdgeAllocation *next = allocation->Next;
  146. GpFree(allocation);
  147. allocation = next;
  148. }
  149. }
  150. INT StartEnumeration()
  151. {
  152. Enumerator = &EdgeHead;
  153. // Update the count and make sure nothing more gets added (in
  154. // part because this Count would have to be re-computed):
  155. CurrentBuffer->Count -= CurrentRemaining;
  156. TotalCount += CurrentBuffer->Count;
  157. // Prevent this from being called again, because bad things would
  158. // happen:
  159. CurrentBuffer = NULL;
  160. return(TotalCount);
  161. }
  162. BOOL Enumerate(EpEdge** startEdge, EpEdge** endEdge)
  163. {
  164. EpEdgeAllocation *enumerator = Enumerator;
  165. // Might return startEdge == endEdge:
  166. *startEdge = &enumerator->EdgeArray[0];
  167. *endEdge = &enumerator->EdgeArray[Enumerator->Count];
  168. return((Enumerator = enumerator->Next) != NULL);
  169. }
  170. VOID StartAddBuffer(EpEdge **currentEdge, INT *remaining)
  171. {
  172. *currentEdge = CurrentEdge;
  173. *remaining = CurrentRemaining;
  174. }
  175. VOID EndAddBuffer(EpEdge *currentEdge, INT remaining)
  176. {
  177. CurrentEdge = currentEdge;
  178. CurrentRemaining = remaining;
  179. }
  180. BOOL NextAddBuffer(EpEdge **currentEdge, INT *remaining);
  181. };
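// A sketch of the intended calling sequence, pieced together from the methods
// above and from 'InitializeEdges' below (illustrative, not original code):
//
//     EpEdgeStore store;
//     EpEdge *edge;
//     INT remaining;
//
//     store.StartAddBuffer(&edge, &remaining);
//     // ... fill '*edge', advance 'edge', decrement 'remaining', and call
//     //     store.NextAddBuffer(&edge, &remaining) whenever it hits zero ...
//     store.EndAddBuffer(edge, remaining);
//
//     INT totalCount = store.StartEnumeration();
//     EpEdge *start;
//     EpEdge *end;
//     BOOL more;
//     do {
//         more = store.Enumerate(&start, &end);
//         // ... process the edges in [start, end), which may be empty ...
//     } while (more);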
  182. /**************************************************************************\
  183. *
  184. * Function Description:
  185. *
  186. * The edge initializer is out of room in its current 'store' buffer;
  187. * get it a new one.
  188. *
  189. * Created:
  190. *
  191. * 03/25/2000 andrewgo
  192. *
  193. \**************************************************************************/
  194. BOOL
  195. EpEdgeStore::NextAddBuffer(
  196. EpEdge **currentEdge,
  197. INT *remaining
  198. )
  199. {
  200. // The caller has completely filled up this chunk:
  201. ASSERT(*remaining == 0);
  202. // We have to grow our data structure by adding a new buffer
  203. // and adding it to the list:
  204. EpEdgeAllocation *newBuffer = static_cast<EpEdgeAllocation*>
  205. (GpMalloc(sizeof(EpEdgeAllocation) +
  206. sizeof(EpEdge) * (EDGE_STORE_ALLOCATION_NUMBER
  207. - EDGE_STORE_STACK_NUMBER)));
  208. if (newBuffer == NULL)
  209. return(FALSE);
  210. newBuffer->Next = NULL;
  211. newBuffer->Count = EDGE_STORE_ALLOCATION_NUMBER;
  212. TotalCount += CurrentBuffer->Count;
  213. CurrentBuffer->Next = newBuffer;
  214. CurrentBuffer = newBuffer;
  215. *currentEdge = CurrentEdge = &newBuffer->EdgeArray[0];
  216. *remaining = CurrentRemaining = EDGE_STORE_ALLOCATION_NUMBER;
  217. return(TRUE);
  218. }
  219. /**************************************************************************\
  220. *
  221. * Function Description:
  222. *
  223. * Some debug code for verifying the state of the active edge list.
  224. *
  225. * Created:
  226. *
  227. * 03/25/2000 andrewgo
  228. *
  229. \**************************************************************************/
  230. BOOL
  231. AssertActiveList(
  232. const EpEdge *list,
  233. INT yCurrent
  234. )
  235. {
  236. BOOL b = TRUE;
  237. INT activeCount = 0;
  238. ASSERT(list->X == INT_MIN);
  239. b &= (list->X == INT_MIN);
  240. // Skip the head sentinel:
  241. list = list->Next;
  242. while (list->X != INT_MAX)
  243. {
  244. ASSERT(list->X != INT_MIN);
  245. b &= (list->X != INT_MIN);
  246. ASSERT(list->X <= list->Next->X);
  247. b &= (list->X <= list->Next->X);
  248. ASSERT((list->StartY <= yCurrent) && (yCurrent < list->EndY));
  249. b &= ((list->StartY <= yCurrent) && (yCurrent < list->EndY));
  250. activeCount++;
  251. list = list->Next;
  252. }
  253. ASSERT(list->X == INT_MAX);
  254. b &= (list->X == INT_MAX);
  255. // There should always be a multiple of 2 edges in the active list.
  256. //
  257. // NOTE: If you hit this assert, do NOT simply comment it out!
  258. // It usually means that all the edges didn't get initialized
  259. // properly. For every scan-line, there has to be a left edge
  260. // and a right edge (or a multiple thereof). So if you give
  261. // even a single bad edge to the edge initializer (or you miss
  262. // one), you'll probably hit this assert.
  263. ASSERT((activeCount & 1) == 0);
  264. b &= ((activeCount & 1) == 0);
  265. return(b);
  266. }
  267. /**************************************************************************\
  268. *
  269. * Function Description:
  270. *
  271. * Some debug code for verifying the state of the active edge list.
  272. *
  273. * Created:
  274. *
  275. * 03/25/2000 andrewgo
  276. *
  277. \**************************************************************************/
  278. VOID
  279. AssertActiveListOrder(
  280. const EpEdge *list
  281. )
  282. {
  283. INT activeCount = 0;
  284. ASSERT(list->X == INT_MIN);
  285. // Skip the head sentinel:
  286. list = list->Next;
  287. while (list->X != INT_MAX)
  288. {
  289. ASSERT(list->X != INT_MIN);
  290. ASSERT(list->X <= list->Next->X);
  291. activeCount++;
  292. list = list->Next;
  293. }
  294. ASSERT(list->X == INT_MAX);
  295. }
  296. /**************************************************************************\
  297. *
  298. * Class Description:
  299. *
  300. * Base class for all our fill routines.
  301. *
  302. * Created:
  303. *
  304. * 03/25/2000 andrewgo
  305. *
  306. \**************************************************************************/
  307. class EpFiller : public DpOutputSpan
  308. {
  309. public:
  310. virtual BOOL IsValid() const { return(TRUE); }
  311. };
  312. typedef VOID (FASTCALL EpFiller::*EpFillerFunction)(EpEdge *, INT);
  313. /**************************************************************************\
  314. *
  315. * Class Description:
  316. *
  317. * Antialiased filler state.
  318. *
  319. * Created:
  320. *
  321. * 03/25/2000 andrewgo
  322. *
  323. \**************************************************************************/
  324. class EpAntialiasedFiller : public EpFiller
  325. {
  326. private:
  327. INT Y; // Current scan
  328. DpOutputSpan *Output;
  329. DpOutputSpan *Clipper;
  330. EpInterval *StartInterval; // Points to list head entry
  331. EpInterval *NewInterval;
  332. EpInterval *EndIntervalMinus2;
  333. EpIntervalBuffer BuiltinBuffer;
  334. EpIntervalBuffer *CurrentBuffer;
  335. public:
  336. EpAntialiasedFiller(DpOutputSpan *output)
  337. {
  338. Output = output;
  339. Clipper = this;
  340. BuiltinBuffer.Interval[0].X = INT_MIN;
  341. BuiltinBuffer.Interval[0].Depth = 0;
  342. BuiltinBuffer.Interval[0].Next = &BuiltinBuffer.Interval[1];
  343. BuiltinBuffer.Interval[1].X = INT_MAX;
  344. BuiltinBuffer.Interval[1].Depth = 0xdeadbeef;
  345. BuiltinBuffer.Interval[1].Next = NULL;
  346. BuiltinBuffer.Next = NULL;
  347. CurrentBuffer = &BuiltinBuffer;
  348. StartInterval = &BuiltinBuffer.Interval[0];
  349. NewInterval = &BuiltinBuffer.Interval[2];
  350. EndIntervalMinus2 = &BuiltinBuffer.Interval[INTERVAL_BUFFER_NUMBER - 2];
  351. }
  352. ~EpAntialiasedFiller()
  353. {
  354. GenerateOutputAndClearCoverage(Y);
  355. // Free the linked-list of allocations (skipping 'BuiltinBuffer',
  356. // which is built into the class):
  357. EpIntervalBuffer *buffer = BuiltinBuffer.Next;
  358. while (buffer != NULL)
  359. {
  360. EpIntervalBuffer *nextBuffer = buffer->Next;
  361. GpFree(buffer);
  362. buffer = nextBuffer;
  363. }
  364. }
  365. VOID SetClipper(DpOutputSpan *clipper)
  366. {
  367. Clipper = clipper;
  368. }
  369. VOID FASTCALL FillEdgesAlternate(const EpEdge *active, INT yCurrent);
  370. VOID FASTCALL FillEdgesWinding(const EpEdge *active, INT yCurrent);
  371. BOOL Grow(EpInterval **newInterval, EpInterval**endIntervalMinus2);
  372. VOID GenerateOutputAndClearCoverage(INT yCurrent);
  373. virtual GpStatus OutputSpan(INT y, INT left, INT right);
  374. };
  375. /**************************************************************************\
  376. *
  377. * Function Description:
  378. *
  379. * Grow our interval buffer.
  380. *
  381. * Created:
  382. *
  383. * 03/25/2000 andrewgo
  384. *
  385. \**************************************************************************/
  386. BOOL
  387. EpAntialiasedFiller::Grow(
  388. EpInterval **newInterval,
  389. EpInterval **endIntervalMinus2
  390. )
  391. {
  392. EpIntervalBuffer *newBuffer = CurrentBuffer->Next;
  393. if (!newBuffer)
  394. {
  395. newBuffer = static_cast<EpIntervalBuffer*>
  396. (GpMalloc(sizeof(EpIntervalBuffer)));
  397. if (!newBuffer)
  398. return(FALSE);
  399. newBuffer->Next = NULL;
  400. CurrentBuffer->Next = newBuffer;
  401. }
  402. CurrentBuffer = newBuffer;
  403. NewInterval = &newBuffer->Interval[2];
  404. EndIntervalMinus2 = &newBuffer->Interval[INTERVAL_BUFFER_NUMBER - 2];
  405. *newInterval = NewInterval;
  406. *endIntervalMinus2 = EndIntervalMinus2;
  407. return(TRUE);
  408. }
  409. /**************************************************************************\
  410. *
  411. * Function Description:
  412. *
  413. * Given the active edge list for the current scan, do an alternate-mode
  414. * antialiased fill.
  415. *
  416. * Created:
  417. *
  418. * 03/25/2000 andrewgo
  419. *
  420. \**************************************************************************/
  421. VOID
  422. FASTCALL
  423. EpAntialiasedFiller::FillEdgesAlternate(
  424. const EpEdge *activeList,
  425. INT yCurrent
  426. )
  427. {
  428. EpInterval *interval = StartInterval;
  429. EpInterval *newInterval = NewInterval;
  430. EpInterval *endIntervalMinus2 = EndIntervalMinus2;
  431. const EpEdge *startEdge = activeList->Next;
  432. const EpEdge *endEdge;
  433. INT left;
  434. INT right;
  435. INT nextX;
  436. ASSERTACTIVELIST(activeList, yCurrent);
  437. while (startEdge->X != INT_MAX)
  438. {
  439. endEdge = startEdge->Next;
  440. // We skip empty pairs:
  441. if ((left = startEdge->X) != endEdge->X)
  442. {
  443. // We now know we have a non-empty interval. Skip any
  444. // empty interior pairs:
  445. while ((right = endEdge->X) == endEdge->Next->X)
  446. endEdge = endEdge->Next->Next;
  447. ASSERT((left < right) && (right < INT_MAX));
  448. // Make sure we have enough room to add two intervals if
  449. // necessary:
  450. if (newInterval >= endIntervalMinus2)
  451. {
  452. if (!Grow(&newInterval, &endIntervalMinus2))
  453. break; // ==============>
  454. }
  455. // Skip any intervals less than 'left':
  456. while ((nextX = interval->Next->X) < left)
  457. interval = interval->Next;
  458. // Insert a new interval if necessary:
  459. if (nextX != left)
  460. {
  461. newInterval->X = left;
  462. newInterval->Depth = interval->Depth + 1;
  463. newInterval->Next = interval->Next;
  464. interval->Next = newInterval;
  465. interval = newInterval;
  466. newInterval++;
  467. }
  468. // Increase the coverage for any intervals between 'left'
  469. // and 'right':
  470. while ((nextX = interval->Next->X) < right)
  471. {
  472. interval = interval->Next;
  473. interval->Depth++;
  474. }
  475. // Insert another new interval if necessary:
  476. if (nextX != right)
  477. {
  478. newInterval->X = right;
  479. newInterval->Depth = interval->Depth - 1;
  480. newInterval->Next = interval->Next;
  481. interval->Next = newInterval;
  482. interval = newInterval;
  483. newInterval++;
  484. }
  485. }
  486. // Prepare for the next iteration:
  487. startEdge = endEdge->Next;
  488. }
  489. NewInterval = newInterval;
  490. Y = yCurrent;
  491. // If the next scan is done, output what's there:
  492. if (((yCurrent + 1) & AA_Y_MASK) == 0)
  493. {
  494. GenerateOutputAndClearCoverage(yCurrent);
  495. }
  496. }
  497. /**************************************************************************\
  498. *
  499. * Function Description:
  500. *
  501. * Given the active edge list for the current scan, do a winding-mode
  502. * antialiased fill.
  503. *
  504. * Created:
  505. *
  506. * 03/25/2000 andrewgo
  507. *
  508. \**************************************************************************/
  509. VOID
  510. FASTCALL
  511. EpAntialiasedFiller::FillEdgesWinding(
  512. const EpEdge *activeList,
  513. INT yCurrent
  514. )
  515. {
  516. EpInterval *interval = StartInterval;
  517. EpInterval *newInterval = NewInterval;
  518. EpInterval *endIntervalMinus2 = EndIntervalMinus2;
  519. const EpEdge *startEdge = activeList->Next;
  520. const EpEdge *endEdge;
  521. INT left;
  522. INT right;
  523. INT nextX;
  524. INT windingValue;
  525. ASSERTACTIVELIST(activeList, yCurrent);
  526. while (startEdge->X != INT_MAX)
  527. {
  528. endEdge = startEdge->Next;
  529. windingValue = startEdge->WindingDirection;
  530. while ((windingValue += endEdge->WindingDirection) != 0)
  531. endEdge = endEdge->Next;
  532. ASSERT(endEdge->X != INT_MAX);
  533. // We skip empty pairs:
  534. if ((left = startEdge->X) != endEdge->X)
  535. {
  536. // We now know we have a non-empty interval. Skip any
  537. // empty interior pairs:
  538. while ((right = endEdge->X) == endEdge->Next->X)
  539. {
  540. startEdge = endEdge->Next;
  541. endEdge = startEdge->Next;
  542. windingValue = startEdge->WindingDirection;
  543. while ((windingValue += endEdge->WindingDirection) != 0)
  544. endEdge = endEdge->Next;
  545. }
  546. ASSERT((left < right) && (right < INT_MAX));
  547. // Make sure we have enough room to add two intervals if
  548. // necessary:
  549. if (newInterval >= endIntervalMinus2)
  550. {
  551. if (!Grow(&newInterval, &endIntervalMinus2))
  552. break; // ==============>
  553. }
  554. // Skip any intervals less than 'left':
  555. while ((nextX = interval->Next->X) < left)
  556. interval = interval->Next;
  557. // Insert a new interval if necessary:
  558. if (nextX != left)
  559. {
  560. newInterval->X = left;
  561. newInterval->Depth = interval->Depth + 1;
  562. newInterval->Next = interval->Next;
  563. interval->Next = newInterval;
  564. interval = newInterval;
  565. newInterval++;
  566. }
  567. // Increase the coverage for any intervals between 'left'
  568. // and 'right':
  569. while ((nextX = interval->Next->X) < right)
  570. {
  571. interval = interval->Next;
  572. interval->Depth++;
  573. }
  574. // Insert another new interval if necessary:
  575. if (nextX != right)
  576. {
  577. newInterval->X = right;
  578. newInterval->Depth = interval->Depth - 1;
  579. newInterval->Next = interval->Next;
  580. interval->Next = newInterval;
  581. interval = newInterval;
  582. newInterval++;
  583. }
  584. }
  585. // Prepare for the next iteration:
  586. startEdge = endEdge->Next;
  587. }
  588. NewInterval = newInterval;
  589. Y = yCurrent;
  590. // If the next scan is done, output what's there:
  591. if (((yCurrent + 1) & AA_Y_MASK) == 0)
  592. {
  593. GenerateOutputAndClearCoverage(yCurrent);
  594. }
  595. }
  596. /**************************************************************************\
  597. *
  598. * Function Description:
  599. *
  600. * Now that it's been clipped, produce the pixels and modify their
  601. * alpha values according to the antialiased coverage.
  602. *
  603. * Created:
  604. *
  605. * 03/17/2000 andrewgo
  606. *
  607. \**************************************************************************/
  608. GpStatus
  609. EpAntialiasedFiller::OutputSpan(
  610. INT y, // Non-scaled coordinates
  611. INT left,
  612. INT right
  613. )
  614. {
  615. ASSERT(right > left);
  616. // First ask the 'producer' to actually generate the pixels for us.
  617. // Then we need to simply whack the pixel buffer with the coverage
  618. // values we've generated.
  619. Output->OutputSpan(y, left, right);
  620. // Retrieve a pointer to the buffer that the 'producer' just wrote
  621. // the pixels to:
  622. UCHAR *buffer = reinterpret_cast<UCHAR*>
  623. (Output->GetScanBuffer()->GetCurrentBuffer());
  624. EpInterval *coverage = StartInterval;
  625. // Figure out the end of the last pixel, remembering that 'right'
  626. // is exclusive:
  627. INT scaledRight = right << AA_X_SHIFT;
  628. // Skip any intervals that might have been completely clipped out:
  629. INT pixelLeftEdge = left << AA_X_SHIFT;
  630. while (coverage->Next->X < pixelLeftEdge)
  631. coverage = coverage->Next;
  632. INT pixelRightEdge = pixelLeftEdge + AA_X_WIDTH;
  633. while (pixelLeftEdge < scaledRight)
  634. {
  635. UINT coverageValue;
  636. // Compute the coverage coming into the first pixel:
  637. if (coverage->Next->X > pixelRightEdge)
  638. {
  639. // The interval extends out the end of the pixel:
  640. coverageValue = (pixelRightEdge - max(pixelLeftEdge, coverage->X))
  641. * coverage->Depth;
  642. }
  643. else
  644. {
  645. // The interval ends in our pixel:
  646. coverageValue = (coverage->Next->X - max(pixelLeftEdge, coverage->X))
  647. * coverage->Depth;
  648. coverage = coverage->Next;
  649. // Add in any coverages for intervals contained entirely within the
  650. // pixel:
  651. while (coverage->Next->X < pixelRightEdge)
  652. {
  653. coverageValue += (coverage->Next->X - coverage->X) * coverage->Depth;
  654. coverage = coverage->Next;
  655. }
  656. // Add in the coverage for the interval going out of the pixel:
  657. coverageValue += (pixelRightEdge - max(coverage->X, pixelLeftEdge))
  658. * coverage->Depth;
  659. }
  660. // We've goofed if we get a coverage value more than is theoretically
  661. // possible, or if it's zero (in the latter case, it should have
  662. // been filtered already by our caller).
  663. ASSERT(coverageValue <= (1 << (AA_X_SHIFT + AA_Y_SHIFT)));
  664. ASSERT(coverageValue != 0);
  665. // Modify the pixel's alpha channel according to the coverage values:
  666. #if !defined(NO_PREMULTIPLIED_ALPHA)
  667. *(buffer+0) = MULTIPLY_COVERAGE(*(buffer+0), coverageValue, AA_X_SHIFT + AA_Y_SHIFT);
  668. *(buffer+1) = MULTIPLY_COVERAGE(*(buffer+1), coverageValue, AA_X_SHIFT + AA_Y_SHIFT);
  669. *(buffer+2) = MULTIPLY_COVERAGE(*(buffer+2), coverageValue, AA_X_SHIFT + AA_Y_SHIFT);
  670. #endif
  671. *(buffer+3) = MULTIPLY_COVERAGE(*(buffer+3), coverageValue, AA_X_SHIFT + AA_Y_SHIFT);
  672. buffer += 4;
  673. // Now handle the part of the current interval that completely covers
  674. // more than one pixel (if it does):
  675. UINT consecutivePixels = (min(coverage->Next->X, scaledRight)
  676. - pixelRightEdge) >> AA_X_SHIFT;
  677. UINT depth = coverage->Depth;
  678. // By definition, we shouldn't have an interval with zero coverage
  679. // (it should have been filtered out by our caller). We won't fall
  680. // over, but it would be the wrong thing to do for SrcCopy mode.
  681. ASSERT((consecutivePixels == 0) || (depth != 0));
  682. if (depth == AA_Y_HEIGHT)
  683. {
  684. // All these pixels are completely covered. Woo hoo, no work to
  685. // do!
  686. buffer += (4 * consecutivePixels);
  687. }
  688. else
  689. {
  690. // Go through the run and multiply the alpha values by the run's
  691. // coverage:
  692. UINT i = consecutivePixels;
  693. while (i-- != 0)
  694. {
  695. #if !defined(NO_PREMULTIPLIED_ALPHA)
  696. *(buffer+0) = MULTIPLY_COVERAGE(*(buffer+0), depth, AA_Y_SHIFT);
  697. *(buffer+1) = MULTIPLY_COVERAGE(*(buffer+1), depth, AA_Y_SHIFT);
  698. *(buffer+2) = MULTIPLY_COVERAGE(*(buffer+2), depth, AA_Y_SHIFT);
  699. #endif
  700. *(buffer+3) = MULTIPLY_COVERAGE(*(buffer+3), depth, AA_Y_SHIFT);
  701. buffer += 4;
  702. }
  703. }
  704. // Prepare for the next iteration through the loop:
  705. pixelLeftEdge += ((consecutivePixels + 1) << AA_X_SHIFT);
  706. pixelRightEdge += ((consecutivePixels + 1) << AA_X_SHIFT);
  707. }
  708. return(Ok);
  709. }
  710. /**************************************************************************\
  711. *
  712. * Function Description:
  713. *
  714. * Given complete interval data for a scan, find runs of touched pixels
  715. * and then call the clipper (or directly to the rendering routine if
  716. * there's no clipping).
  717. *
  718. * Created:
  719. *
  720. * 03/17/2000 andrewgo
  721. *
  722. \**************************************************************************/
  723. VOID
  724. EpAntialiasedFiller::GenerateOutputAndClearCoverage(
  725. INT yScaled
  726. )
  727. {
  728. EpInterval *spanStart = StartInterval->Next;
  729. EpInterval *spanEnd;
  730. while (spanStart->X != INT_MAX)
  731. {
  732. ASSERT(spanStart->Depth != 0);
  733. // Here we determine the length of a continuous run of covered
  734. // pixels. For the case where the user has set the mode to
  735. // SRCCOPY, it's very important that we don't accidentally pass
  736. // off as 'covered' a pixel that we later realize wasn't covered.
  737. spanEnd = spanStart->Next;
  738. while ((spanEnd->Depth != 0) ||
  739. ((spanEnd->Next->X & ~AA_X_MASK) == (spanEnd->X & ~AA_X_MASK)))
  740. {
  741. spanEnd = spanEnd->Next;
  742. }
  743. // Figure out the actual integer pixel values.
  744. INT left = spanStart->X >> AA_X_SHIFT; // inclusive
  745. INT right = (spanEnd->X + AA_X_WIDTH - 1) >> AA_X_SHIFT; // exclusive
  746. INT y = yScaled >> AA_Y_SHIFT;
  747. // If there's no clip region, this jumps to EpAntialiasedFiller::
  748. // OutputSpan:
  749. Clipper->OutputSpan(y, left, right);
  750. // Advance to after the gap:
  751. spanStart = spanEnd->Next;
  752. }
  753. // Reset our coverage structure. Point the head back to the tail,
  754. // and reset where the next new entry will be placed:
  755. BuiltinBuffer.Interval[0].Next = &BuiltinBuffer.Interval[1];
  756. CurrentBuffer = &BuiltinBuffer;
  757. NewInterval = &BuiltinBuffer.Interval[2];
  758. EndIntervalMinus2 = &BuiltinBuffer.Interval[INTERVAL_BUFFER_NUMBER - 2];
  759. }
  760. /**************************************************************************\
  761. *
  762. * Class Description:
  763. *
  764. * Aliased filler state.
  765. *
  766. * Created:
  767. *
  768. * 03/25/2000 andrewgo
  769. *
  770. \**************************************************************************/
  771. class EpAliasedFiller : public EpFiller
  772. {
  773. private:
  774. DpOutputSpan *Output;
  775. public:
  776. EpAliasedFiller(DpOutputSpan *output)
  777. {
  778. Output = output;
  779. }
  780. VOID SetOutputSpan(DpOutputSpan *output)
  781. {
  782. Output = output;
  783. }
  784. VOID FASTCALL FillEdgesAlternate(const EpEdge *active, INT yCurrent);
  785. VOID FASTCALL FillEdgesWinding(const EpEdge *active, INT yCurrent);
  786. virtual GpStatus OutputSpan(INT y, INT left, INT right) { return Ok; }
  787. };
  788. /**************************************************************************\
  789. *
  790. * Function Description:
  791. *
  792. * Given the active edge list for the current scan, do an alternate-mode
  793. * aliased fill.
  794. *
  795. * Created:
  796. *
  797. * 03/25/2000 andrewgo
  798. *
  799. \**************************************************************************/
  800. VOID
  801. FASTCALL
  802. EpAliasedFiller::FillEdgesAlternate(
  803. const EpEdge *activeList,
  804. INT yCurrent
  805. )
  806. {
  807. const EpEdge *startEdge = activeList->Next;
  808. const EpEdge *endEdge;
  809. INT left;
  810. INT right;
  811. INT nextX;
  812. ASSERTACTIVELIST(activeList, yCurrent);
  813. while (startEdge->X != INT_MAX)
  814. {
  815. endEdge = startEdge->Next;
  816. ASSERT(endEdge->X != INT_MAX);
  817. // We skip empty pairs:
  818. if ((left = startEdge->X) != endEdge->X)
  819. {
  820. // We now know we have a non-empty interval. Skip any
  821. // empty interior pairs:
  822. while ((right = endEdge->X) == endEdge->Next->X)
  823. endEdge = endEdge->Next->Next;
  824. ASSERT((left < right) && (right < INT_MAX));
  825. Output->OutputSpan(yCurrent, left, right);
  826. }
  827. // Prepare for the next iteration:
  828. startEdge = endEdge->Next;
  829. }
  830. }
  831. /**************************************************************************\
  832. *
  833. * Function Description:
  834. *
  835. * Given the active edge list for the current scan, do a winding-mode
  836. * aliased fill.
  837. *
  838. * Created:
  839. *
  840. * 03/25/2000 andrewgo
  841. *
  842. \**************************************************************************/
  843. VOID
  844. FASTCALL
  845. EpAliasedFiller::FillEdgesWinding(
  846. const EpEdge *activeList,
  847. INT yCurrent
  848. )
  849. {
  850. const EpEdge *startEdge = activeList->Next;
  851. const EpEdge *endEdge;
  852. INT left;
  853. INT right;
  854. INT nextX;
  855. INT windingValue;
  856. ASSERTACTIVELIST(activeList, yCurrent);
  857. while (startEdge->X != INT_MAX)
  858. {
  859. endEdge = startEdge->Next;
  860. windingValue = startEdge->WindingDirection;
  861. while ((windingValue += endEdge->WindingDirection) != 0)
  862. endEdge = endEdge->Next;
  863. ASSERT(endEdge->X != INT_MAX);
  864. // We skip empty pairs:
  865. if ((left = startEdge->X) != endEdge->X)
  866. {
  867. // We now know we have a non-empty interval. Skip any
  868. // empty interior pairs:
  869. while ((right = endEdge->X) == endEdge->Next->X)
  870. {
  871. startEdge = endEdge->Next;
  872. endEdge = startEdge->Next;
  873. windingValue = startEdge->WindingDirection;
  874. while ((windingValue += endEdge->WindingDirection) != 0)
  875. endEdge = endEdge->Next;
  876. }
  877. ASSERT((left < right) && (right < INT_MAX));
  878. Output->OutputSpan(yCurrent, left, right);
  879. }
  880. // Prepare for the next iteration:
  881. startEdge = endEdge->Next;
  882. }
  883. }
  884. #ifdef BEZIER_FLATTEN_GDI_COMPATIBLE
  885. // GDI flattens using an error of 2/3
  886. // Flatten to an error of 2/3. During initial phase, use 18.14 format.
  887. #define TEST_MAGNITUDE_INITIAL (6 * 0x00002aa0L)
  888. // Error of 2/3. During normal phase, use 15.17 format.
  889. #define TEST_MAGNITUDE_NORMAL (TEST_MAGNITUDE_INITIAL << 3)
  890. #else
  891. // Use a higher flattening tolerance. Turns out that 2/3 produces very
  892. // noticeable artifacts on antialiased lines.
  893. // Flatten to an error of 1/4. During initial phase, use 18.14 format.
  894. #define TEST_MAGNITUDE_INITIAL (6 * 0x00001000L)
  895. // Error of 1/4. During normal phase, use 15.17 format.
  896. #define TEST_MAGNITUDE_NORMAL (TEST_MAGNITUDE_INITIAL << 3)
  897. #endif
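// Spelling out those encodings (a quick check, not original commentary): one
// unit in 18.14 format is 1 << 14 = 16384, so 0x2aa0 = 10912 is roughly 2/3
// (2/3 * 16384 = 10922.67) and 0x1000 = 4096 is exactly 1/4. The factor of 6
// matches lError() below, which reports six times the true error, and the
// '<< 3' in TEST_MAGNITUDE_NORMAL matches the 18.14 to 15.17 conversion done
// in vSteadyState().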
  898. /**********************************Class***********************************\
  899. * class HfdBasis32
  900. *
  901. * Class for HFD vector objects.
  902. *
  903. * Public Interface:
  904. *
  905. * vInit(p1, p2, p3, p4) - Re-parameterizes the given control points
  906. * to our initial HFD error basis.
  907. * vLazyHalveStepSize(cShift) - Does a lazy shift. Caller has to remember
  908. * it changes 'cShift' by 2.
  909. * vSteadyState(cShift) - Re-parameterizes to our working normal
  910. * error basis.
  911. *
  912. * vTakeStep() - Forward steps to next sub-curve
  913. * vHalveStepSize() - Adjusts down (subdivides) the sub-curve
  914. * vDoubleStepSize() - Adjusts up the sub-curve
  915. * lError() - Returns error if current sub-curve were
  916. * to be approximated using a straight line
  917. * (value is actually multiplied by 6)
  918. * fxValue() - Returns rounded coordinate of first point in
  919. * current sub-curve. Must be in steady
  920. * state.
  921. *
  922. * History:
  923. * 10-Nov-1990 -by- J. Andrew Goossen [andrewgo]
  924. * Wrote it.
  925. \**************************************************************************/
  926. // The code is actually smaller when these methods are forced inline;
  927. // this is one of the rare cases where 'forceinline' is warranted:
  928. #define INLINE __forceinline
  929. class HfdBasis32
  930. {
  931. private:
  932. LONG e0;
  933. LONG e1;
  934. LONG e2;
  935. LONG e3;
  936. public:
  937. INLINE LONG lParentErrorDividedBy4()
  938. {
  939. return(max(abs(e3), abs(e2 + e2 - e3)));
  940. }
  941. INLINE LONG lError()
  942. {
  943. return(max(abs(e2), abs(e3)));
  944. }
  945. INLINE INT fxValue()
  946. {
  947. return((e0 + (1L << 12)) >> 13);
  948. }
  949. INLINE VOID vInit(INT p1, INT p2, INT p3, INT p4)
  950. {
  951. // Change basis and convert from 28.4 to 18.14 format:
  952. e0 = (p1 ) << 10;
  953. e1 = (p4 - p1 ) << 10;
  954. e2 = (3 * (p2 - p3 - p3 + p4)) << 11;
  955. e3 = (3 * (p1 - p2 - p2 + p3)) << 11;
  956. }
  957. INLINE VOID vLazyHalveStepSize(LONG cShift)
  958. {
  959. e2 = (e2 + e3) >> 1;
  960. e1 = (e1 - (e2 >> cShift)) >> 1;
  961. }
  962. INLINE VOID vSteadyState(LONG cShift)
  963. {
  964. // We now convert from 18.14 fixed format to 15.17:
  965. e0 <<= 3;
  966. e1 <<= 3;
  967. register LONG lShift = cShift - 3;
  968. if (lShift < 0)
  969. {
  970. lShift = -lShift;
  971. e2 <<= lShift;
  972. e3 <<= lShift;
  973. }
  974. else
  975. {
  976. e2 >>= lShift;
  977. e3 >>= lShift;
  978. }
  979. }
  980. INLINE VOID vHalveStepSize()
  981. {
  982. e2 = (e2 + e3) >> 3;
  983. e1 = (e1 - e2) >> 1;
  984. e3 >>= 2;
  985. }
  986. INLINE VOID vDoubleStepSize()
  987. {
  988. e1 += e1 + e2;
  989. e3 <<= 2;
  990. e2 = (e2 << 3) - e3;
  991. }
  992. INLINE VOID vTakeStep()
  993. {
  994. e0 += e1;
  995. register LONG lTemp = e2;
  996. e1 += lTemp;
  997. e2 += lTemp - e3;
  998. e3 = lTemp;
  999. }
  1000. };
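// A quick check of the encoding in vInit() above (illustrative): the 28.4
// inputs are shifted left by 10 to reach 18.14, and
//
//     (3 * (p2 - 2*p3 + p4)) << 11 == (6 * (p2 - 2*p3 + p4)) << 10
//     (3 * (p1 - 2*p2 + p3)) << 11 == (6 * (p1 - 2*p2 + p3)) << 10
//
// so e2 and e3 carry the same 6x error factor that HfdBasis64::vInit()
// documents below, and lError() can be compared directly against the
// TEST_MAGNITUDE constants.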
  1001. /**********************************Class***********************************\
  1002. * class Bezier32
  1003. *
  1004. * Bezier cracker.
  1005. *
  1006. * A hybrid cubic Bezier curve flattener based on KirkO's error factor.
  1007. * Generates line segments fast without using the stack. Used to flatten
  1008. * a path.
  1009. *
  1010. * For an understanding of the methods used, see:
  1011. *
  1012. * Kirk Olynyk, "..."
  1013. * Goossen and Olynyk, "System and Method of Hybrid Forward
  1014. * Differencing to Render Bezier Splines"
  1015. * Lien, Shantz and Vaughan Pratt, "Adaptive Forward Differencing for
  1016. * Rendering Curves and Surfaces", Computer Graphics, July 1987
  1017. * Chang and Shantz, "Rendering Trimmed NURBS with Adaptive Forward
  1018. * Differencing", Computer Graphics, August 1988
  1019. * Foley and Van Dam, "Fundamentals of Interactive Computer Graphics"
  1020. *
  1021. * Public Interface:
  1022. *
  1023. * vInit(pptfx) - pptfx points to 4 control points of
  1024. * Bezier. Current point is set to the first
  1025. * point after the start-point.
  1026. * Bezier32(pptfx) - Constructor with initialization.
  1027. * vGetCurrent(pptfx) - Returns current polyline point.
  1028. * bCurrentIsEndPoint() - TRUE if current point is end-point.
  1029. * vNext() - Moves to next polyline point.
  1030. *
  1031. * History:
  1032. * 1-Oct-1991 -by- J. Andrew Goossen [andrewgo]
  1033. * Wrote it.
  1034. \**************************************************************************/
  1035. class Bezier32
  1036. {
  1037. private:
  1038. LONG cSteps;
  1039. HfdBasis32 x;
  1040. HfdBasis32 y;
  1041. RECT rcfxBound;
  1042. public:
  1043. BOOL bInit(const POINT* aptfx, const RECT*);
  1044. INT cFlatten(POINT* pptfx, INT cptfx, BOOL *pbMore);
  1045. };
  1046. #define FRACTION64 28
  1047. class HfdBasis64
  1048. {
  1049. private:
  1050. LONGLONG e0;
  1051. LONGLONG e1;
  1052. LONGLONG e2;
  1053. LONGLONG e3;
  1054. public:
  1055. VOID vInit(INT p1, INT p2, INT p3, INT p4);
  1056. VOID vHalveStepSize();
  1057. VOID vDoubleStepSize();
  1058. VOID vTakeStep();
  1059. VOID vUntransform(LONG* afx);
  1060. VOID vParentError(LONGLONG* peq) const;
  1061. VOID vError(LONGLONG* peq) const;
  1062. INT fxValue() const;
  1063. };
  1064. class Bezier64
  1065. {
  1066. private:
  1067. HfdBasis64 xLow;
  1068. HfdBasis64 yLow;
  1069. HfdBasis64 xHigh;
  1070. HfdBasis64 yHigh;
  1071. LONGLONG eqErrorLow;
  1072. RECT* prcfxClip;
  1073. RECT rcfxClip;
  1074. LONG cStepsHigh;
  1075. LONG cStepsLow;
  1076. public:
  1077. INT cFlatten(POINT* pptfx, INT cptfx, BOOL *pbMore);
  1078. VOID vInit(const POINT* aptfx, const RECT* prcfx, const LONGLONG eq);
  1079. };
  1080. typedef struct _BEZIERCONTROLS {
  1081. POINT ptfx[4];
  1082. } BEZIERCONTROLS;
  1083. inline VOID vBoundBox(const POINT* aptfx, RECT* prcfx)
  1084. {
  1085. INT i;
  1086. INT left = aptfx[0].x;
  1087. INT right = aptfx[0].x;
  1088. INT top = aptfx[0].y;
  1089. INT bottom = aptfx[0].y;
  1090. for (i = 1; i < 4; i++)
  1091. {
  1092. left = min(left, aptfx[i].x);
  1093. top = min(top, aptfx[i].y);
  1094. right = max(right, aptfx[i].x);
  1095. bottom = max(bottom, aptfx[i].y);
  1096. }
  1097. // We make the bounds one pixel loose for the nominal width
  1098. // stroke case, which increases the bounds by half a pixel
  1099. // in every dimension:
  1100. prcfx->left = left - 16;
  1101. prcfx->top = top - 16;
  1102. prcfx->right = right + 16;
  1103. prcfx->bottom = bottom + 16;
  1104. }
  1105. BOOL bIntersect(
  1106. const RECT *a,
  1107. const RECT *b)
  1108. {
  1109. return((a->left < b->right) &&
  1110. (a->top < b->bottom) &&
  1111. (a->right > b->left) &&
  1112. (a->bottom > b->top));
  1113. }
  1114. BOOL Bezier32::bInit(
  1115. const POINT* aptfxBez, // Pointer to 4 control points
  1116. const RECT* prcfxClip) // Bound box of visible region (optional)
  1117. {
  1118. POINT aptfx[4];
  1119. LONG cShift = 0; // Keeps track of 'lazy' shifts
  1120. cSteps = 1; // Number of steps to do before reach end of curve
  1121. vBoundBox(aptfxBez, &rcfxBound);
  1122. *((BEZIERCONTROLS*) aptfx) = *((BEZIERCONTROLS*) aptfxBez);
  1123. {
  1124. register INT fxOr;
  1125. register INT fxOffset;
  1126. fxOffset = rcfxBound.left;
  1127. fxOr = (aptfx[0].x -= fxOffset);
  1128. fxOr |= (aptfx[1].x -= fxOffset);
  1129. fxOr |= (aptfx[2].x -= fxOffset);
  1130. fxOr |= (aptfx[3].x -= fxOffset);
  1131. fxOffset = rcfxBound.top;
  1132. fxOr |= (aptfx[0].y -= fxOffset);
  1133. fxOr |= (aptfx[1].y -= fxOffset);
  1134. fxOr |= (aptfx[2].y -= fxOffset);
  1135. fxOr |= (aptfx[3].y -= fxOffset);
  1136. // This 32 bit cracker can only handle points in a 10 bit space:
  1137. if ((fxOr & 0xffffc000) != 0)
  1138. return(FALSE);
  1139. }
  1140. x.vInit(aptfx[0].x, aptfx[1].x, aptfx[2].x, aptfx[3].x);
  1141. y.vInit(aptfx[0].y, aptfx[1].y, aptfx[2].y, aptfx[3].y);
  1142. if (prcfxClip == (RECT*) NULL || bIntersect(&rcfxBound, prcfxClip))
  1143. {
  1144. while (TRUE)
  1145. {
  1146. register LONG lTestMagnitude = TEST_MAGNITUDE_INITIAL << cShift;
  1147. if (x.lError() <= lTestMagnitude && y.lError() <= lTestMagnitude)
  1148. break;
  1149. cShift += 2;
  1150. x.vLazyHalveStepSize(cShift);
  1151. y.vLazyHalveStepSize(cShift);
  1152. cSteps <<= 1;
  1153. }
  1154. }
  1155. x.vSteadyState(cShift);
  1156. y.vSteadyState(cShift);
  1157. // Note that this handles the case where the initial error for
  1158. // the Bezier is already less than TEST_MAGNITUDE_NORMAL:
  1159. x.vTakeStep();
  1160. y.vTakeStep();
  1161. cSteps--;
  1162. return(TRUE);
  1163. }
  1164. INT Bezier32::cFlatten(POINT* pptfx, INT cptfx, BOOL *pbMore)
  1165. {
  1166. ASSERT(cptfx > 0);
  1167. INT cptfxOriginal = cptfx;
  1168. do {
  1169. // Return current point:
  1170. pptfx->x = x.fxValue() + rcfxBound.left;
  1171. pptfx->y = y.fxValue() + rcfxBound.top;
  1172. pptfx++;
  1173. // If cSteps == 0, that was the end point in the curve!
  1174. if (cSteps == 0)
  1175. {
  1176. *pbMore = FALSE;
  1177. // '+1' because we haven't decremented 'cptfx' yet:
  1178. return(cptfxOriginal - cptfx + 1);
  1179. }
  1180. // Okay, we have to step:
  1181. if (max(x.lError(), y.lError()) > TEST_MAGNITUDE_NORMAL)
  1182. {
  1183. x.vHalveStepSize();
  1184. y.vHalveStepSize();
  1185. cSteps <<= 1;
  1186. }
  1187. ASSERTMSG(max(x.lError(), y.lError()) <= TEST_MAGNITUDE_NORMAL,
  1188. ("Please tell AndrewGo he was wrong"));
  1189. while (!(cSteps & 1) &&
  1190. x.lParentErrorDividedBy4() <= (TEST_MAGNITUDE_NORMAL >> 2) &&
  1191. y.lParentErrorDividedBy4() <= (TEST_MAGNITUDE_NORMAL >> 2))
  1192. {
  1193. x.vDoubleStepSize();
  1194. y.vDoubleStepSize();
  1195. cSteps >>= 1;
  1196. }
  1197. cSteps--;
  1198. x.vTakeStep();
  1199. y.vTakeStep();
  1200. } while (--cptfx != 0);
  1201. *pbMore = TRUE;
  1202. return(cptfxOriginal);
  1203. }
  1204. ///////////////////////////////////////////////////////////////////////////
  1205. // Bezier64
  1206. //
  1207. // All math is done using 64 bit fixed numbers in a 36.28 format.
  1208. //
  1209. // All drawing is done in a 31 bit space, then a 31 bit window offset
  1210. // is applied. In the initial transform where we change to the HFD
  1211. // basis, e2 and e3 require the most bits precision: e2 = 6(p2 - 2p3 + p4).
  1212. // This requires an additional 4 bits precision -- hence we require 36 bits
  1213. // for the integer part, and the remaining 28 bits is given to the fraction.
  1214. //
  1215. // In rendering a Bezier, every 'subdivide' requires an extra 3 bits of
  1216. // fractional precision. In order to be reversible, we can allow no
  1217. // error to creep in. Since a INT coordinate is 32 bits, and we
  1218. // require an additional 4 bits as mentioned above, that leaves us
  1219. // 28 bits fractional precision -- meaning we can do a maximum of
  1220. // 9 subdivides. Now, the maximum absolute error of a Bezier curve in 27
  1221. // bit integer space is 2^29 - 1. But 9 subdivides reduces the error by a
  1222. // guaranteed factor of 2^18, meaning we can crack down only to an error
  1223. // of 2^11 before we overflow, when in fact we want to crack error to less
  1224. // than 1.
  1225. //
  1226. // So what we do is HFD until we hit an error less than 2^11, reverse our
  1227. // basis transform to get the four control points of this smaller curve
  1228. // (rounding in the process to 32 bits), then invoke another copy of HFD
  1229. // on the reduced Bezier curve. We again have enough precision, but since
  1230. // its starting error is less than 2^11, we can reduce error to 2^-7 before
  1231. // overflowing! We'll start a low HFD after every step of the high HFD.
  1232. ////////////////////////////////////////////////////////////////////////////
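// Spelling out that arithmetic (illustrative recap): each halving of the step
// size gains a guaranteed factor of 4 in error, so 9 subdivides gain
// 4^9 = 2^18; with 28 fractional bits and 3 bits consumed per subdivide, 9 is
// also the most subdivides we can afford. Starting from a worst-case error of
// about 2^29, the high HFD can therefore only get down to 2^29 / 2^18 = 2^11,
// and the low HFD, restarted below 2^11, can get down to 2^11 / 2^18 = 2^-7.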
  1233. // The following is our 2^11 target error encoded as a 36.28 number
  1234. // (don't forget the additional 4 bits of fractional precision!) and
  1235. // the 6 times error multiplier:
  1236. const LONGLONG geqErrorHigh = (LONGLONG)(6 * (1L << 15) >> (32 - FRACTION64)) << 32;
  1237. // The following is the default 2/3 error encoded as a 36.28 number,
  1238. // multiplied by 6, and leaving 4 bits for fraction:
  1239. const LONGLONG geqErrorLow = (LONGLONG)(4) << 32;
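// Checking those constants (illustrative): coordinates are 28.4, so an error
// of 2/3 of a device unit is (2/3) * 16 fixed units; times the error factor
// of 6 that is 64, and 64 carried in a .28 fraction is 64 << 28, i.e.
// (LONGLONG)4 << 32, which is geqErrorLow. Likewise 6 * (1 << 15) is the 2^11
// target times 16 times 6, and the '>> (32 - FRACTION64)' followed by '<< 32'
// nets out to the same '<< 28' scaling for geqErrorHigh.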
  1240. inline INT HfdBasis64::fxValue() const
  1241. {
  1242. // Convert from 36.28 and round:
  1243. LONGLONG eq = e0;
  1244. eq += (1L << (FRACTION64 - 1));
  1245. eq >>= FRACTION64;
  1246. return((INT) (LONG) eq);
  1247. }
  1248. #define MAX(a, b) ((a) >= (b) ? (a) : (b))
  1249. #define ABS(a) ((a) >= 0 ? (a) : -(a))
  1250. inline VOID HfdBasis64::vParentError(LONGLONG* peq) const
  1251. {
  1252. *peq = MAX(ABS(e3 << 2), ABS((e2 << 3) - (e3 << 2)));
  1253. }
  1254. inline VOID HfdBasis64::vError(LONGLONG* peq) const
  1255. {
  1256. *peq = MAX(ABS(e2), ABS(e3));
  1257. }
  1258. VOID HfdBasis64::vInit(INT p1, INT p2, INT p3, INT p4)
  1259. {
  1260. LONGLONG eqTmp;
  1261. LONGLONG eqP2 = (LONGLONG) p2;
  1262. LONGLONG eqP3 = (LONGLONG) p3;
  1263. // e0 = p1
  1264. // e1 = p4 - p1
  1265. // e2 = 6(p2 - 2p3 + p4)
  1266. // e3 = 6(p1 - 2p2 + p3)
  1267. // Change basis:
  1268. e0 = p1; // e0 = p1
  1269. e1 = p4;
  1270. e2 = eqP2; e2 -= eqP3; e2 -= eqP3; e2 += e1; // e2 = p2 - 2*p3 + p4
  1271. e3 = e0; e3 -= eqP2; e3 -= eqP2; e3 += eqP3; // e3 = p1 - 2*p2 + p3
  1272. e1 -= e0; // e1 = p4 - p1
  1273. // Convert to 36.28 format and multiply e2 and e3 by six:
  1274. e0 <<= FRACTION64;
  1275. e1 <<= FRACTION64;
  1276. eqTmp = e2; e2 += eqTmp; e2 += eqTmp; e2 <<= (FRACTION64 + 1);
  1277. eqTmp = e3; e3 += eqTmp; e3 += eqTmp; e3 <<= (FRACTION64 + 1);
  1278. }
  1279. VOID HfdBasis64::vUntransform(LONG* afx)
  1280. {
  1281. // Declare some temps to hold our operations, since we can't modify e0..e3.
  1282. LONGLONG eqP0;
  1283. LONGLONG eqP1;
  1284. LONGLONG eqP2;
  1285. LONGLONG eqP3;
  1286. // p0 = e0
  1287. // p1 = e0 + (6e1 - e2 - 2e3)/18
  1288. // p2 = e0 + (12e1 - 2e2 - e3)/18
  1289. // p3 = e0 + e1
  1290. eqP0 = e0;
  1291. // NOTE PERF: Convert this to a multiply by 6: [andrewgo]
  1292. eqP2 = e1;
  1293. eqP2 += e1;
  1294. eqP2 += e1;
  1295. eqP1 = eqP2;
  1296. eqP1 += eqP2; // 6e1
  1297. eqP1 -= e2; // 6e1 - e2
  1298. eqP2 = eqP1;
  1299. eqP2 += eqP1; // 12e1 - 2e2
  1300. eqP2 -= e3; // 12e1 - 2e2 - e3
  1301. eqP1 -= e3;
  1302. eqP1 -= e3; // 6e1 - e2 - 2e3
  1303. // NOTE: May just want to approximate these divides! [andrewgo]
  1304. // Or can do a 64 bit divide by 32 bit to get 32 bits right here.
  1305. eqP1 /= 18;
  1306. eqP2 /= 18;
  1307. eqP1 += e0;
  1308. eqP2 += e0;
  1309. eqP3 = e0;
  1310. eqP3 += e1;
  1311. // Convert from 36.28 format with rounding:
  1312. eqP0 += (1L << (FRACTION64 - 1)); eqP0 >>= FRACTION64; afx[0] = (LONG) eqP0;
  1313. eqP1 += (1L << (FRACTION64 - 1)); eqP1 >>= FRACTION64; afx[2] = (LONG) eqP1;
  1314. eqP2 += (1L << (FRACTION64 - 1)); eqP2 >>= FRACTION64; afx[4] = (LONG) eqP2;
  1315. eqP3 += (1L << (FRACTION64 - 1)); eqP3 >>= FRACTION64; afx[6] = (LONG) eqP3;
  1316. }
  1317. VOID HfdBasis64::vHalveStepSize()
  1318. {
  1319. // e2 = (e2 + e3) >> 3
  1320. // e1 = (e1 - e2) >> 1
  1321. // e3 >>= 2
  1322. e2 += e3; e2 >>= 3;
  1323. e1 -= e2; e1 >>= 1;
  1324. e3 >>= 2;
  1325. }
  1326. VOID HfdBasis64::vDoubleStepSize()
  1327. {
  1328. // e1 = 2e1 + e2
  1329. // e3 = 4e3;
  1330. // e2 = 8e2 - e3
  1331. e1 <<= 1; e1 += e2;
  1332. e3 <<= 2;
  1333. e2 <<= 3; e2 -= e3;
  1334. }
  1335. VOID HfdBasis64::vTakeStep()
  1336. {
  1337. e0 += e1;
  1338. LONGLONG eqTmp = e2;
  1339. e1 += e2;
  1340. e2 += eqTmp; e2 -= e3;
  1341. e3 = eqTmp;
  1342. }
  1343. VOID Bezier64::vInit(
  1344. const POINT* aptfx, // Pointer to 4 control points
  1345. const RECT* prcfxVis, // Pointer to bound box of visible area (may be NULL)
  1346. LONGLONG eqError) // Fractional maximum error (32.32 format)
  1347. {
  1348. LONGLONG eqTmp;
  1349. cStepsHigh = 1;
  1350. cStepsLow = 0;
  1351. xHigh.vInit(aptfx[0].x, aptfx[1].x, aptfx[2].x, aptfx[3].x);
  1352. yHigh.vInit(aptfx[0].y, aptfx[1].y, aptfx[2].y, aptfx[3].y);
  1353. // Initialize error:
  1354. eqErrorLow = eqError;
  1355. if (prcfxVis == (RECT*) NULL)
  1356. prcfxClip = (RECT*) NULL;
  1357. else
  1358. {
  1359. rcfxClip = *prcfxVis;
  1360. prcfxClip = &rcfxClip;
  1361. }
  1362. while (((xHigh.vError(&eqTmp), eqTmp) > geqErrorHigh) ||
  1363. ((yHigh.vError(&eqTmp), eqTmp) > geqErrorHigh))
  1364. {
  1365. cStepsHigh <<= 1;
  1366. xHigh.vHalveStepSize();
  1367. yHigh.vHalveStepSize();
  1368. }
  1369. }
  1370. INT Bezier64::cFlatten(POINT* pptfx, INT cptfx, BOOL *pbMore)
  1371. {
  1372. POINT aptfx[4];
  1373. RECT rcfxBound;
  1374. LONGLONG eqTmp;
  1375. INT cptfxOriginal = cptfx;
  1376. ASSERT(cptfx > 0);
  1377. do {
  1378. if (cStepsLow == 0)
  1379. {
  1380. // As an optimization, if the bound box of the control points doesn't
  1381. // intersect with the bound box of the visible area, render the entire
  1382. // curve as a single line:
  1383. xHigh.vUntransform(&aptfx[0].x);
  1384. yHigh.vUntransform(&aptfx[0].y);
  1385. xLow.vInit(aptfx[0].x, aptfx[1].x, aptfx[2].x, aptfx[3].x);
  1386. yLow.vInit(aptfx[0].y, aptfx[1].y, aptfx[2].y, aptfx[3].y);
  1387. cStepsLow = 1;
  1388. if (prcfxClip != (RECT*) NULL)
  1389. vBoundBox(aptfx, &rcfxBound);
  1390. if (prcfxClip == (RECT*) NULL || bIntersect(&rcfxBound, prcfxClip))
  1391. {
  1392. while (((xLow.vError(&eqTmp), eqTmp) > eqErrorLow) ||
  1393. ((yLow.vError(&eqTmp), eqTmp) > eqErrorLow))
  1394. {
  1395. cStepsLow <<= 1;
  1396. xLow.vHalveStepSize();
  1397. yLow.vHalveStepSize();
  1398. }
  1399. }
  1400. // This 'if' handles the case where the initial error for the Bezier
  1401. // is already less than the target error:
  1402. if (--cStepsHigh != 0)
  1403. {
  1404. xHigh.vTakeStep();
  1405. yHigh.vTakeStep();
  1406. if (((xHigh.vError(&eqTmp), eqTmp) > geqErrorHigh) ||
  1407. ((yHigh.vError(&eqTmp), eqTmp) > geqErrorHigh))
  1408. {
  1409. cStepsHigh <<= 1;
  1410. xHigh.vHalveStepSize();
  1411. yHigh.vHalveStepSize();
  1412. }
  1413. while (!(cStepsHigh & 1) &&
  1414. ((xHigh.vParentError(&eqTmp), eqTmp) <= geqErrorHigh) &&
  1415. ((yHigh.vParentError(&eqTmp), eqTmp) <= geqErrorHigh))
  1416. {
  1417. xHigh.vDoubleStepSize();
  1418. yHigh.vDoubleStepSize();
  1419. cStepsHigh >>= 1;
  1420. }
  1421. }
  1422. }
  1423. xLow.vTakeStep();
  1424. yLow.vTakeStep();
  1425. pptfx->x = xLow.fxValue();
  1426. pptfx->y = yLow.fxValue();
  1427. pptfx++;
  1428. cStepsLow--;
  1429. if (cStepsLow == 0 && cStepsHigh == 0)
  1430. {
  1431. *pbMore = FALSE;
  1432. // '+1' because we haven't decremented 'cptfx' yet:
  1433. return(cptfxOriginal - cptfx + 1);
  1434. }
  1435. if (((xLow.vError(&eqTmp), eqTmp) > eqErrorLow) ||
  1436. ((yLow.vError(&eqTmp), eqTmp) > eqErrorLow))
  1437. {
  1438. cStepsLow <<= 1;
  1439. xLow.vHalveStepSize();
  1440. yLow.vHalveStepSize();
  1441. }
  1442. while (!(cStepsLow & 1) &&
  1443. ((xLow.vParentError(&eqTmp), eqTmp) <= eqErrorLow) &&
  1444. ((yLow.vParentError(&eqTmp), eqTmp) <= eqErrorLow))
  1445. {
  1446. xLow.vDoubleStepSize();
  1447. yLow.vDoubleStepSize();
  1448. cStepsLow >>= 1;
  1449. }
  1450. } while (--cptfx != 0);
  1451. *pbMore = TRUE;
  1452. return(cptfxOriginal);
  1453. }
  1454. /**********************************Class***********************************\
  1455. * class BEZIER
  1456. *
  1457. * Bezier cracker. Flattens any Bezier in our 28.4 device space down
  1458. * to a smallest 'error' of 2^-7 = 0.0078. Will use fast 32 bit cracker
  1459. * for small curves and slower 64 bit cracker for big curves.
  1460. *
  1461. * Public Interface:
  1462. *
  1463. * vInit(aptfx, prcfxClip, peqError)
  1464. * - pptfx points to 4 control points of Bezier. The first point
  1465. * retrieved by bNext() is the first point in the approximation
  1466. * after the start-point.
  1467. *
  1468. * - prcfxClip is an optional pointer to the bound box of the visible
  1469. * region. This is used to optimize clipping of Bezier curves that
  1470. * won't be seen. Note that this value should account for the pen's
  1471. * width!
  1472. *
  1473. * - optional maximum error in 32.32 format, corresponding to KirkO's
  1474. * error factor.
  1475. *
  1476. * bNext(pptfx)
  1477. * - pptfx points to where next point in approximation will be
  1478. * returned. Returns FALSE if the point is the end-point of the
  1479. * curve.
  1480. *
  1481. * History:
  1482. * 1-Oct-1991 -by- J. Andrew Goossen [andrewgo]
  1483. * Wrote it.
  1484. \**************************************************************************/
  1485. class BEZIER
  1486. {
  1487. private:
  1488. union
  1489. {
  1490. Bezier64 bez64;
  1491. Bezier32 bez32;
  1492. } bez;
  1493. BOOL bBez32;
  1494. public:
  1495. // All coordinates must be in 28.4 format:
  1496. BEZIER(const POINT* aptfx, const RECT* prcfxClip)
  1497. {
  1498. bBez32 = bez.bez32.bInit(aptfx, prcfxClip);
  1499. if (!bBez32)
  1500. bez.bez64.vInit(aptfx, prcfxClip, geqErrorLow);
  1501. }
  1502. INT Flatten(POINT* pptfx, INT cptfx, BOOL *pbMore)
  1503. {
  1504. if (bBez32)
  1505. {
  1506. return(bez.bez32.cFlatten(pptfx, cptfx, pbMore));
  1507. }
  1508. else
  1509. {
  1510. return(bez.bez64.cFlatten(pptfx, cptfx, pbMore));
  1511. }
  1512. }
  1513. };
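// A sketch of typical use (illustrative; the hypothetical 'controlPoints' and
// 'clipBounds' stand in for the caller's data): flatten one cubic, given in
// 28.4 device coordinates, into a small buffer a chunk at a time:
//
//     POINT buffer[32];
//     BOOL more;
//     BEZIER bezier(controlPoints, clipBounds);   // 4 points, optional clip
//     do {
//         INT count = bezier.Flatten(buffer, 32, &more);
//         // ... append 'count' polyline points from 'buffer' ...
//     } while (more);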
  1514. /**************************************************************************\
  1515. *
  1516. * Function Description:
  1517. *
  1518. * Clip the edge vertically.
  1519. *
  1520. * We've pulled this routine out-of-line from InitializeEdges mainly
  1521. * because it needs to call inline Asm, and when there is in-line
  1522. * Asm in a routine the compiler generally does a much less efficient
  1523. * job optimizing the whole routine. InitializeEdges is rather
  1524. * performance critical, so we avoid polluting the whole routine
  1525. * by having this functionality out-of-line.
  1526. *
  1527. * Created:
  1528. *
  1529. * 03/25/2000 andrewgo
  1530. *
  1531. \**************************************************************************/
  1532. VOID
  1533. ClipEdge(
  1534. EpEdge *edgeBuffer,
  1535. INT yClipTopInteger,
  1536. INT dMOriginal
  1537. )
  1538. {
  1539. INT xDelta;
  1540. INT error;
  1541. // Cases where bigNumerator will exceed 32-bits in precision
  1542. // will be rare, but could happen, and we can't fall over
  1543. // in those cases.
  1544. INT dN = edgeBuffer->ErrorDown;
  1545. LONGLONG bigNumerator = Int32x32To64(dMOriginal,
  1546. yClipTopInteger - edgeBuffer->StartY)
  1547. + (edgeBuffer->Error + dN);
  1548. if (bigNumerator >= 0)
  1549. {
  1550. QUOTIENT_REMAINDER_64_32(bigNumerator, dN, xDelta, error);
  1551. }
  1552. else
  1553. {
  1554. bigNumerator = -bigNumerator;
  1555. QUOTIENT_REMAINDER_64_32(bigNumerator, dN, xDelta, error);
  1556. xDelta = -xDelta;
  1557. if (error != 0)
  1558. {
  1559. xDelta--;
  1560. error = dN - error;
  1561. }
  1562. }
  1563. // Update the edge data structure with the results:
  1564. edgeBuffer->StartY = yClipTopInteger;
  1565. edgeBuffer->X += xDelta;
  1566. edgeBuffer->Error = error - dN; // Renormalize error
  1567. }
  1568. /**************************************************************************\
  1569. *
  1570. * Function Description:
  1571. *
  1572. * Add edges to the edge list.
  1573. *
  1574. * Created:
  1575. *
  1576. * 03/25/2000 andrewgo
  1577. *
  1578. \**************************************************************************/
  1579. BOOL
  1580. InitializeEdges(
  1581. VOID *context,
  1582. POINT *pointArray, // Points to a 28.4 array of size 'vertexCount'
  1583. // Note that we may modify the contents!
  1584. INT vertexCount,
  1585. PathEnumerateTermination lastSubpath // not used
  1586. )
  1587. {
  1588. INT xStart;
  1589. INT yStart;
  1590. INT yStartInteger;
  1591. INT yEndInteger;
  1592. INT dMOriginal;
  1593. INT dM;
  1594. INT dN;
  1595. INT dX;
  1596. INT errorUp;
  1597. INT quotient;
  1598. INT remainder;
  1599. INT error;
  1600. LONGLONG bigNumerator;
  1601. INT littleNumerator;
  1602. INT windingDirection;
  1603. EpEdge *edgeBuffer;
  1604. EpEdge *endEdge;
  1605. INT bufferCount;
  1606. INT yClipTopInteger;
  1607. INT yClipTop;
  1608. INT yClipBottom;
  1609. INT xClipLeft;
  1610. INT xClipRight;
  1611. EpInitializeEdgesContext *edgeContext = static_cast<EpInitializeEdgesContext*>(context);
  1612. INT yMax = edgeContext->MaxY;
  1613. EpEdgeStore *store = edgeContext->Store;
  1614. RECT *clipRect = edgeContext->ClipRect;
  1615. INT edgeCount = vertexCount - 1;
  1616. ASSERT(edgeCount >= 1);
  1617. if (clipRect == NULL)
  1618. {
  1619. yClipBottom = 0;
  1620. yClipTopInteger = INT_MIN >> AA_Y_SHIFT;
  1621. }
  1622. else
  1623. {
  1624. yClipTopInteger = clipRect->top >> 4;
  1625. yClipTop = clipRect->top;
  1626. yClipBottom = clipRect->bottom;
  1627. xClipLeft = clipRect->left;
  1628. xClipRight = clipRect->right;
  1629. ASSERT(yClipBottom > 0);
  1630. ASSERT(yClipTop <= yClipBottom);
  1631. }
  1632. if (edgeContext->IsAntialias)
  1633. {
  1634. // If antialiasing, apply the supersampling scaling here before we
  1635. // calculate the DDAs. We do this here and not in the Matrix
  1636. // transform we give to FixedPointPathEnumerate mainly so that the
  1637. // Bezier flattener can continue to operate in its optimal 28.4
  1638. // format.
  1639. //
  1640. // We also apply a half-pixel offset here so that the antialiasing
  1641. // code can assume that the pixel centers are at half-pixel
  1642. // coordinates, not on the integer coordinates.
  1643. POINT *point = pointArray;
  1644. INT i = vertexCount;
  1645. do {
  1646. point->x = (point->x + 8) << AA_X_SHIFT;
  1647. point->y = (point->y + 8) << AA_Y_SHIFT;
  1648. } while (point++, --i != 0);
  1649. yClipTopInteger <<= AA_Y_SHIFT;
  1650. yClipTop <<= AA_Y_SHIFT;
  1651. yClipBottom <<= AA_Y_SHIFT;
  1652. xClipLeft <<= AA_X_SHIFT;
  1653. xClipRight <<= AA_X_SHIFT;
  1654. }
  1655. // Make 'yClipBottom' inclusive by subtracting off one pixel
  1656. // (keeping in mind that we're in 28.4 device space):
  1657. yClipBottom -= 16;
  1658. // Warm up the store where we keep the edge data:
  1659. store->StartAddBuffer(&edgeBuffer, &bufferCount);
  1660. do {
  1661. // Handle trivial rejection:
  1662. if (yClipBottom >= 0)
  1663. {
  1664. // Throw out any edges that are above or below the clipping.
  1665. // This has to be a precise check, because we assume later
  1666. // on that every edge intersects in the vertical dimension
1667. // with the clip rectangle. That assumption is made in two
  1668. // places:
  1669. //
  1670. // 1. When we sort the edges, we assume either zero edges,
  1671. // or two or more.
  1672. // 2. When we start the DDAs, we assume either zero edges,
  1673. // or that there's at least one scan of DDAs to output.
  1674. //
  1675. // Plus, of course, it's less efficient if we let things
  1676. // through.
  1677. //
  1678. // Note that 'yClipBottom' is inclusive:
  1679. BOOL clipHigh = ((pointArray)->y <= yClipTop) &&
  1680. ((pointArray + 1)->y <= yClipTop);
  1681. BOOL clipLow = ((pointArray)->y > yClipBottom) &&
  1682. ((pointArray + 1)->y > yClipBottom);
  1683. #if DBG
  1684. {
  1685. INT yRectTop, yRectBottom, y0, y1, yTop, yBottom;
  1686. // Getting the trivial rejection code right is tricky.
  1687. // So on checked builds let's verify that we're doing it
  1688. // correctly, using a different approach:
  1689. BOOL clipped = FALSE;
  1690. if (clipRect != NULL)
  1691. {
  1692. yRectTop = clipRect->top >> 4;
  1693. yRectBottom = clipRect->bottom >> 4;
  1694. if (edgeContext->IsAntialias)
  1695. {
  1696. yRectTop <<= AA_Y_SHIFT;
  1697. yRectBottom <<= AA_Y_SHIFT;
  1698. }
  1699. y0 = ((pointArray)->y + 15) >> 4;
  1700. y1 = ((pointArray + 1)->y + 15) >> 4;
  1701. yTop = min(y0, y1);
  1702. yBottom = max(y0, y1);
  1703. clipped = ((yTop >= yRectBottom) || (yBottom <= yRectTop));
  1704. }
  1705. ASSERT(clipped == (clipHigh || clipLow));
  1706. }
  1707. #endif
  1708. if (clipHigh || clipLow)
  1709. continue; // ======================>
  1710. if (edgeCount > 1)
  1711. {
  1712. // Here we'll collapse two edges down to one if both are
  1713. // to the left or to the right of the clipping rectangle.
  1714. if (((pointArray)->x < xClipLeft) &&
  1715. ((pointArray + 1)->x < xClipLeft) &&
  1716. ((pointArray + 2)->x < xClipLeft))
  1717. {
  1718. // Note this is one reason why 'pointArray' can't be 'const':
  1719. *(pointArray + 1) = *(pointArray);
  1720. continue; // ======================>
  1721. }
  1722. if (((pointArray)->x > xClipRight) &&
  1723. ((pointArray + 1)->x > xClipRight) &&
  1724. ((pointArray + 2)->x > xClipRight))
  1725. {
  1726. // Note this is one reason why 'pointArray' can't be 'const':
  1727. *(pointArray + 1) = *(pointArray);
  1728. continue; // ======================>
  1729. }
  1730. }
  1731. }
  1732. dM = (pointArray + 1)->x - (pointArray)->x;
  1733. dN = (pointArray + 1)->y - (pointArray)->y;
  1734. if (dN >= 0)
  1735. {
  1736. // The vector points downward:
  1737. xStart = (pointArray)->x;
  1738. yStart = (pointArray)->y;
  1739. yStartInteger = (yStart + 15) >> 4;
  1740. yEndInteger = ((pointArray + 1)->y + 15) >> 4;
  1741. windingDirection = 1;
  1742. }
  1743. else
  1744. {
  1745. // The vector points upward, so we have to essentially
  1746. // 'swap' the end points:
  1747. dN = -dN;
  1748. dM = -dM;
  1749. xStart = (pointArray + 1)->x;
  1750. yStart = (pointArray + 1)->y;
  1751. yStartInteger = (yStart + 15) >> 4;
  1752. yEndInteger = ((pointArray)->y + 15) >> 4;
  1753. windingDirection = -1;
  1754. }
1755. // The edge must span an integer y-value in order to be
1756. // added to the edge list. This serves to get rid of
  1757. // horizontal edges, which cause trouble for our divides.
  1758. if (yEndInteger > yStartInteger)
  1759. {
  1760. yMax = max(yMax, yEndInteger);
  1761. dMOriginal = dM;
  1762. if (dM < 0)
  1763. {
  1764. dM = -dM;
  1765. if (dM < dN) // Can't be '<='
  1766. {
  1767. dX = -1;
  1768. errorUp = dN - dM;
  1769. }
  1770. else
  1771. {
  1772. QUOTIENT_REMAINDER(dM, dN, quotient, remainder);
  1773. dX = -quotient;
  1774. errorUp = remainder;
  1775. if (remainder > 0)
  1776. {
  1777. dX = -quotient - 1;
  1778. errorUp = dN - remainder;
  1779. }
  1780. }
  1781. }
  1782. else
  1783. {
  1784. if (dM < dN)
  1785. {
  1786. dX = 0;
  1787. errorUp = dM;
  1788. }
  1789. else
  1790. {
  1791. QUOTIENT_REMAINDER(dM, dN, quotient, remainder);
  1792. dX = quotient;
  1793. errorUp = remainder;
  1794. }
  1795. }
  1796. error = -1; // Error is initially zero (add dN - 1 for
  1797. // the ceiling, but subtract off dN so that
  1798. // we can check the sign instead of comparing
  1799. // to dN)
  1800. if ((yStart & 15) != 0)
  1801. {
  1802. // Advance to the next integer y coordinate
  1803. for (INT i = 16 - (yStart & 15); i != 0; i--)
  1804. {
  1805. xStart += dX;
  1806. error += errorUp;
  1807. if (error >= 0)
  1808. {
  1809. error -= dN;
  1810. xStart++;
  1811. }
  1812. }
  1813. }
  1814. if ((xStart & 15) != 0)
  1815. {
  1816. error -= dN * (16 - (xStart & 15));
  1817. xStart += 15; // We'll want the ceiling in just a bit...
  1818. }
  1819. xStart >>= 4;
  1820. error >>= 4;
  1821. if (bufferCount == 0)
  1822. {
  1823. if (!store->NextAddBuffer(&edgeBuffer, &bufferCount))
  1824. return(FALSE);
  1825. }
  1826. edgeBuffer->X = xStart;
  1827. edgeBuffer->Dx = dX;
  1828. edgeBuffer->Error = error;
  1829. edgeBuffer->ErrorUp = errorUp;
  1830. edgeBuffer->ErrorDown = dN;
  1831. edgeBuffer->WindingDirection = windingDirection;
  1832. edgeBuffer->StartY = yStartInteger;
  1833. edgeBuffer->EndY = yEndInteger; // Exclusive of end
  1834. // Here we handle the case where the edge starts above the
  1835. // clipping rectangle, and we need to jump down in the 'y'
  1836. // direction to the first unclipped scan-line.
  1837. //
  1838. // Consequently, we advance the DDA here:
  1839. if (yClipTopInteger > yStartInteger)
  1840. {
  1841. ASSERT(edgeBuffer->EndY > yClipTopInteger);
  1842. ClipEdge(edgeBuffer, yClipTopInteger, dMOriginal);
  1843. }
  1844. // Advance to handle the next edge:
  1845. edgeBuffer++;
  1846. bufferCount--;
  1847. }
  1848. } while (pointArray++, --edgeCount != 0);
  1849. // We're done with this batch. Let the store know how many edges
  1850. // we ended up with:
  1851. store->EndAddBuffer(edgeBuffer, bufferCount);
  1852. edgeContext->MaxY = yMax;
  1853. return(TRUE);
  1854. }
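// A minimal sketch of the same DDA setup on plain integer endpoints, to show
// the error-term bookkeeping in isolation (the 'Sketch' names are
// illustrative only; the real code above additionally handles 28.4 sub-pixel
// start coordinates and clipping):
struct SketchDda
{
    INT X;              // Current x on the current scan line
    INT Dx;             // Whole-pixel step per scan line
    INT Error;          // Always negative between steps
    INT ErrorUp;        // Fractional step numerator
    INT ErrorDown;      // Denominator (dN)
};

static VOID
SketchDdaSetup(
    INT x0, INT y0,     // Top endpoint
    INT x1, INT y1,     // Bottom endpoint, y1 > y0 (not horizontal)
    SketchDda *dda
    )
{
    INT dM = x1 - x0;
    INT dN = y1 - y0;
    dda->X = x0;
    dda->ErrorDown = dN;
    if (dM >= 0)
    {
        dda->Dx = dM / dN;
        dda->ErrorUp = dM % dN;
    }
    else
    {
        dda->Dx = -((-dM) / dN);            // Floor the quotient so that the
        dda->ErrorUp = (-dM) % dN;          //   remainder stays non-negative
        if (dda->ErrorUp > 0)
        {
            dda->Dx--;
            dda->ErrorUp = dN - dda->ErrorUp;
        }
    }
    dda->Error = -1;    // Same trick as above: ceiling bias minus dN, so we
                        //   can test the sign instead of comparing to dN
}

static VOID
SketchDdaStep(
    SketchDda *dda      // Advance one scan line down the edge
    )
{
    dda->X += dda->Dx;
    dda->Error += dda->ErrorUp;
    if (dda->Error >= 0)
    {
        dda->Error -= dda->ErrorDown;
        dda->X++;
    }
}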
  1855. /**************************************************************************\
  1856. *
  1857. * Function Description:
  1858. *
  1859. * Returns TRUE if the line from point[1] to point[2] turns 'left'
  1860. * from the line from point[0] to point[1]. Uses the sign of the
  1861. * cross product.
  1862. *
  1863. * Remember that we're in device space, where positive 'y' is down!
  1864. *
  1865. * Created:
  1866. *
  1867. * 04/09/2000 andrewgo
  1868. *
  1869. \**************************************************************************/
  1870. inline
  1871. BOOL
  1872. TurnLeft(
  1873. const POINT *points
  1874. )
  1875. {
  1876. LONGLONG ad = Int32x32To64(points[1].x - points[0].x,
  1877. points[2].y - points[1].y);
  1878. LONGLONG bc = Int32x32To64(points[1].y - points[0].y,
  1879. points[2].x - points[1].x);
  1880. return(ad < bc);
  1881. }
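// A quick worked example of the convention: with y growing downward, the
// corner (0,0) -> (10,0) -> (10,-10) moves right and then up the screen,
// which is a left turn; here ad = 10 * (-10) = -100 and bc = 0 * 0 = 0,
// so ad < bc and TurnLeft returns TRUE.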
  1882. /**************************************************************************\
  1883. *
  1884. * Function Description:
  1885. *
1886. * Computes the index of the NominalDrawVertex table to be used as the
  1887. * drawing vertex. The result is numbered such that a traversal using
  1888. * an increasing pointer will go counter-clockwise around the pen.
  1889. *
  1890. * Created:
  1891. *
  1892. * 04/09/2000 andrewgo
  1893. *
  1894. \**************************************************************************/
  1895. POINT NominalDrawVertex[] =
  1896. {
  1897. // Don't forget that in device space, positive 'y' is down:
  1898. {0, -8},
  1899. {-8, 0},
  1900. {0, 8},
  1901. {8, 0}
  1902. };
  1903. INT OctantToDrawVertexTranslate[] =
  1904. {
  1905. 0, 2, 0, 2, 3, 3, 1, 1
  1906. };
  1907. inline
  1908. INT
  1909. ComputeDrawVertex(
  1910. const POINT* points
  1911. )
  1912. {
  1913. INT dx = points[1].x - points[0].x;
  1914. INT dy = points[1].y - points[0].y;
  1915. INT octant = 0;
  1916. if (dx < 0)
  1917. {
  1918. octant |= 1;
  1919. dx = -dx;
  1920. }
  1921. if (dy < 0)
  1922. {
  1923. octant |= 2;
  1924. dy = -dy;
  1925. }
  1926. if (dy > dx)
  1927. {
  1928. octant |= 4;
  1929. }
  1930. return(OctantToDrawVertexTranslate[octant]);
  1931. }
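// For example, a mostly-horizontal segment heading right and slightly down
// (dx = 10, dy = 3) stays in octant 0, which translates to draw vertex 0,
// i.e. NominalDrawVertex[0] = {0, -8}: the pen is widened vertically by half
// a pixel (8 in 28.4 units), roughly perpendicular to the nearly horizontal
// direction of travel.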
  1932. /**************************************************************************\
  1933. *
  1934. * Function Description:
  1935. *
  1936. * Routine for nominal-width lines that generates a fast outline to
  1937. * be filled by the fill code.
  1938. *
  1939. * The resulting fill must always be done in winding mode.
  1940. *
  1941. * Created:
  1942. *
  1943. * 03/25/2000 andrewgo
  1944. *
  1945. \**************************************************************************/
  1946. BOOL
  1947. InitializeNominal(
  1948. VOID *context,
  1949. POINT *pointArray, // Points to a 28.4 array of size 'vertexCount'
  1950. // Note that we may modify the contents!
  1951. INT vertexCount,
  1952. PathEnumerateTermination lastSubpath
  1953. )
  1954. {
  1955. POINT leftBuffer[NOMINAL_FILL_POINT_NUMBER];
  1956. POINT rightBuffer[NOMINAL_FILL_POINT_NUMBER];
  1957. POINT lastPoint;
  1958. INT iDraw;
  1959. INT iNewDraw;
  1960. INT xStart;
  1961. INT yStart;
  1962. INT xEnd;
  1963. INT yEnd;
  1964. INT xPerp;
  1965. INT yPerp;
  1966. BOOL isLeftTurn;
  1967. INT iNext;
  1968. POINT *rightStartPlus3 = rightBuffer + 3;
  1969. POINT *rightEnd = rightBuffer + NOMINAL_FILL_POINT_NUMBER;
  1970. POINT *leftStart = leftBuffer;
  1971. POINT *leftEndMinus3 = leftBuffer + NOMINAL_FILL_POINT_NUMBER - 3;
  1972. POINT *left = leftStart;
  1973. POINT *right = rightEnd;
  1974. INT lineCount = vertexCount - 1;
  1975. iDraw = ComputeDrawVertex(pointArray);
  1976. // Add start cap:
  1977. xStart = (pointArray)->x;
  1978. yStart = (pointArray)->y;
  1979. (left)->x = xStart - NominalDrawVertex[iDraw].x;
  1980. (left)->y = yStart - NominalDrawVertex[iDraw].y;
  1981. (left + 1)->x = xStart + NominalDrawVertex[(iDraw + 1) & 3].x;
  1982. (left + 1)->y = yStart + NominalDrawVertex[(iDraw + 1) & 3].y;
  1983. left += 2;
  1984. while (TRUE)
  1985. {
  1986. if (left >= leftEndMinus3)
  1987. {
  1988. lastPoint = *(left - 1);
  1989. if (!InitializeEdges(context,
  1990. leftBuffer,
  1991. static_cast<INT>(left - leftBuffer),
  1992. lastSubpath))
  1993. {
  1994. return(FALSE);
  1995. }
  1996. *(leftStart) = lastPoint;
  1997. left = leftStart + 1;
  1998. }
  1999. if (right < rightStartPlus3)
  2000. {
  2001. lastPoint = *right;
  2002. if (!InitializeEdges(context,
  2003. right,
  2004. static_cast<INT>(rightEnd - right),
  2005. lastSubpath))
  2006. {
  2007. return(FALSE);
  2008. }
  2009. *(rightEnd - 1) = lastPoint;
  2010. right = rightEnd - 1;
  2011. }
  2012. xStart = (pointArray)->x;
  2013. yStart = (pointArray)->y;
  2014. xEnd = (pointArray + 1)->x;
  2015. yEnd = (pointArray + 1)->y;
  2016. xPerp = NominalDrawVertex[iDraw].x;
  2017. yPerp = NominalDrawVertex[iDraw].y;
  2018. (left)->x = xStart + xPerp;
  2019. (left)->y = yStart + yPerp;
  2020. (right - 1)->x = xStart - xPerp;
  2021. (right - 1)->y = yStart - yPerp;
  2022. (left + 1)->x = xEnd + xPerp;
  2023. (left + 1)->y = yEnd + yPerp;
  2024. (right - 2)->x = xEnd - xPerp;
  2025. (right - 2)->y = yEnd - yPerp;
  2026. left += 2;
  2027. right -= 2;
  2028. pointArray++;
  2029. if (--lineCount == 0)
  2030. break;
  2031. // Darn, we have to handle a join:
  2032. iNewDraw = ComputeDrawVertex(pointArray);
  2033. if (iNewDraw != iDraw)
  2034. {
  2035. isLeftTurn = TurnLeft(pointArray - 1);
  2036. if (isLeftTurn)
  2037. {
  2038. iNext = (iDraw + 1) & 3;
  2039. if (iNewDraw != iNext)
  2040. {
  2041. (right - 1)->x = xEnd - NominalDrawVertex[iNext].x;
  2042. (right - 1)->y = yEnd - NominalDrawVertex[iNext].y;
  2043. right--;
  2044. }
  2045. (left)->x = xEnd;
  2046. (left)->y = yEnd;
  2047. left++;
  2048. }
  2049. else // We're turning 'right':
  2050. {
  2051. iNext = (iDraw - 1) & 3;
  2052. if (iNewDraw != iNext)
  2053. {
  2054. (left)->x = xEnd + NominalDrawVertex[iNext].x;
  2055. (left)->y = yEnd + NominalDrawVertex[iNext].y;
  2056. left++;
  2057. }
  2058. (right - 1)->x = xEnd;
  2059. (right - 1)->y = yEnd;
  2060. right--;
  2061. }
  2062. }
  2063. ASSERT(left <= &leftBuffer[NOMINAL_FILL_POINT_NUMBER]);
  2064. ASSERT(right >= &rightBuffer[0]);
  2065. iDraw = iNewDraw;
  2066. }
  2067. // Add end cap:
  2068. if (left >= leftEndMinus3)
  2069. {
  2070. lastPoint = *(left - 1);
  2071. if (!InitializeEdges(context,
  2072. leftBuffer,
  2073. static_cast<INT>(left - leftBuffer),
  2074. lastSubpath))
  2075. {
  2076. return(FALSE);
  2077. }
  2078. *(leftStart) = lastPoint;
  2079. left = leftStart + 1;
  2080. }
  2081. xStart = (pointArray)->x;
  2082. yStart = (pointArray)->y;
  2083. (left)->x = xStart + NominalDrawVertex[(iDraw - 1) & 3].x;
  2084. (left)->y = yStart + NominalDrawVertex[(iDraw - 1) & 3].y;
  2085. (left + 1)->x = xStart - NominalDrawVertex[iDraw].x;
  2086. (left + 1)->y = yStart - NominalDrawVertex[iDraw].y;
  2087. left += 2;
  2088. // Flush the left batch. Since we just added an end cap, we
  2089. // know there's more than one vertex in there:
  2090. if (!InitializeEdges(context,
  2091. leftBuffer,
  2092. static_cast<INT>(left - leftBuffer),
  2093. lastSubpath))
  2094. {
  2095. return(FALSE);
  2096. }
  2097. // Don't flush the right buffer if there's only one point in there,
  2098. // because one point doesn't make an edge.
  2099. INT count = static_cast<INT>(rightEnd - right);
  2100. if (count > 1)
  2101. {
  2102. if (!InitializeEdges(context, right, count, lastSubpath))
  2103. {
  2104. return(FALSE);
  2105. }
  2106. }
  2107. return(TRUE);
  2108. }
  2109. /**************************************************************************\
  2110. *
  2111. * Function Description:
  2112. *
  2113. * Does complete parameter checking on the 'types' array of a path.
  2114. *
  2115. * Created:
  2116. *
  2117. * 03/25/2000 andrewgo
  2118. *
  2119. \**************************************************************************/
  2120. BOOL
  2121. ValidatePathTypes(
  2122. const BYTE *typesArray,
  2123. INT count
  2124. )
  2125. {
  2126. BYTE type;
  2127. const BYTE *types = typesArray;
  2128. if (count == 0)
  2129. return(TRUE);
  2130. while (TRUE)
  2131. {
  2132. // The first point in every subpath has to be an unadorned
  2133. // 'start' point:
  2134. if ((*types & PathPointTypePathTypeMask) != PathPointTypeStart)
  2135. {
  2136. WARNING(("Bad subpath start"));
  2137. return(FALSE);
  2138. }
  2139. // Advance to the first point after the 'start' point:
  2140. types++;
  2141. if (--count == 0)
  2142. {
  2143. WARNING(("Path ended after start-path"));
  2144. return(FALSE);
  2145. }
  2146. if ((*types & PathPointTypePathTypeMask) == PathPointTypeStart)
  2147. {
  2148. WARNING(("Can't have a start followed by a start!"));
  2149. return(FALSE);
  2150. }
  2151. // Swallow up runs of lines and Bezier curves:
  2152. do {
  2153. switch(*types & PathPointTypePathTypeMask)
  2154. {
  2155. case PathPointTypeLine:
  2156. types++;
  2157. if (--count == 0)
  2158. return(TRUE);
  2159. break;
  2160. case PathPointTypeBezier:
  2161. if(count < 3)
  2162. {
  2163. WARNING(("Path ended before multiple of 3 Bezier points"));
  2164. return(FALSE);
  2165. }
  2166. if((*types & PathPointTypePathTypeMask) != PathPointTypeBezier)
  2167. {
  2168. WARNING(("Can't have a close on the first Bezier vertex"));
  2169. return(FALSE);
  2170. }
  2171. if((*(types + 1) & PathPointTypePathTypeMask) != PathPointTypeBezier)
  2172. {
  2173. WARNING(("Expected plain Bezier control point for 3rd vertex"));
  2174. return(FALSE);
  2175. }
  2176. if((*(types + 2) & PathPointTypePathTypeMask) != PathPointTypeBezier)
  2177. {
  2178. WARNING(("Expected Bezier control point for 4th vertex"));
  2179. return(FALSE);
  2180. }
  2181. types += 3;
  2182. if ((count -= 3) == 0)
  2183. return(TRUE);
  2184. break;
  2185. default:
  2186. WARNING(("Illegal type"));
  2187. return(FALSE);
  2188. }
  2189. // A close-subpath marker or a start-subpath marker marks the
  2190. // end of a subpath:
  2191. } while (!(*(types - 1) & PathPointTypeCloseSubpath) &&
  2192. ((*types & PathPointTypePathTypeMask) != PathPointTypeStart));
  2193. }
  2194. return(TRUE);
  2195. }
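#if DBG
// A small illustrative self-check of the validator (a sketch only, not called
// anywhere): a closed triangle followed by a single Bezier segment, using the
// standard GDI+ PathPointType encodings referenced above.
static VOID
SketchValidatePathTypes()
{
    static const BYTE types[] =
    {
        PathPointTypeStart,                             // Subpath 1
        PathPointTypeLine,
        PathPointTypeLine | PathPointTypeCloseSubpath,  // Close the triangle
        PathPointTypeStart,                             // Subpath 2
        PathPointTypeBezier,                            // First control point
        PathPointTypeBezier,                            // Second control point
        PathPointTypeBezier                             // End point of the curve
    };
    ASSERT(ValidatePathTypes(types, 7));
}
#endif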
  2196. /**************************************************************************\
  2197. *
  2198. * Function Description:
  2199. *
  2200. * Some debug code for verifying the path.
  2201. *
  2202. * Created:
  2203. *
  2204. * 03/25/2000 andrewgo
  2205. *
  2206. \**************************************************************************/
  2207. VOID
  2208. AssertPath(
  2209. const DpPath *path
  2210. )
  2211. {
  2212. // Make sure that the 'types' array is well-formed, otherwise we
  2213. // may fall over in the FixedPointPathEnumerate function.
  2214. //
  2215. // NOTE: If you hit this assert, DO NOT SIMPLY COMMENT THIS ASSERT OUT!
  2216. //
  2217. // Instead, fix the ValidatePathTypes code if it's letting through
  2218. // valid paths, or (more likely) fix the code that's letting bogus
  2219. // paths through. The FixedPointPathEnumerate routine has some
  2220. // subtle assumptions that require the path to be perfectly valid!
  2221. //
  2222. // No internal code should be producing invalid paths, and all
  2223. // paths created by the application must be parameter checked!
  2224. ASSERT(ValidatePathTypes(path->GetPathTypes(), path->GetPointCount()));
  2225. }
  2226. /**************************************************************************\
  2227. *
  2228. * Function Description:
  2229. *
  2230. * Enumerate the path.
  2231. *
  2232. * NOTE: The 'enumerateFunction' function is allowed to modify the
  2233. * contents of our call-back buffer! (This is mainly done
  2234. * to allow 'InitializeEdges' to be simpler for some clipping trivial
  2235. * rejection cases.)
  2236. *
  2237. * Created:
  2238. *
  2239. * 03/25/2000 andrewgo
  2240. *
  2241. \**************************************************************************/
  2242. BOOL
  2243. FixedPointPathEnumerate(
  2244. const DpPath *path,
  2245. const GpMatrix *matrix,
  2246. const RECT *clipRect, // In scaled 28.4 format
  2247. PathEnumerateType enumType, // Fill, stroke or for flattening.
  2248. FIXEDPOINTPATHENUMERATEFUNCTION enumerateFunction,
  2249. VOID *enumerateContext
  2250. )
  2251. {
  2252. POINT bufferStart[ENUMERATE_BUFFER_NUMBER];
  2253. POINT bezierBuffer[4];
  2254. POINT *buffer;
  2255. INT bufferSize;
  2256. POINT startFigure;
  2257. INT iStart;
  2258. INT iEnd;
  2259. INT runSize;
  2260. INT thisCount;
  2261. BOOL isMore;
  2262. RECT scaledClip;
  2263. INT xLast;
  2264. INT yLast;
  2265. ASSERTPATH(path);
  2266. INT pathCount = path->GetPointCount();
  2267. const PointF *pathPoint = path->GetPathPoints();
  2268. const BYTE *pathType = path->GetPathTypes();
  2269. // Every valid subpath has at least two vertices in it, hence the
  2270. // check of 'pathCount - 1':
  2271. iStart = 0;
  2272. while (iStart < pathCount - 1)
  2273. {
  2274. ASSERT((pathType[iStart] & PathPointTypePathTypeMask)
  2275. == PathPointTypeStart);
  2276. ASSERT((pathType[iStart + 1] & PathPointTypePathTypeMask)
  2277. != PathPointTypeStart);
  2278. // Add the start point to the beginning of the batch, and
  2279. // remember it for handling the close figure:
  2280. matrix->Transform(&pathPoint[iStart], &startFigure, 1);
  2281. bufferStart[0].x = startFigure.x;
  2282. bufferStart[0].y = startFigure.y;
  2283. buffer = bufferStart + 1;
  2284. bufferSize = ENUMERATE_BUFFER_NUMBER - 1;
  2285. // We need to enter our loop with 'iStart' pointing one past
  2286. // the start figure:
  2287. iStart++;
  2288. do {
  2289. // Try finding a run of lines:
  2290. if ((pathType[iStart] & PathPointTypePathTypeMask)
  2291. == PathPointTypeLine)
  2292. {
  2293. iEnd = iStart + 1;
  2294. while ((iEnd < pathCount) &&
  2295. ((pathType[iEnd] & PathPointTypePathTypeMask)
  2296. == PathPointTypeLine))
  2297. {
  2298. iEnd++;
  2299. }
  2300. // Okay, we've found a run of lines. Break it up into our
  2301. // buffer size:
  2302. runSize = (iEnd - iStart);
  2303. do {
  2304. thisCount = min(bufferSize, runSize);
  2305. matrix->Transform(&pathPoint[iStart], buffer, thisCount);
  2306. iStart += thisCount;
  2307. buffer += thisCount;
  2308. runSize -= thisCount;
  2309. bufferSize -= thisCount;
  2310. if (bufferSize > 0)
  2311. break;
  2312. xLast = bufferStart[ENUMERATE_BUFFER_NUMBER - 1].x;
  2313. yLast = bufferStart[ENUMERATE_BUFFER_NUMBER - 1].y;
  2314. if (!(enumerateFunction)(
  2315. enumerateContext,
  2316. bufferStart,
  2317. ENUMERATE_BUFFER_NUMBER,
  2318. PathEnumerateContinue
  2319. ))
  2320. {
  2321. return(FALSE);
  2322. }
  2323. // Continue the last vertex as the first in the new batch:
  2324. bufferStart[0].x = xLast;
  2325. bufferStart[0].y = yLast;
  2326. buffer = bufferStart + 1;
  2327. bufferSize = ENUMERATE_BUFFER_NUMBER - 1;
  2328. } while (runSize != 0);
  2329. }
  2330. else
  2331. {
  2332. ASSERT(iStart + 3 <= pathCount);
  2333. ASSERT((pathType[iStart] & PathPointTypePathTypeMask)
  2334. == PathPointTypeBezier);
  2335. ASSERT((pathType[iStart + 1] & PathPointTypePathTypeMask)
  2336. == PathPointTypeBezier);
  2337. ASSERT((pathType[iStart + 2] & PathPointTypePathTypeMask)
  2338. == PathPointTypeBezier);
  2339. matrix->Transform(&pathPoint[iStart - 1], bezierBuffer, 4);
  2340. // Prepare for the next iteration:
  2341. iStart += 3;
  2342. // Crack that Bezier:
  2343. BEZIER bezier(bezierBuffer, clipRect);
  2344. do {
  2345. thisCount = bezier.Flatten(buffer, bufferSize, &isMore);
  2346. buffer += thisCount;
  2347. bufferSize -= thisCount;
  2348. if (bufferSize > 0)
  2349. break;
  2350. xLast = bufferStart[ENUMERATE_BUFFER_NUMBER - 1].x;
  2351. yLast = bufferStart[ENUMERATE_BUFFER_NUMBER - 1].y;
  2352. if (!(enumerateFunction)(
  2353. enumerateContext,
  2354. bufferStart,
  2355. ENUMERATE_BUFFER_NUMBER,
  2356. PathEnumerateContinue
  2357. ))
  2358. {
  2359. return(FALSE);
  2360. }
  2361. // Continue the last vertex as the first in the new batch:
  2362. bufferStart[0].x = xLast;
  2363. bufferStart[0].y = yLast;
  2364. buffer = bufferStart + 1;
  2365. bufferSize = ENUMERATE_BUFFER_NUMBER - 1;
  2366. } while (isMore);
  2367. }
  2368. } while ((iStart < pathCount) &&
  2369. ((pathType[iStart] & PathPointTypePathTypeMask)
  2370. != PathPointTypeStart));
  2371. // Okay, the subpath is done. But we still have to handle the
  2372. // 'close figure' (which is implicit for a fill):
  2373. bool isClosed = (
  2374. (enumType == PathEnumerateTypeFill) ||
  2375. (pathType[iStart - 1] & PathPointTypeCloseSubpath));
  2376. if (isClosed)
  2377. {
  2378. // Add the close-figure point:
  2379. buffer->x = startFigure.x;
  2380. buffer->y = startFigure.y;
  2381. bufferSize--;
  2382. }
  2383. // We have to flush anything we might have in the batch, unless
  2384. // there's only one vertex in there! (The latter case may happen
  2385. // for the stroke case with no close figure if we just flushed a
  2386. // batch.)
2387. // If we're flattening, we must call the enumerate function one more time to
  2388. // correctly handle closing the subpath, even if there is only
  2389. // one entry in the batch. The flattening callback handles the
  2390. // one point case and closes the subpath properly without adding
  2391. // extraneous points.
  2392. INT verticesInBatch = ENUMERATE_BUFFER_NUMBER - bufferSize;
  2393. if ((verticesInBatch > 1) || (enumType == PathEnumerateTypeFlatten))
  2394. {
2395. // Because we always leave the above loops with at least one vertex
2396. // still in the batch, and the close-figure case adds one more,
2397. // verticesInBatch should always be at least one.
  2398. // If we're flattening, we will sometimes enter here with
  2399. // verticesInBatch==1, but it should never be zero or less.
  2400. ASSERT(verticesInBatch >= 1);
  2401. if (!(enumerateFunction)(
  2402. enumerateContext,
  2403. bufferStart,
  2404. verticesInBatch,
  2405. isClosed ? PathEnumerateCloseSubpath : PathEnumerateEndSubpath
  2406. ))
  2407. {
  2408. return(FALSE);
  2409. }
  2410. }
  2411. }
  2412. return(TRUE);
  2413. }
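// A minimal sketch of an enumeration callback, just to make the batching
// contract concrete: each batch holds at least two 28.4 points (except for
// the one-point flattening case noted above), and when a subpath spills
// across batches the last vertex of one batch is repeated as the first
// vertex of the next, so walking the points pairwise never drops an edge.
// ('SketchBounds' and the callback name are illustrative only.)
struct SketchBounds
{
    RECT Bounds;            // Running bounding box, in 28.4 units
};

static BOOL
SketchAccumulateBounds(
    VOID *context,
    POINT *pointArray,      // 28.4 points for this batch
    INT vertexCount,
    PathEnumerateTermination lastSubpath
    )
{
    SketchBounds *sketch = static_cast<SketchBounds*>(context);
    for (INT i = 0; i < vertexCount; i++)
    {
        sketch->Bounds.left   = min(sketch->Bounds.left,   pointArray[i].x);
        sketch->Bounds.top    = min(sketch->Bounds.top,    pointArray[i].y);
        sketch->Bounds.right  = max(sketch->Bounds.right,  pointArray[i].x);
        sketch->Bounds.bottom = max(sketch->Bounds.bottom, pointArray[i].y);
    }
    return(TRUE);           // Returning FALSE would abort the enumeration
}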
  2414. /**************************************************************************\
  2415. *
  2416. * Function Description:
  2417. *
2418. * We want to sort the inactive list; the primary key is 'y', and
  2419. * the secondary key is 'x'. This routine creates a single LONGLONG
  2420. * key that represents both.
  2421. *
  2422. * Created:
  2423. *
  2424. * 03/25/2000 andrewgo
  2425. *
  2426. \**************************************************************************/
  2427. inline VOID YX(INT x, INT y, LONGLONG *p)
  2428. {
  2429. // Bias 'x' by INT_MAX so that it's effectively unsigned:
  2430. reinterpret_cast<LARGE_INTEGER*>(p)->HighPart = y;
  2431. reinterpret_cast<LARGE_INTEGER*>(p)->LowPart = x + INT_MAX;
  2432. }
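// For example (the values here are arbitrary), packing a few edge starts
// shows the ordering the sort relies on:
//
//     LONGLONG a, b, c;
//     YX(100, 5, &a);     // x = 100, y = 5
//     YX( -3, 7, &b);     // x =  -3, y = 7
//     YX( 50, 7, &c);     // x =  50, y = 7
//
// Here a < b < c: 'y' dominates, and the INT_MAX bias keeps negative 'x'
// values ordered correctly within a single scan line.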
  2433. /**************************************************************************\
  2434. *
  2435. * Function Description:
  2436. *
  2437. * Recursive function to quick-sort our inactive edge list. Note that
  2438. * for performance, the results are not completely sorted; an insertion
  2439. * sort has to be run after the quicksort in order to do a lighter-weight
  2440. * sort of the subtables.
  2441. *
  2442. * Created:
  2443. *
  2444. * 03/25/2000 andrewgo
  2445. *
  2446. \**************************************************************************/
  2447. #define QUICKSORT_THRESHOLD 8
  2448. VOID
  2449. QuickSortEdges(
  2450. EpInactiveEdge *f,
  2451. EpInactiveEdge *l
  2452. )
  2453. {
  2454. EpEdge *e;
  2455. LONGLONG y;
  2456. LONGLONG first;
  2457. LONGLONG second;
  2458. LONGLONG last;
  2459. // Find the median of the first, middle, and last elements:
  2460. EpInactiveEdge *m = f + ((l - f) >> 1);
  2461. SWAP(y, (f + 1)->Yx, m->Yx);
  2462. SWAP(e, (f + 1)->Edge, m->Edge);
  2463. if ((second = (f + 1)->Yx) > (last = l->Yx))
  2464. {
  2465. (f + 1)->Yx = last;
  2466. l->Yx = second;
  2467. SWAP(e, (f + 1)->Edge, l->Edge);
  2468. }
  2469. if ((first = f->Yx) > (last = l->Yx))
  2470. {
  2471. f->Yx = last;
  2472. l->Yx = first;
  2473. SWAP(e, f->Edge, l->Edge);
  2474. }
  2475. if ((second = (f + 1)->Yx) > (first = f->Yx))
  2476. {
  2477. (f + 1)->Yx = first;
  2478. f->Yx = second;
  2479. SWAP(e, (f + 1)->Edge, f->Edge);
  2480. }
  2481. // f->Yx is now the desired median, and (f + 1)->Yx <= f->Yx <= l->Yx
  2482. ASSERT(((f + 1)->Yx <= f->Yx) && (f->Yx <= l->Yx));
  2483. LONGLONG median = f->Yx;
  2484. EpInactiveEdge *i = f + 2;
  2485. while (i->Yx < median)
  2486. i++;
  2487. EpInactiveEdge *j = l - 1;
  2488. while (j->Yx > median)
  2489. j--;
  2490. while (i < j)
  2491. {
  2492. SWAP(y, i->Yx, j->Yx);
  2493. SWAP(e, i->Edge, j->Edge);
  2494. do {
  2495. i++;
  2496. } while (i->Yx < median);
  2497. do {
  2498. j--;
  2499. } while (j->Yx > median);
  2500. }
  2501. SWAP(y, f->Yx, j->Yx);
  2502. SWAP(e, f->Edge, j->Edge);
  2503. size_t a = j - f;
  2504. size_t b = l - j;
  2505. // Use less stack space by recursing on the shorter subtable. Also,
  2506. // have the less-overhead insertion-sort handle small subtables.
  2507. if (a <= b)
  2508. {
  2509. if (a > QUICKSORT_THRESHOLD)
  2510. {
  2511. // 'a' is the smallest, so do it first:
  2512. QuickSortEdges(f, j - 1);
  2513. QuickSortEdges(j + 1, l);
  2514. }
  2515. else if (b > QUICKSORT_THRESHOLD)
  2516. {
  2517. QuickSortEdges(j + 1, l);
  2518. }
  2519. }
  2520. else
  2521. {
  2522. if (b > QUICKSORT_THRESHOLD)
  2523. {
  2524. // 'b' is the smallest, so do it first:
  2525. QuickSortEdges(j + 1, l);
  2526. QuickSortEdges(f, j - 1);
  2527. }
  2528. else if (a > QUICKSORT_THRESHOLD)
  2529. {
2530. QuickSortEdges(f, j - 1);
  2531. }
  2532. }
  2533. }
  2534. /**************************************************************************\
  2535. *
  2536. * Function Description:
  2537. *
  2538. * Do a sort of the inactive table using an insertion-sort. Expects
  2539. * large tables to have already been sorted via quick-sort.
  2540. *
  2541. * Created:
  2542. *
  2543. * 03/25/2000 andrewgo
  2544. *
  2545. \**************************************************************************/
  2546. VOID
  2547. FASTCALL
  2548. InsertionSortEdges(
  2549. EpInactiveEdge *inactive,
  2550. INT count
  2551. )
  2552. {
  2553. EpInactiveEdge *p;
  2554. EpEdge *e;
  2555. LONGLONG y;
  2556. LONGLONG yPrevious;
  2557. ASSERT((inactive - 1)->Yx == _I64_MIN);
  2558. ASSERT(count >= 2);
  2559. inactive++; // Skip first entry (by definition it's already in order!)
  2560. count--;
  2561. do {
  2562. p = inactive;
  2563. // Copy the current stuff to temporary variables to make a hole:
  2564. e = inactive->Edge;
  2565. y = inactive->Yx;
  2566. // Shift everything one slot to the right (effectively moving
  2567. // the hole one position to the left):
  2568. while (y < (yPrevious = (p - 1)->Yx))
  2569. {
  2570. p->Yx = yPrevious;
  2571. p->Edge = (p - 1)->Edge;
  2572. p--;
  2573. }
  2574. // Drop the temporary stuff into the final hole:
  2575. p->Yx = y;
  2576. p->Edge = e;
  2577. // The quicksort should have ensured that we don't have to move
  2578. // any entry terribly far:
  2579. ASSERT(inactive - p <= QUICKSORT_THRESHOLD);
  2580. } while (inactive++, --count != 0);
  2581. }
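// The same head-sentinel trick, sketched on plain integers (illustrative
// only): because the slot in front of the table holds the smallest possible
// key, the inner loop needs no explicit bounds check.
static VOID
SketchInsertionSort(
    INT *table,             // Points just past the sentinel slot
    INT count               // Number of real entries
    )
{
    ASSERT(*(table - 1) == INT_MIN);        // Head sentinel
    for (INT i = 1; i < count; i++)
    {
        INT key = table[i];
        INT *p = &table[i];
        while (key < *(p - 1))              // Sentinel guarantees termination
        {
            *p = *(p - 1);
            p--;
        }
        *p = key;
    }
}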
  2582. /**************************************************************************\
  2583. *
  2584. * Function Description:
  2585. *
  2586. * Assert the state of the inactive array.
  2587. *
  2588. * Created:
  2589. *
  2590. * 03/25/2000 andrewgo
  2591. *
  2592. \**************************************************************************/
  2593. VOID
  2594. AssertInactiveArray(
  2595. EpInactiveEdge *inactive,
  2596. INT count
  2597. )
  2598. {
  2599. // Verify the head:
  2600. ASSERT((inactive - 1)->Yx == _I64_MIN);
  2601. ASSERT(inactive->Yx != _I64_MIN);
  2602. do {
  2603. LONGLONG yx;
  2604. YX(inactive->Edge->X, inactive->Edge->StartY, &yx);
  2605. ASSERT(inactive->Yx == yx);
  2606. ASSERT(inactive->Yx >= (inactive - 1)->Yx);
  2607. } while (inactive++, --count != 0);
2608. // Verify that the tail is set up appropriately:
  2609. ASSERT(inactive->Edge->StartY == INT_MAX);
  2610. }
  2611. /**************************************************************************\
  2612. *
  2613. * Function Description:
  2614. *
  2615. * Initialize and sort the inactive array.
  2616. *
  2617. * Returns:
  2618. *
  2619. * 'y' value of topmost edge.
  2620. *
  2621. * Created:
  2622. *
  2623. * 03/25/2000 andrewgo
  2624. *
  2625. \**************************************************************************/
  2626. INT
  2627. InitializeInactiveArray(
  2628. EpEdgeStore *edgeStore,
  2629. EpInactiveEdge *inactiveArray, // 'inactiveArray' must be at least
  2630. INT count, // 'count + 2' elements in size!
  2631. EpEdge *tailEdge // Tail sentinel for inactive list
  2632. )
  2633. {
  2634. BOOL isMore;
  2635. EpEdge *activeEdge;
  2636. EpEdge *activeEdgeEnd;
  2637. INT i;
  2638. INT j;
  2639. EpEdge *e;
  2640. INT y;
  2641. // First initialize the inactive array. Skip the first entry,
  2642. // which we reserve as a head sentinel for the insertion sort:
  2643. EpInactiveEdge *inactiveEdge = inactiveArray + 1;
  2644. do {
  2645. isMore = edgeStore->Enumerate(&activeEdge, &activeEdgeEnd);
  2646. while (activeEdge != activeEdgeEnd)
  2647. {
  2648. inactiveEdge->Edge = activeEdge;
  2649. YX(activeEdge->X, activeEdge->StartY, &inactiveEdge->Yx);
  2650. inactiveEdge++;
  2651. activeEdge++;
  2652. }
  2653. } while (isMore);
  2654. ASSERT(inactiveEdge - inactiveArray == count + 1);
  2655. // Add the tail, which is used when reading back the array. This
  2656. // is why we had to allocate the array as 'count + 1':
  2657. inactiveEdge->Edge = tailEdge;
  2658. // Add the head, which is used for the insertion sort. This is why
  2659. // we had to allocate the array as 'count + 2':
  2660. inactiveArray->Yx = _I64_MIN;
  2661. // Only invoke the quicksort routine if it's worth the overhead:
  2662. if (count > QUICKSORT_THRESHOLD)
  2663. {
  2664. // Quick-sort this, skipping the first and last elements,
  2665. // which are sentinels.
  2666. //
  2667. // We do 'inactiveArray + count' to be inclusive of the last
  2668. // element:
  2669. QuickSortEdges(inactiveArray + 1, inactiveArray + count);
  2670. }
2671. // Do an insertion sort to handle the mostly sorted result:
  2672. InsertionSortEdges(inactiveArray + 1, count);
  2673. ASSERTINACTIVEARRAY(inactiveArray + 1, count);
  2674. // Return the 'y' value of the topmost edge:
  2675. return(inactiveArray[1].Edge->StartY);
  2676. }
  2677. /**************************************************************************\
  2678. *
  2679. * Function Description:
  2680. *
  2681. * Insert edges into the active edge list.
  2682. *
  2683. * Created:
  2684. *
  2685. * 03/25/2000 andrewgo
  2686. *
  2687. \**************************************************************************/
  2688. VOID
  2689. InsertNewEdges(
  2690. EpEdge *activeList,
  2691. INT yCurrent,
  2692. EpInactiveEdge **inactiveEdge, // In/Out
  2693. INT *yNextInactive // Out, will be INT_MAX when no more
  2694. )
  2695. {
  2696. EpInactiveEdge *inactive = *inactiveEdge;
  2697. ASSERT(inactive->Edge->StartY == yCurrent);
  2698. do {
  2699. EpEdge *newActive = inactive->Edge;
2700. // The active edge list's sentinel X is INT_MAX, so this always
  2701. // terminates:
  2702. while (activeList->Next->X < newActive->X)
  2703. activeList = activeList->Next;
  2704. newActive->Next = activeList->Next;
  2705. activeList->Next = newActive;
  2706. inactive++;
  2707. } while (inactive->Edge->StartY == yCurrent);
  2708. *yNextInactive = inactive->Edge->StartY;
  2709. *inactiveEdge = inactive;
  2710. }
  2711. /**************************************************************************\
  2712. *
  2713. * Function Description:
  2714. *
  2715. * Sort the edges so that they're in ascending 'x' order.
  2716. *
  2717. * We use a bubble-sort for this stage, because edges maintain good
  2718. * locality and don't often switch ordering positions.
  2719. *
  2720. * Created:
  2721. *
  2722. * 03/25/2000 andrewgo
  2723. *
  2724. \**************************************************************************/
  2725. VOID
  2726. FASTCALL
  2727. SortActiveEdges(
  2728. EpEdge *list
  2729. )
  2730. {
  2731. BOOL swapOccurred;
  2732. EpEdge *tmp;
  2733. // We should never be called with an empty active edge list:
  2734. ASSERT(list->Next->X != INT_MAX);
  2735. do {
  2736. swapOccurred = FALSE;
  2737. EpEdge *previous = list;
  2738. EpEdge *current = list->Next;
  2739. EpEdge *next = current->Next;
  2740. INT nextX = next->X;
  2741. do {
  2742. if (nextX < current->X)
  2743. {
  2744. swapOccurred = TRUE;
  2745. previous->Next = next;
  2746. current->Next = next->Next;
  2747. next->Next = current;
  2748. SWAP(tmp, next, current);
  2749. }
  2750. previous = current;
  2751. current = next;
  2752. next = next->Next;
  2753. } while ((nextX = next->X) != INT_MAX);
  2754. } while (swapOccurred);
  2755. }
  2756. /**************************************************************************\
  2757. *
  2758. * Function Description:
  2759. *
  2760. * For each scan-line to be filled:
  2761. *
  2762. * 1. Remove any stale edges from the active edge list
  2763. * 2. Insert into the active edge list any edges new to this scan-line
  2764. * 3. Advance the DDAs of every active edge
  2765. * 4. If any active edges are out of order, re-sort the active edge list
  2766. * 5. Now that the active edges are ready for this scan, call the filler
  2767. * to traverse the edges and output the spans appropriately
  2768. * 6. Lather, rinse, and repeat
  2769. *
  2770. * Created:
  2771. *
  2772. * 03/25/2000 andrewgo
  2773. *
  2774. \**************************************************************************/
  2775. VOID
  2776. RasterizeEdges(
  2777. EpEdge *activeList,
  2778. EpInactiveEdge *inactiveArray,
  2779. INT yCurrent,
  2780. INT yBottom,
  2781. EpFiller *filler,
  2782. EpFillerFunction fillerFunction
  2783. )
  2784. {
  2785. INT yNextInactive;
  2786. InsertNewEdges(activeList, yCurrent, &inactiveArray, &yNextInactive);
  2787. ASSERTACTIVELIST(activeList, yCurrent);
  2788. (filler->*fillerFunction)(activeList, yCurrent);
  2789. while (++yCurrent < yBottom)
  2790. {
  2791. EpEdge *previous = activeList;
  2792. EpEdge *current = activeList->Next;
  2793. INT outOfOrderCount = 0;
  2794. while (TRUE)
  2795. {
  2796. if (current->EndY <= yCurrent)
  2797. {
  2798. // If we've hit the sentinel, our work here is done:
  2799. if (current->EndY == INT_MIN)
  2800. break; // ============>
  2801. // This edge is stale, remove it from the list:
  2802. current = current->Next;
  2803. previous->Next = current;
  2804. continue; // ============>
  2805. }
  2806. // Advance the DDA:
  2807. current->X += current->Dx;
  2808. current->Error += current->ErrorUp;
  2809. if (current->Error >= 0)
  2810. {
  2811. current->Error -= current->ErrorDown;
  2812. current->X++;
  2813. }
  2814. // Is this entry out-of-order with respect to the previous one?
  2815. outOfOrderCount += (previous->X > current->X);
  2816. // Advance:
  2817. previous = current;
  2818. current = current->Next;
  2819. }
  2820. // It turns out that having any out-of-order edges at this point
  2821. // is extremely rare in practice, so only call the bubble-sort
  2822. // if it's truly needed.
  2823. //
  2824. // NOTE: If you're looking at this code trying to fix a bug where
  2825. // the edges are out of order when the filler is called, do
  2826. // NOT simply change the code to always do the bubble-sort!
  2827. // Instead, figure out what caused our 'outOfOrder' logic
  2828. // above to get messed up.
  2829. if (outOfOrderCount)
  2830. {
  2831. SortActiveEdges(activeList);
  2832. }
  2833. ASSERTACTIVELISTORDER(activeList);
  2834. if (yCurrent == yNextInactive)
  2835. {
  2836. InsertNewEdges(activeList, yCurrent, &inactiveArray, &yNextInactive);
  2837. }
  2838. ASSERTACTIVELIST(activeList, yCurrent);
  2839. // Do the appropriate alternate or winding, supersampled or
  2840. // non-supersampled fill:
  2841. (filler->*fillerFunction)(activeList, yCurrent);
  2842. }
  2843. }
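// To make step 5 above concrete, here is a sketch of the traversal an
// alternate (even-odd) filler performs on the sorted active list for one
// scan line: crossings are paired off left-to-right and each [left, right)
// interval is a filled span. (The real EpFiller methods also handle winding
// mode and the antialiased coverage accumulation; 'SketchCountFilledPixels'
// is illustrative only and assumes a well-formed, sorted list.)
static INT
SketchCountFilledPixels(
    EpEdge *activeList      // Head sentinel of a sorted active edge list
    )
{
    INT total = 0;
    EpEdge *edge = activeList->Next;        // Skip the head sentinel
    while (edge->X != INT_MAX)              // Tail sentinel ends the walk
    {
        INT xLeft = edge->X;                // Enter the shape...
        edge = edge->Next;
        INT xRight = edge->X;               // ...and leave it again (crossings
        edge = edge->Next;                  //   pair up for a closed path)
        total += max(xRight - xLeft, 0);
    }
    return(total);
}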
  2844. /**************************************************************************\
  2845. *
  2846. * Function Description:
  2847. *
  2848. * Fill (or sometimes stroke) that path.
  2849. *
  2850. * Created:
  2851. *
  2852. * 03/25/2000 andrewgo
  2853. *
  2854. \**************************************************************************/
  2855. GpStatus
  2856. RasterizePath(
  2857. const DpPath *path,
  2858. GpMatrix *worldTransform,
  2859. GpFillMode fillMode,
  2860. BOOL antiAlias,
  2861. BOOL nominalWideLine,
  2862. DpOutputSpan *output,
  2863. DpClipRegion *clipper,
  2864. const GpRect *drawBounds
  2865. )
  2866. {
  2867. EpInactiveEdge inactiveArrayStack[INACTIVE_LIST_NUMBER];
  2868. EpInactiveEdge *inactiveArray;
  2869. EpInactiveEdge *inactiveArrayAllocation;
  2870. EpEdge headEdge;
  2871. EpEdge tailEdge;
  2872. EpEdge *activeList;
  2873. RECT clipBounds;
  2874. GpRect clipRect;
  2875. EpEdgeStore edgeStore;
  2876. EpInitializeEdgesContext edgeContext;
  2877. INT yClipBottom;
  2878. inactiveArrayAllocation = NULL;
  2879. edgeContext.ClipRect = NULL;
  2880. tailEdge.X = INT_MAX; // Terminator to active list
  2881. tailEdge.StartY = INT_MAX; // Terminator to inactive list
  2882. tailEdge.EndY = INT_MIN;
  2883. headEdge.X = INT_MIN; // Beginning of active list
  2884. edgeContext.MaxY = INT_MIN;
  2885. headEdge.Next = &tailEdge;
  2886. activeList = &headEdge;
  2887. edgeContext.Store = &edgeStore;
  2888. edgeContext.IsAntialias = antiAlias;
  2889. //////////////////////////////////////////////////////////////////////////
  2890. DpRegion::Visibility visibility = clipper->GetRectVisibility(
  2891. drawBounds->X,
  2892. drawBounds->Y,
  2893. drawBounds->X + drawBounds->Width,
  2894. drawBounds->Y + drawBounds->Height);
  2895. if (visibility != DpRegion::TotallyVisible)
  2896. {
  2897. clipper->GetBounds(&clipRect);
  2898. // !!![andrewgo] Don, why do we have to do an 'Invisible' test
  2899. // here? Shouldn't GetRectVisibility have already
  2900. // taken care of that case? (GetRectVisibility
  2901. // was checked by our caller.)
  2902. // Early-out if we're invisible, or if the bounds would overflow
  2903. // our DDA calculations (which would cause us to crash). We
  2904. // leave 4 bits for the 28.4 fixed point, plus enough bits for
  2905. // the antialiasing supersampling. We also need a bit for doing
  2906. // a signed difference without overflowing.
  2907. if ((visibility == DpRegion::Invisible) ||
  2908. (clipRect.X < (INT_MIN >> (5 + AA_X_SHIFT))) ||
  2909. (clipRect.Y < (INT_MIN >> (5 + AA_Y_SHIFT))) ||
  2910. (clipRect.X > (INT_MAX >> (5 + AA_X_SHIFT))) ||
  2911. (clipRect.Y > (INT_MAX >> (5 + AA_Y_SHIFT))) ||
  2912. (clipRect.Width > (INT_MAX >> (5 + AA_X_SHIFT))) ||
  2913. (clipRect.Height > (INT_MAX >> (5 + AA_Y_SHIFT))))
  2914. {
  2915. return(Ok);
  2916. }
  2917. // Scale the clip bounds rectangle by 16 to account for our
  2918. // scaling to 28.4 coordinates:
  2919. clipBounds.left = clipRect.GetLeft() * 16;
  2920. clipBounds.top = clipRect.GetTop() * 16;
  2921. clipBounds.right = clipRect.GetRight() * 16;
  2922. clipBounds.bottom = clipRect.GetBottom() * 16;
  2923. yClipBottom = clipRect.GetBottom();
  2924. edgeContext.ClipRect = &clipBounds;
  2925. }
  2926. //////////////////////////////////////////////////////////////////////////
  2927. // Convert all our points to 28.4 fixed point:
  2928. GpMatrix matrix(*worldTransform);
  2929. matrix.AppendScale(TOREAL(16), TOREAL(16));
  2930. // Enumerate the path and construct the edge table:
  2931. FIXEDPOINTPATHENUMERATEFUNCTION pathEnumerationFunction = InitializeEdges;
  2932. PathEnumerateType enumType = PathEnumerateTypeFill;
  2933. if (nominalWideLine)
  2934. {
  2935. // The nominal-width wideline code always produces edges that
  2936. // require a winding-mode fill:
  2937. pathEnumerationFunction = InitializeNominal;
  2938. fillMode = FillModeWinding;
  2939. enumType = PathEnumerateTypeStroke; // nominal width is a stroke.
  2940. }
  2941. if (!FixedPointPathEnumerate(
  2942. path,
  2943. &matrix,
  2944. edgeContext.ClipRect,
  2945. enumType,
  2946. pathEnumerationFunction,
  2947. &edgeContext
  2948. ))
  2949. {
  2950. return(OutOfMemory);
  2951. }
  2952. INT totalCount = edgeStore.StartEnumeration();
  2953. if (totalCount == 0)
  2954. return(Ok); // We're outta here (empty path or entirely clipped)
2955. // At this point, there have to be at least two edges. If there's only
2956. // one, it means that we didn't do the trivial rejection properly.
  2957. ASSERT(totalCount >= 2);
  2958. inactiveArray = &inactiveArrayStack[0];
  2959. if (totalCount > (INACTIVE_LIST_NUMBER - 2))
  2960. {
  2961. inactiveArrayAllocation = static_cast<EpInactiveEdge*>
  2962. (GpMalloc(sizeof(EpInactiveEdge) * (totalCount + 2)));
  2963. if (inactiveArrayAllocation == NULL)
  2964. return(OutOfMemory);
  2965. inactiveArray = inactiveArrayAllocation;
  2966. }
  2967. // Initialize and sort the inactive array:
  2968. INT yCurrent = InitializeInactiveArray(&edgeStore, inactiveArray,
  2969. totalCount, &tailEdge);
  2970. INT yBottom = edgeContext.MaxY;
  2971. ASSERT(yBottom > 0);
  2972. // Skip the head sentinel on the inactive array:
  2973. inactiveArray++;
  2974. if (antiAlias)
  2975. {
  2976. EpAntialiasedFiller filler(output);
  2977. EpFillerFunction fillerFunction = (fillMode == FillModeAlternate)
  2978. ? (EpFillerFunction) (EpAntialiasedFiller::FillEdgesAlternate)
  2979. : (EpFillerFunction) (EpAntialiasedFiller::FillEdgesWinding);
  2980. if (edgeContext.ClipRect)
  2981. {
  2982. filler.SetClipper(clipper);
2983. // The clipper has to call back to the filler's OutputSpan routine
  2984. // to do the output, and then *we* call output->OutputSpan.
  2985. clipper->InitClipping(&filler, drawBounds->Y);
2986. // 'yClipBottom' is in whole device pixels, and has to be converted
2987. // to the supersampled (30.2) scan-line space we use for antialiasing:
  2988. yBottom = min(yBottom, yClipBottom << AA_Y_SHIFT);
  2989. // 'totalCount' should have been zero if all the edges were
  2990. // clipped out (RasterizeEdges assumes there's at least one edge
  2991. // to be drawn):
  2992. ASSERT(yBottom > yCurrent);
  2993. }
  2994. RasterizeEdges(activeList, inactiveArray, yCurrent, yBottom, &filler,
  2995. fillerFunction);
  2996. }
  2997. else
  2998. {
  2999. EpAliasedFiller filler(output);
  3000. EpFillerFunction fillerFunction = (fillMode == FillModeAlternate)
  3001. ? (EpFillerFunction) (EpAliasedFiller::FillEdgesAlternate)
  3002. : (EpFillerFunction) (EpAliasedFiller::FillEdgesWinding);
  3003. if (edgeContext.ClipRect)
  3004. {
  3005. // The clipper calls output->OutputSpan directly. We just have
  3006. // to remember to call clipper->OutputSpan instead of
  3007. // output->OutputSpan:
  3008. filler.SetOutputSpan(clipper);
  3009. clipper->InitClipping(output, drawBounds->Y);
  3010. yBottom = min(yBottom, yClipBottom);
  3011. // 'totalCount' should have been zero if all the edges were
  3012. // clipped out (RasterizeEdges assumes there's at least one edge
  3013. // to be drawn):
  3014. ASSERT(yBottom > yCurrent);
  3015. }
  3016. RasterizeEdges(activeList, inactiveArray, yCurrent, yBottom, &filler,
  3017. fillerFunction);
  3018. }
  3019. // Free any objects and get outta here:
  3020. if (inactiveArrayAllocation != NULL)
  3021. GpFree(inactiveArrayAllocation);
  3022. return(Ok);
  3023. }