/*
 * Copyright 2006 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/core/SkScanPriv.h"

#include "include/core/SkMatrix.h"
#include "include/core/SkPath.h"
#include "include/core/SkRegion.h"
#include "include/private/SkTo.h"
#include "src/core/SkAntiRun.h"
#include "src/core/SkBlitter.h"
#include "src/core/SkPathPriv.h"

#ifdef SK_ENABLE_PATH_COMPLEXITY_DFX
#include "src/core/SkPathComplexityDfx.h"
#endif

#define SHIFT SK_SUPERSAMPLE_SHIFT
#define SCALE (1 << SHIFT)
#define MASK (SCALE - 1)
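
// For example, assuming SK_SUPERSAMPLE_SHIFT is 2 (its typical value), SCALE is 4
// and MASK is 3: each destination pixel corresponds to a 4x4 block of supersamples,
// x >> SHIFT converts a supersampled coordinate to a destination coordinate, and
// x & MASK is the position within that destination pixel.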

/** @file
    We have two techniques for capturing the output of the supersampler:
    - SUPERMASK, which records a large mask-bitmap
        this is often faster for small, complex objects
    - RLE, which records a rle-encoded scanline
        this is often faster for large objects with big spans

    These blitters use two coordinate systems:
    - destination coordinates, scale equal to the output - often
        abbreviated with 'i' or 'I' in variable names
    - supersampled coordinates, scale equal to the output * SCALE
*/

//#define FORCE_SUPERMASK
//#define FORCE_RLE

///////////////////////////////////////////////////////////////////////////////

/// Base class for a single-pass supersampled blitter.
class BaseSuperBlitter : public SkBlitter {
public:
    BaseSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                     const SkIRect& clipBounds, bool isInverse);

    /// Must be explicitly defined on subclasses.
    void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override {
        SkDEBUGFAIL("How did I get here?");
    }
    /// May not be called on BaseSuperBlitter because it blits out of order.
    void blitV(int x, int y, int height, SkAlpha alpha) override {
        SkDEBUGFAIL("How did I get here?");
    }

protected:
    SkBlitter* fRealBlitter;
    /// Current y coordinate, in destination coordinates.
    int fCurrIY;
    /// Widest row of region to be blitted, in destination coordinates.
    int fWidth;
    /// Leftmost x coordinate in any row, in destination coordinates.
    int fLeft;
    /// Leftmost x coordinate in any row, in supersampled coordinates.
    int fSuperLeft;

    SkDEBUGCODE(int fCurrX;)
    /// Current y coordinate in supersampled coordinates.
    int fCurrY;
    /// Initial y coordinate (top of bounds).
    int fTop;

    SkIRect fSectBounds;
};

BaseSuperBlitter::BaseSuperBlitter(SkBlitter* realBlit, const SkIRect& ir,
                                   const SkIRect& clipBounds, bool isInverse) {
    fRealBlitter = realBlit;

    SkIRect sectBounds;
    if (isInverse) {
        // We use the clip bounds instead of the ir, since we may be asked to
        // draw outside of the rect when we're an inverse filltype
        sectBounds = clipBounds;
    } else {
        if (!sectBounds.intersect(ir, clipBounds)) {
            sectBounds.setEmpty();
        }
    }

    const int left = sectBounds.left();
    const int right = sectBounds.right();

    fLeft = left;
    fSuperLeft = SkLeftShift(left, SHIFT);
    fWidth = right - left;
    fTop = sectBounds.top();
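    // Start one destination row above the top as a "nothing blitted yet" sentinel,
    // so a subclass's first blitH() is treated as the start of a new scanline.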
    fCurrIY = fTop - 1;
    fCurrY = SkLeftShift(fTop, SHIFT) - 1;

    SkDEBUGCODE(fCurrX = -1;)
}

/// Run-length-encoded supersampling antialiased blitter.
class SuperBlitter : public BaseSuperBlitter {
public:
    SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds,
                 bool isInverse);

    ~SuperBlitter() override {
        this->flush();
    }

    /// Once fRuns contains a complete supersampled row, flush() blits
    /// it out through the wrapped blitter.
    void flush();

    /// Blits a row of pixels, with location and width specified
    /// in supersampled coordinates.
    void blitH(int x, int y, int width) override;
    /// Blits a rectangle of pixels, with location and size specified
    /// in supersampled coordinates.
    void blitRect(int x, int y, int width, int height) override;

private:
    // The next three variables are used to track a circular buffer that
    // contains the values used in SkAlphaRuns. These variables should only
    // ever be updated in advanceRuns(), and fRuns should always point to
    // a valid SkAlphaRuns...
    int fRunsToBuffer;
    void* fRunsBuffer;
    int fCurrentRun;
    SkAlphaRuns fRuns;

    // extra one to store the zero at the end
    int getRunsSz() const { return (fWidth + 1 + (fWidth + 2)/2) * sizeof(int16_t); }

    // This function updates the fRuns variable to point to the next buffer space
    // with adequate storage for a SkAlphaRuns. It mostly just advances fCurrentRun
    // and resets fRuns to point to an empty scanline.
    void advanceRuns() {
        const size_t kRunsSz = this->getRunsSz();
        fCurrentRun = (fCurrentRun + 1) % fRunsToBuffer;
        fRuns.fRuns = reinterpret_cast<int16_t*>(
                reinterpret_cast<uint8_t*>(fRunsBuffer) + fCurrentRun * kRunsSz);
        fRuns.fAlpha = reinterpret_cast<SkAlpha*>(fRuns.fRuns + fWidth + 1);
        fRuns.reset(fWidth);
    }

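    // Offset hint returned by fRuns.add(): lets the next add() on the same
    // scanline resume its search past runs it has already visited, instead of
    // rescanning from the start of the row. Reset to 0 whenever a new
    // supersampled row begins.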
    int fOffsetX;
};

SuperBlitter::SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds,
                           bool isInverse)
        : BaseSuperBlitter(realBlitter, ir, clipBounds, isInverse)
{
    fRunsToBuffer = realBlitter->requestRowsPreserved();
    fRunsBuffer = realBlitter->allocBlitMemory(fRunsToBuffer * this->getRunsSz());
    fCurrentRun = -1;

    this->advanceRuns();

    fOffsetX = 0;
}

void SuperBlitter::flush() {
    if (fCurrIY >= fTop) {

        SkASSERT(fCurrentRun < fRunsToBuffer);
        if (!fRuns.empty()) {
            // SkDEBUGCODE(fRuns.dump();)
            fRealBlitter->blitAntiH(fLeft, fCurrIY, fRuns.fAlpha, fRuns.fRuns);
            this->advanceRuns();
            fOffsetX = 0;
        }

        fCurrIY = fTop - 1;
        SkDEBUGCODE(fCurrX = -1;)
    }
}

/** coverage_to_partial_alpha() is being used by SkAlphaRuns, which
    *accumulates* SCALE pixels worth of "alpha" in [0,(256/SCALE)]
    to produce a final value in [0, 255] and handles clamping 256->255
    itself, with the same (alpha - (alpha >> 8)) correction as
    coverage_to_exact_alpha().
*/
static inline int coverage_to_partial_alpha(int aa) {
    aa <<= 8 - 2*SHIFT;
    return aa;
}
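
// A sketch of the arithmetic above, assuming SHIFT == 2 (so SCALE == 4): within
// one destination pixel a span covers at most SCALE == 4 supersamples in x, so
// aa is in [0, 4] and aa << (8 - 2*SHIFT) maps it into [0, 64], i.e. [0, 256/SCALE].
// Accumulating the SCALE == 4 supersampled rows then sums to at most 256, which
// SkAlphaRuns clamps down to 255 as described above.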

/** coverage_to_exact_alpha() is being used by our blitter, which wants
    a final value in [0, 255].
*/
static inline int coverage_to_exact_alpha(int aa) {
    int alpha = (256 >> SHIFT) * aa;
    // clamp 256->255
    return alpha - (alpha >> 8);
}
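
// For example, with SHIFT == 2 (assumed), aa counts covered supersamples in
// [0, 4]: aa == 4 gives 64 * 4 == 256, which the (alpha >> 8) correction turns
// into 255; every smaller product is returned unchanged.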

void SuperBlitter::blitH(int x, int y, int width) {
    SkASSERT(width > 0);

    int iy = y >> SHIFT;
    SkASSERT(iy >= fCurrIY);

    x -= fSuperLeft;
    // hack, until I figure out why my cubics (I think) go beyond the bounds
    if (x < 0) {
        width += x;
        x = 0;
    }

#ifdef SK_DEBUG
    SkASSERT(y != fCurrY || x >= fCurrX);
#endif
    SkASSERT(y >= fCurrY);
    if (fCurrY != y) {
        fOffsetX = 0;
        fCurrY = y;
    }

    if (iy != fCurrIY) {  // new scanline
        this->flush();
        fCurrIY = iy;
    }

    int start = x;
    int stop = x + width;

    SkASSERT(start >= 0 && stop > start);
    // integer-pixel-aligned ends of blit, rounded out
    int fb = start & MASK;
    int fe = stop & MASK;
    int n = (stop >> SHIFT) - (start >> SHIFT) - 1;
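    // fb is the offset of the span's start within its destination pixel (the
    // number of uncovered supersamples to its left), fe is the covered
    // supersample count in the trailing partial pixel (0 if the span ends on a
    // pixel boundary), and n approximates the number of fully covered pixels in
    // between. The block below fixes up the edge cases: a span confined to a
    // single destination pixel (n < 0) and a pixel-aligned start (fb == 0),
    // converting fb into a coverage count along the way.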

    if (n < 0) {
        fb = fe - fb;
        n = 0;
        fe = 0;
    } else {
        if (fb == 0) {
            n += 1;
        } else {
            fb = SCALE - fb;
        }
    }

    fOffsetX = fRuns.add(x >> SHIFT, coverage_to_partial_alpha(fb),
                         n, coverage_to_partial_alpha(fe),
                         (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT),
                         fOffsetX);

#ifdef SK_DEBUG
    fRuns.assertValid(y & MASK, (1 << (8 - SHIFT)));
    fCurrX = x + width;
#endif
}

#if 0 // UNUSED
static void set_left_rite_runs(SkAlphaRuns& runs, int ileft, U8CPU leftA,
                               int n, U8CPU riteA) {
    SkASSERT(leftA <= 0xFF);
    SkASSERT(riteA <= 0xFF);

    int16_t* run = runs.fRuns;
    uint8_t* aa = runs.fAlpha;

    if (ileft > 0) {
        run[0] = ileft;
        aa[0] = 0;
        run += ileft;
        aa += ileft;
    }

    SkASSERT(leftA < 0xFF);
    if (leftA > 0) {
        *run++ = 1;
        *aa++ = leftA;
    }

    if (n > 0) {
        run[0] = n;
        aa[0] = 0xFF;
        run += n;
        aa += n;
    }

    SkASSERT(riteA < 0xFF);
    if (riteA > 0) {
        *run++ = 1;
        *aa++ = riteA;
    }
    run[0] = 0;
}
#endif

void SuperBlitter::blitRect(int x, int y, int width, int height) {
    SkASSERT(width > 0);
    SkASSERT(height > 0);

    // blit leading rows
    while ((y & MASK)) {
        this->blitH(x, y++, width);
        if (--height <= 0) {
            return;
        }
    }
    SkASSERT(height > 0);

    // Since this is a rect, instead of blitting supersampled rows one at a
    // time and then resolving to the destination canvas, we can blit
    // directly to the destination canvas one row per SCALE supersampled rows.
    int start_y = y >> SHIFT;
    int stop_y = (y + height) >> SHIFT;
    int count = stop_y - start_y;
    if (count > 0) {
        y += count << SHIFT;
        height -= count << SHIFT;

        // save original X for our tail blitH() loop at the bottom
        int origX = x;

        x -= fSuperLeft;
        // hack, until I figure out why my cubics (I think) go beyond the bounds
        if (x < 0) {
            width += x;
            x = 0;
        }

        // There is always a left column, a middle, and a right column.
        // ileft is the destination x of the first pixel of the entire rect.
        // xleft is (SCALE - # of covered supersampled pixels) in that
        // destination pixel.
        int ileft = x >> SHIFT;
        int xleft = x & MASK;
        // irite is the destination x of the last pixel of the OPAQUE section.
        // xrite is the number of supersampled pixels extending beyond irite;
        // xrite/SCALE should give us alpha.
        int irite = (x + width) >> SHIFT;
        int xrite = (x + width) & MASK;
        if (!xrite) {
            xrite = SCALE;
            irite--;
        }
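
        // A worked example, assuming SCALE == 4: x == 5 and width == 13 give
        // ileft == 1, xleft == 1 (so 3 of 4 supersamples covered on the left)
        // and irite == 4, xrite == 2 (2 of 4 covered on the right), leaving
        // n == irite - ileft - 1 == 2 fully opaque destination columns in the
        // middle, as computed below.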

        // Need to call flush() to clean up pending draws before we
        // even consider blitV(), since otherwise it can look nonmonotonic.
        SkASSERT(start_y > fCurrIY);
        this->flush();

        int n = irite - ileft - 1;
        if (n < 0) {
            // If n < 0, we'll only have a single partially-transparent column
            // of pixels to render.
            xleft = xrite - xleft;
            SkASSERT(xleft <= SCALE);
            SkASSERT(xleft > 0);
            fRealBlitter->blitV(ileft + fLeft, start_y, count,
                                coverage_to_exact_alpha(xleft));
        } else {
            // With n = 0, we have two possibly-transparent columns of pixels
            // to render; with n > 0, we have opaque columns between them.

            xleft = SCALE - xleft;

            // Using coverage_to_exact_alpha is not consistent with blitH()
            const int coverageL = coverage_to_exact_alpha(xleft);
            const int coverageR = coverage_to_exact_alpha(xrite);

            SkASSERT(coverageL > 0 || n > 0 || coverageR > 0);
            SkASSERT((coverageL != 0) + n + (coverageR != 0) <= fWidth);

            fRealBlitter->blitAntiRect(ileft + fLeft, start_y, n, count,
                                       coverageL, coverageR);
        }

        // preamble for our next call to blitH()
        fCurrIY = stop_y - 1;
        fOffsetX = 0;
        fCurrY = y - 1;
        fRuns.reset(fWidth);
        x = origX;
    }

    // catch any remaining few rows
    SkASSERT(height <= MASK);
    while (--height >= 0) {
        this->blitH(x, y++, width);
    }
}

///////////////////////////////////////////////////////////////////////////////

/// Masked supersampling antialiased blitter.
class MaskSuperBlitter : public BaseSuperBlitter {
public:
    MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect&, bool isInverse);
    ~MaskSuperBlitter() override {
        fRealBlitter->blitMask(fMask, fClipRect);
    }

    void blitH(int x, int y, int width) override;

    static bool CanHandleRect(const SkIRect& bounds) {
#ifdef FORCE_RLE
        return false;
#endif
        int width = bounds.width();
        int64_t rb = SkAlign4(width);
        // use 64bits to detect overflow
        int64_t storage = rb * bounds.height();

        return (width <= MaskSuperBlitter::kMAX_WIDTH) &&
               (storage <= MaskSuperBlitter::kMAX_STORAGE);
    }

private:
    enum {
#ifdef FORCE_SUPERMASK
        kMAX_WIDTH = 2048,
        kMAX_STORAGE = 1024 * 1024 * 2
#else
        kMAX_WIDTH = 32,    // so we don't try to do very wide things, where the RLE blitter would be faster
        kMAX_STORAGE = 1024
#endif
    };

    SkMask fMask;
    SkIRect fClipRect;
    // we add 1 because add_aa_span can write (unchanged) 1 extra byte at the end, rather than
    // perform a test to see if stopAlpha != 0
    uint32_t fStorage[(kMAX_STORAGE >> 2) + 1];
};

MaskSuperBlitter::MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                                   const SkIRect& clipBounds, bool isInverse)
    : BaseSuperBlitter(realBlitter, ir, clipBounds, isInverse)
{
    SkASSERT(CanHandleRect(ir));
    SkASSERT(!isInverse);

    fMask.fImage = (uint8_t*)fStorage;
    fMask.fBounds = ir;
    fMask.fRowBytes = ir.width();
    fMask.fFormat = SkMask::kA8_Format;

    fClipRect = ir;
    if (!fClipRect.intersect(clipBounds)) {
        SkASSERT(0);
        fClipRect.setEmpty();
    }

    // For valgrind, write 1 extra byte at the end so we don't read
    // uninitialized memory. See comment in add_aa_span and fStorage[].
    memset(fStorage, 0, fMask.fBounds.height() * fMask.fRowBytes + 1);
}

static void add_aa_span(uint8_t* alpha, U8CPU startAlpha) {
    /*  I should be able to just add alpha[x] + startAlpha.
        However, if the trailing edge of the previous span and the leading
        edge of the current span round to the same super-sampled x value,
        I might overflow to 256 with this add, hence the funny subtract.
    */
    unsigned tmp = *alpha + startAlpha;
    SkASSERT(tmp <= 256);
    *alpha = SkToU8(tmp - (tmp >> 8));
}

static inline uint32_t quadplicate_byte(U8CPU value) {
    uint32_t pair = (value << 8) | value;
    return (pair << 16) | pair;
}
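
// For example, quadplicate_byte(0x2A) returns 0x2A2A2A2A. Replicating the
// coverage byte into all four lanes lets add_aa_span()'s inner loop below bump
// four adjacent mask bytes with a single 32-bit add; this is carry-safe because
// a fully covered pixel accumulates to at most 255 across the SCALE supersampled
// rows, so no byte lane ever overflows into its neighbor.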

// Perform this tricky subtract, to avoid overflowing to 256. Our caller should
// only ever call us with at most enough to hit 256 (never larger), so it is
// enough to just subtract the high-bit. Actually clamping with a branch would
// be slower (e.g. if (tmp > 255) tmp = 255;)
//
static inline void saturated_add(uint8_t* ptr, U8CPU add) {
    unsigned tmp = *ptr + add;
    SkASSERT(tmp <= 256);
    *ptr = SkToU8(tmp - (tmp >> 8));
}
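
// For example, *ptr == 200 and add == 56 gives tmp == 256; tmp >> 8 is then 1,
// so 255 is stored. Any sum of 255 or less leaves tmp >> 8 at 0 and is stored
// unchanged.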

// minimum count before we want to setup an inner loop, adding 4-at-a-time
#define MIN_COUNT_FOR_QUAD_LOOP 16

static void add_aa_span(uint8_t* alpha, U8CPU startAlpha, int middleCount,
                        U8CPU stopAlpha, U8CPU maxValue) {
    SkASSERT(middleCount >= 0);

    saturated_add(alpha, startAlpha);
    alpha += 1;

    if (middleCount >= MIN_COUNT_FOR_QUAD_LOOP) {
        // loop until we're quad-byte aligned
        while (reinterpret_cast<intptr_t>(alpha) & 0x3) {
            alpha[0] = SkToU8(alpha[0] + maxValue);
            alpha += 1;
            middleCount -= 1;
        }

        int bigCount = middleCount >> 2;
        uint32_t* qptr = reinterpret_cast<uint32_t*>(alpha);
        uint32_t qval = quadplicate_byte(maxValue);
        do {
            *qptr++ += qval;
        } while (--bigCount > 0);

        middleCount &= 3;
        alpha = reinterpret_cast<uint8_t*> (qptr);
        // fall through to the following while-loop
    }

    while (--middleCount >= 0) {
        alpha[0] = SkToU8(alpha[0] + maxValue);
        alpha += 1;
    }

    // potentially this can be off the end of our "legal" alpha values, but that
    // only happens if stopAlpha is also 0. Rather than test for stopAlpha != 0
    // every time (slow), we just do it, and ensure that we've allocated extra space
    // (see the + 1 comment in fStorage[])
    saturated_add(alpha, stopAlpha);
}

void MaskSuperBlitter::blitH(int x, int y, int width) {
    int iy = (y >> SHIFT);

    SkASSERT(iy >= fMask.fBounds.fTop && iy < fMask.fBounds.fBottom);
    iy -= fMask.fBounds.fTop;   // make it relative to 0

    // This should never happen, but it does. Until the true cause is
    // discovered, let's skip this span instead of crashing.
    // See http://crbug.com/17569.
    if (iy < 0) {
        return;
    }

#ifdef SK_DEBUG
    {
        int ix = x >> SHIFT;
        SkASSERT(ix >= fMask.fBounds.fLeft && ix < fMask.fBounds.fRight);
    }
#endif

    x -= SkLeftShift(fMask.fBounds.fLeft, SHIFT);

    // hack, until I figure out why my cubics (I think) go beyond the bounds
    if (x < 0) {
        width += x;
        x = 0;
    }

    uint8_t* row = fMask.fImage + iy * fMask.fRowBytes + (x >> SHIFT);

    int start = x;
    int stop = x + width;

    SkASSERT(start >= 0 && stop > start);
    int fb = start & MASK;
    int fe = stop & MASK;
    int n = (stop >> SHIFT) - (start >> SHIFT) - 1;


    if (n < 0) {
        SkASSERT(row >= fMask.fImage);
        SkASSERT(row < fMask.fImage + kMAX_STORAGE + 1);
        add_aa_span(row, coverage_to_partial_alpha(fe - fb));
    } else {
        fb = SCALE - fb;
        SkASSERT(row >= fMask.fImage);
        SkASSERT(row + n + 1 < fMask.fImage + kMAX_STORAGE + 1);
        add_aa_span(row, coverage_to_partial_alpha(fb),
                    n, coverage_to_partial_alpha(fe),
                    (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT));
    }

#ifdef SK_DEBUG
    fCurrX = x + width;
#endif
}

///////////////////////////////////////////////////////////////////////////////

static SkIRect safeRoundOut(const SkRect& src) {
    // roundOut will pin huge floats to max/min int
    SkIRect dst = src.roundOut();

    // intersect with a smaller huge rect, so the rect will not be considered empty for being
    // too large. e.g. { -SK_MaxS32 ... SK_MaxS32 } is considered empty because its width
    // exceeds signed 32bit.
    const int32_t limit = SK_MaxS32 >> SK_SUPERSAMPLE_SHIFT;
    (void)dst.intersect({ -limit, -limit, limit, limit});

    return dst;
}

constexpr int kSampleSize = 8;
#if !defined(SK_DISABLE_AAA)
#ifdef SK_BUILD_FOR_OHOS
constexpr SkScalar kComplexityThreshold = 8.0;
#else
constexpr SkScalar kComplexityThreshold = 0.25;
#endif
#endif

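// A compact summary of the heuristic below: sample the first kSampleSize points
// to estimate the average segment length, treat
// (countPoints() * avgLength)^2 / diagonal^2 as a proxy for the number of
// segment intersections, and divide by the bounds height to get the expected
// number of intersections per scanline ("complexity").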
void compute_complexity(const SkPath& path, SkScalar& avgLength, SkScalar& complexity) {
    int n = path.countPoints();
    if (n < kSampleSize || path.getBounds().isEmpty()) {
        // set to invalid value to indicate that we failed to compute
        avgLength = complexity = -1;
        return;
    }

    SkScalar sumLength = 0;
    SkPoint lastPoint = path.getPoint(0);
    for (int i = 1; i < kSampleSize; ++i) {
        SkPoint point = path.getPoint(i);
        sumLength += SkPoint::Distance(lastPoint, point);
        lastPoint = point;
    }
    avgLength = sumLength / (kSampleSize - 1);

    auto sqr = [](SkScalar x) { return x*x; };

    SkScalar diagonalSqr = sqr(path.getBounds().width()) + sqr(path.getBounds().height());

    // If the path consists of random line segments, the number of intersections should be
    // proportional to this.
    SkScalar intersections = sk_ieee_float_divide(sqr(n) * sqr(avgLength), diagonalSqr);

    // The number of intersections per scanline should be proportional to this number.
    complexity = sk_ieee_float_divide(intersections, path.getBounds().height());

    if (sk_float_isnan(complexity)) {  // it may be possible to have 0.0 / 0.0; inf is fine for us.
        complexity = -1;
    }
}

static bool ShouldUseAAA(const SkPath& path, SkScalar avgLength, SkScalar complexity) {
#if defined(SK_DISABLE_AAA)
    return false;
#else
    if (gSkForceAnalyticAA) {
        return true;
    }
    if (!gSkUseAnalyticAA) {
        return false;
    }
    if (path.isRect(nullptr)) {
        return true;
    }

#ifdef SK_SUPPORT_LEGACY_AAA_CHOICE
    const SkRect& bounds = path.getBounds();
    // When the path has so many points compared to the size of its
    // bounds/resolution, it indicates that the path is not quite smooth in
    // the current resolution: the expected number of turning points in
    // every pixel row/column is significantly greater than zero. Hence
    // Analytic AA is not likely to produce visible quality improvements,
    // and Analytic AA might be slower than supersampling.
    return path.countPoints() < std::max(bounds.width(), bounds.height()) / 2 - 10;
#else
#ifndef SK_BUILD_FOR_OHOS
    if (path.countPoints() >= path.getBounds().height()) {
        // SAA is faster than AAA in this case even if there are no
        // intersections because AAA will have too many scan lines. See
        // skbug.com/8272
        return false;
    }
#endif
#ifdef SK_ENABLE_PATH_COMPLEXITY_DFX
    SkPathComplexityDfx::AddPathComplexityTrace(complexity);
#endif
    // We will use AAA if the number of verbs < kSampleSize and therefore complexity < 0
    return complexity < kComplexityThreshold;
#endif
#endif
}

void SkScan::SAAFillPath(const SkPath& path, SkBlitter* blitter, const SkIRect& ir,
                         const SkIRect& clipBounds, bool forceRLE) {
    bool containedInClip = clipBounds.contains(ir);
    bool isInverse = path.isInverseFillType();

    // MaskSuperBlitter can't handle drawing outside of ir, so we can't use it
    // if we're an inverse filltype
    if (!isInverse && MaskSuperBlitter::CanHandleRect(ir) && !forceRLE) {
        MaskSuperBlitter superBlit(blitter, ir, clipBounds, isInverse);
        SkASSERT(SkIntToScalar(ir.fTop) <= path.getBounds().fTop);
        sk_fill_path(path, clipBounds, &superBlit, ir.fTop, ir.fBottom, SHIFT, containedInClip);
    } else {
        SuperBlitter superBlit(blitter, ir, clipBounds, isInverse);
        sk_fill_path(path, clipBounds, &superBlit, ir.fTop, ir.fBottom, SHIFT, containedInClip);
    }
}

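// overflows_short_shift(value, shift) returns 0 iff (value << shift) still fits
// in a signed 16-bit value: shifting left by 16 + shift and arithmetically
// shifting back reproduces value only when no significant bits were lost. With
// shift == 2, for example, 8191 passes (8191 << 2 == 32764) while 8192 does not,
// matching the SkASSERTs in rect_overflows_short_shift() below.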
static int overflows_short_shift(int value, int shift) {
    const int s = 16 + shift;
    return (SkLeftShift(value, s) >> s) - value;
}

/**
  Would any of the coordinates of this rectangle not fit in a short,
  when left-shifted by shift?
*/
static int rect_overflows_short_shift(SkIRect rect, int shift) {
    SkASSERT(!overflows_short_shift(8191, shift));
    SkASSERT(overflows_short_shift(8192, shift));
    SkASSERT(!overflows_short_shift(32767, 0));
    SkASSERT(overflows_short_shift(32768, 0));

    // Since we expect these to succeed, we bit-or together
    // for a tiny extra bit of speed.
    return overflows_short_shift(rect.fLeft, shift) |
           overflows_short_shift(rect.fRight, shift) |
           overflows_short_shift(rect.fTop, shift) |
           overflows_short_shift(rect.fBottom, shift);
}

void SkScan::AntiFillPath(const SkPath& path, const SkRegion& origClip,
                          SkBlitter* blitter, bool forceRLE) {
    if (origClip.isEmpty()) {
        return;
    }

    const bool isInverse = path.isInverseFillType();
    SkIRect ir = safeRoundOut(path.getBounds());
    if (ir.isEmpty()) {
        if (isInverse) {
            blitter->blitRegion(origClip);
        }
        return;
    }

    // If the intersection of the path bounds and the clip bounds
    // will overflow 32767 when << by SHIFT, we can't supersample,
    // so draw without antialiasing.
    SkIRect clippedIR;
    if (isInverse) {
        // If the path is an inverse fill, it's going to fill the entire
        // clip, and we care whether the entire clip exceeds our limits.
        clippedIR = origClip.getBounds();
    } else {
        if (!clippedIR.intersect(ir, origClip.getBounds())) {
            return;
        }
    }
    if (rect_overflows_short_shift(clippedIR, SHIFT)) {
        SkScan::FillPath(path, origClip, blitter);
        return;
    }

    // Our antialiasing can't handle a clip larger than 32767, so we restrict
    // the clip to that limit here. (the runs[] uses int16_t for its index).
    //
    // A more general solution (one that could also eliminate the need to
    // disable aa based on ir bounds (see overflows_short_shift)) would be
    // to tile the clip/target...
    SkRegion tmpClipStorage;
    const SkRegion* clipRgn = &origClip;
    {
        static const int32_t kMaxClipCoord = 32767;
        const SkIRect& bounds = origClip.getBounds();
        if (bounds.fRight > kMaxClipCoord || bounds.fBottom > kMaxClipCoord) {
            SkIRect limit = { 0, 0, kMaxClipCoord, kMaxClipCoord };
            tmpClipStorage.op(origClip, limit, SkRegion::kIntersect_Op);
            clipRgn = &tmpClipStorage;
        }
    }
    // for here down, use clipRgn, not origClip

    SkScanClipper clipper(blitter, clipRgn, ir);

    if (clipper.getBlitter() == nullptr) {  // clipped out
        if (isInverse) {
            blitter->blitRegion(*clipRgn);
        }
        return;
    }

    SkASSERT(clipper.getClipRect() == nullptr ||
             *clipper.getClipRect() == clipRgn->getBounds());

    // now use the (possibly wrapped) blitter
    blitter = clipper.getBlitter();

    if (isInverse) {
        sk_blit_above(blitter, ir, *clipRgn);
    }

    SkScalar avgLength, complexity;
    compute_complexity(path, avgLength, complexity);

    if (ShouldUseAAA(path, avgLength, complexity)) {
        // ShouldUseAAA() has already rejected paths that are too complicated,
        // where AAA would bring no speedup or significant visual improvement.
        SkScan::AAAFillPath(path, blitter, ir, clipRgn->getBounds(), forceRLE);
    } else {
        SkScan::SAAFillPath(path, blitter, ir, clipRgn->getBounds(), forceRLE);
    }

    if (isInverse) {
        sk_blit_below(blitter, ir, *clipRgn);
    }
}

///////////////////////////////////////////////////////////////////////////////

#include "src/core/SkRasterClip.h"

void SkScan::FillPath(const SkPath& path, const SkRasterClip& clip, SkBlitter* blitter) {
    if (clip.isEmpty() || !path.isFinite()) {
        return;
    }

    if (clip.isBW()) {
        FillPath(path, clip.bwRgn(), blitter);
    } else {
        SkRegion tmp;
        SkAAClipBlitter aaBlitter;

        tmp.setRect(clip.getBounds());
        aaBlitter.init(blitter, &clip.aaRgn());
        SkScan::FillPath(path, tmp, &aaBlitter);
    }
}

void SkScan::AntiFillPath(const SkPath& path, const SkRasterClip& clip, SkBlitter* blitter) {
    if (clip.isEmpty() || !path.isFinite()) {
        return;
    }

    if (clip.isBW()) {
        AntiFillPath(path, clip.bwRgn(), blitter, false);
    } else {
        SkRegion tmp;
        SkAAClipBlitter aaBlitter;

        tmp.setRect(clip.getBounds());
        aaBlitter.init(blitter, &clip.aaRgn());
        AntiFillPath(path, tmp, &aaBlitter, true);  // SkAAClipBlitter can blitMask, why forceRLE?
    }
}