1 /*
2 * Copyright 2012 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/ops/AAConvexPathRenderer.h"
9
10 #include "include/core/SkString.h"
11 #include "include/core/SkTypes.h"
12 #include "src/core/SkGeometry.h"
13 #include "src/core/SkMatrixPriv.h"
14 #include "src/core/SkPathPriv.h"
15 #include "src/core/SkPointPriv.h"
16 #include "src/gpu/BufferWriter.h"
17 #include "src/gpu/GrAuditTrail.h"
18 #include "src/gpu/GrCaps.h"
19 #include "src/gpu/GrDrawOpTest.h"
20 #include "src/gpu/GrGeometryProcessor.h"
21 #include "src/gpu/GrProcessor.h"
22 #include "src/gpu/GrProgramInfo.h"
23 #include "src/gpu/geometry/GrPathUtils.h"
24 #include "src/gpu/geometry/GrStyledShape.h"
25 #include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
26 #include "src/gpu/glsl/GrGLSLProgramDataManager.h"
27 #include "src/gpu/glsl/GrGLSLUniformHandler.h"
28 #include "src/gpu/glsl/GrGLSLVarying.h"
29 #include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
30 #include "src/gpu/ops/GrMeshDrawOp.h"
31 #include "src/gpu/ops/GrSimpleMeshDrawOpHelperWithStencil.h"
32 #include "src/gpu/v1/SurfaceDrawContext_v1.h"
33
34 namespace skgpu::v1 {
35
36 namespace {
37
38 struct Segment {
39 enum {
40 // These enum values are assumed in member functions below.
41 kLine = 0,
42 kQuad = 1,
43 } fType;
44
45 // line uses one pt, quad uses 2 pts
46 SkPoint fPts[2];
47 // normal to edge ending at each pt
48 SkVector fNorms[2];
49 // is the corner where the previous segment meets this segment
50 // sharp. If so, fMid is a normalized bisector facing outward.
51 SkVector fMid;
52
countPointsskgpu::v1::__anon18822::Segment53 int countPoints() {
54 static_assert(0 == kLine && 1 == kQuad);
55 return fType + 1;
56 }
endPtskgpu::v1::__anon18822::Segment57 const SkPoint& endPt() const {
58 static_assert(0 == kLine && 1 == kQuad);
59 return fPts[fType];
60 }
endNormskgpu::v1::__anon18822::Segment61 const SkPoint& endNorm() const {
62 static_assert(0 == kLine && 1 == kQuad);
63 return fNorms[fType];
64 }
65 };
66
67 typedef SkTArray<Segment, true> SegmentArray;
68
center_of_mass(const SegmentArray& segments, SkPoint* c)69 bool center_of_mass(const SegmentArray& segments, SkPoint* c) {
70 SkScalar area = 0;
71 SkPoint center = {0, 0};
72 int count = segments.count();
73 SkPoint p0 = {0, 0};
74 if (count > 2) {
75 // We translate the polygon so that the first point is at the origin.
76 // This avoids some precision issues with small area polygons far away
77 // from the origin.
78 p0 = segments[0].endPt();
79 SkPoint pi;
80 SkPoint pj;
81 // the first and last iteration of the below loop would compute
82 // zeros since the starting / ending point is (0,0). So instead we start
83 // at i=1 and make the last iteration i=count-2.
84 pj = segments[1].endPt() - p0;
85 for (int i = 1; i < count - 1; ++i) {
86 pi = pj;
87 pj = segments[i + 1].endPt() - p0;
88
89 SkScalar t = SkPoint::CrossProduct(pi, pj);
90 area += t;
91 center.fX += (pi.fX + pj.fX) * t;
92 center.fY += (pi.fY + pj.fY) * t;
93 }
94 }
95
96 // If the poly has no area then we instead return the average of
97 // its points.
98 if (SkScalarNearlyZero(area)) {
99 SkPoint avg;
100 avg.set(0, 0);
101 for (int i = 0; i < count; ++i) {
102 const SkPoint& pt = segments[i].endPt();
103 avg.fX += pt.fX;
104 avg.fY += pt.fY;
105 }
106 SkScalar denom = SK_Scalar1 / count;
107 avg.scale(denom);
108 *c = avg;
109 } else {
110 area *= 3;
111 area = SkScalarInvert(area);
112 center.scale(area);
113 // undo the translate of p0 to the origin.
114 *c = center + p0;
115 }
116 return !SkScalarIsNaN(c->fX) && !SkScalarIsNaN(c->fY) && c->isFinite();
117 }
118
// Computes the fan point (centroid), per-point outward edge normals, corner
// bisectors, and the total vertex/index counts needed to tessellate the
// segments. Returns false if the fan point cannot be computed or the counts
// would overflow a 32-bit int.
bool compute_vectors(SegmentArray* segments,
                     SkPoint* fanPt,
                     SkPathFirstDirection dir,
                     int* vCount,
                     int* iCount) {
    if (!center_of_mass(*segments, fanPt)) {
        return false;
    }
    int count = segments->count();

    // Make the normals point towards the outside
    SkPointPriv::Side normSide;
    if (dir == SkPathFirstDirection::kCCW) {
        normSide = SkPointPriv::kRight_Side;
    } else {
        normSide = SkPointPriv::kLeft_Side;
    }

    // Accumulate in 64 bits so overflow of the 32-bit out-params is detectable.
    int64_t vCount64 = 0;
    int64_t iCount64 = 0;
    // compute normals at all points
    for (int a = 0; a < count; ++a) {
        Segment& sega = (*segments)[a];
        int b = (a + 1) % count;
        Segment& segb = (*segments)[b];

        // Each normal is perpendicular to the chord from the previous point to
        // this one, rotated to face the outside of the path.
        const SkPoint* prevPt = &sega.endPt();
        int n = segb.countPoints();
        for (int p = 0; p < n; ++p) {
            segb.fNorms[p] = segb.fPts[p] - *prevPt;
            segb.fNorms[p].normalize();
            segb.fNorms[p] = SkPointPriv::MakeOrthog(segb.fNorms[p], normSide);
            prevPt = &segb.fPts[p];
        }
        // Per-segment vertex/index budget; must mirror create_vertices().
        if (Segment::kLine == segb.fType) {
            vCount64 += 5;
            iCount64 += 9;
        } else {
            vCount64 += 6;
            iCount64 += 12;
        }
    }

    // compute mid-vectors where segments meet. TODO: Detect shallow corners
    // and leave out the wedges and close gaps by stitching segments together.
    for (int a = 0; a < count; ++a) {
        const Segment& sega = (*segments)[a];
        int b = (a + 1) % count;
        Segment& segb = (*segments)[b];
        segb.fMid = segb.fNorms[0] + sega.endNorm();
        segb.fMid.normalize();
        // corner wedges
        vCount64 += 4;
        iCount64 += 6;
    }
    if (vCount64 > SK_MaxS32 || iCount64 > SK_MaxS32) {
        return false;
    }
    *vCount = vCount64;
    *iCount = iCount64;
    return true;
}
181
// State machine that detects paths which are (nearly) a single point or a
// single line. Stages advance kInitial -> kPoint -> kLine -> kNonDegenerate
// as update_degenerate_test() consumes mapped path points.
struct DegenerateTestData {
    DegenerateTestData() { fStage = kInitial; }
    // True until a point off the candidate line has been seen.
    bool isDegenerate() const { return kNonDegenerate != fStage; }
    enum {
        kInitial,
        kPoint,
        kLine,
        kNonDegenerate
    } fStage;
    SkPoint fFirstPoint;    // first point seen (valid from kPoint stage on)
    SkVector fLineNormal;   // unit normal of the candidate line (kLine stage)
    SkScalar fLineC;        // offset so that fLineNormal.dot(p) + fLineC == 0 on the line
};
195
// Tolerances used to classify nearly-coincident points and nearly-colinear
// control points (linear distance and its square, respectively).
static const SkScalar kClose = (SK_Scalar1 / 16);
static const SkScalar kCloseSqd = kClose * kClose;
198
update_degenerate_test(DegenerateTestData* data, const SkPoint& pt)199 void update_degenerate_test(DegenerateTestData* data, const SkPoint& pt) {
200 switch (data->fStage) {
201 case DegenerateTestData::kInitial:
202 data->fFirstPoint = pt;
203 data->fStage = DegenerateTestData::kPoint;
204 break;
205 case DegenerateTestData::kPoint:
206 if (SkPointPriv::DistanceToSqd(pt, data->fFirstPoint) > kCloseSqd) {
207 data->fLineNormal = pt - data->fFirstPoint;
208 data->fLineNormal.normalize();
209 data->fLineNormal = SkPointPriv::MakeOrthog(data->fLineNormal);
210 data->fLineC = -data->fLineNormal.dot(data->fFirstPoint);
211 data->fStage = DegenerateTestData::kLine;
212 }
213 break;
214 case DegenerateTestData::kLine:
215 if (SkScalarAbs(data->fLineNormal.dot(pt) + data->fLineC) > kClose) {
216 data->fStage = DegenerateTestData::kNonDegenerate;
217 }
218 break;
219 case DegenerateTestData::kNonDegenerate:
220 break;
221 default:
222 SK_ABORT("Unexpected degenerate test stage.");
223 }
224 }
225
// Determines the winding direction of 'path' as it will appear after being
// transformed by 'm'. Returns false if the direction cannot be computed.
inline bool get_direction(const SkPath& path, const SkMatrix& m, SkPathFirstDirection* dir) {
    // At this point, we've already returned true from canDraw(), which checked that the path's
    // direction could be determined, so this should just be fetching the cached direction.
    // However, if perspective is involved, we're operating on a transformed path, which may no
    // longer have a computable direction.
    *dir = SkPathPriv::ComputeFirstDirection(path);
    if (*dir == SkPathFirstDirection::kUnknown) {
        return false;
    }

    // check whether m reverses the orientation
    SkASSERT(!m.hasPerspective());
    // The sign of the upper-left 2x2 determinant says whether m flips winding.
    SkScalar det2x2 = m.get(SkMatrix::kMScaleX) * m.get(SkMatrix::kMScaleY) -
                      m.get(SkMatrix::kMSkewX) * m.get(SkMatrix::kMSkewY);
    if (det2x2 < 0) {
        *dir = SkPathPriv::OppositeFirstDirection(*dir);
    }

    return true;
}
246
add_line_to_segment(const SkPoint& pt, SegmentArray* segments)247 inline void add_line_to_segment(const SkPoint& pt, SegmentArray* segments) {
248 segments->push_back();
249 segments->back().fType = Segment::kLine;
250 segments->back().fPts[0] = pt;
251 }
252
add_quad_segment(const SkPoint pts[3], SegmentArray* segments)253 inline void add_quad_segment(const SkPoint pts[3], SegmentArray* segments) {
254 if (SkPointPriv::DistanceToLineSegmentBetweenSqd(pts[1], pts[0], pts[2]) < kCloseSqd) {
255 if (pts[0] != pts[2]) {
256 add_line_to_segment(pts[2], segments);
257 }
258 } else {
259 segments->push_back();
260 segments->back().fType = Segment::kQuad;
261 segments->back().fPts[0] = pts[1];
262 segments->back().fPts[1] = pts[2];
263 }
264 }
265
add_cubic_segments(const SkPoint pts[4], SkPathFirstDirection dir, SegmentArray* segments)266 inline void add_cubic_segments(const SkPoint pts[4],
267 SkPathFirstDirection dir,
268 SegmentArray* segments) {
269 SkSTArray<15, SkPoint, true> quads;
270 GrPathUtils::convertCubicToQuadsConstrainToTangents(pts, SK_Scalar1, dir, &quads);
271 int count = quads.count();
272 for (int q = 0; q < count; q += 3) {
273 add_quad_segment(&quads[q], segments);
274 }
275 }
276
// Walks 'path', maps each verb's points through 'm', and builds the segment
// list plus fan point and vertex/index counts. Returns false for degenerate
// (point/line-like) paths or when the direction/counts cannot be computed.
bool get_segments(const SkPath& path,
                  const SkMatrix& m,
                  SegmentArray* segments,
                  SkPoint* fanPt,
                  int* vCount,
                  int* iCount) {
    SkPath::Iter iter(path, true);
    // This renderer over-emphasizes very thin path regions. We use the distance
    // to the path from the sample to compute coverage. Every pixel intersected
    // by the path will be hit and the maximum distance is sqrt(2)/2. We don't
    // notice that the sample may be close to a very thin area of the path and
    // thus should be very light. This is particularly egregious for degenerate
    // line paths. We detect paths that are very close to a line (zero area) and
    // draw nothing.
    DegenerateTestData degenerateData;
    SkPathFirstDirection dir;
    if (!get_direction(path, m, &dir)) {
        return false;
    }

    for (;;) {
        SkPoint pts[4];
        SkPath::Verb verb = iter.next(pts);
        switch (verb) {
            case SkPath::kMove_Verb:
                m.mapPoints(pts, 1);
                update_degenerate_test(&degenerateData, pts[0]);
                break;
            case SkPath::kLine_Verb: {
                // Zero-length verbs are skipped entirely.
                if (!SkPathPriv::AllPointsEq(pts, 2)) {
                    // pts[0] was already mapped by the previous verb.
                    m.mapPoints(&pts[1], 1);
                    update_degenerate_test(&degenerateData, pts[1]);
                    add_line_to_segment(pts[1], segments);
                }
                break;
            }
            case SkPath::kQuad_Verb:
                if (!SkPathPriv::AllPointsEq(pts, 3)) {
                    m.mapPoints(pts, 3);
                    update_degenerate_test(&degenerateData, pts[1]);
                    update_degenerate_test(&degenerateData, pts[2]);
                    add_quad_segment(pts, segments);
                }
                break;
            case SkPath::kConic_Verb: {
                if (!SkPathPriv::AllPointsEq(pts, 3)) {
                    m.mapPoints(pts, 3);
                    SkScalar weight = iter.conicWeight();
                    SkAutoConicToQuads converter;
                    // Converted quads share endpoints: quad i uses points
                    // [2i, 2i+2] of the returned array.
                    const SkPoint* quadPts = converter.computeQuads(pts, weight, 0.25f);
                    for (int i = 0; i < converter.countQuads(); ++i) {
                        update_degenerate_test(&degenerateData, quadPts[2*i + 1]);
                        update_degenerate_test(&degenerateData, quadPts[2*i + 2]);
                        add_quad_segment(quadPts + 2*i, segments);
                    }
                }
                break;
            }
            case SkPath::kCubic_Verb: {
                if (!SkPathPriv::AllPointsEq(pts, 4)) {
                    m.mapPoints(pts, 4);
                    update_degenerate_test(&degenerateData, pts[1]);
                    update_degenerate_test(&degenerateData, pts[2]);
                    update_degenerate_test(&degenerateData, pts[3]);
                    add_cubic_segments(pts, dir, segments);
                }
                break;
            }
            case SkPath::kDone_Verb:
                // Finished: reject degenerate paths, otherwise finish by
                // computing normals, bisectors, and the output counts.
                if (degenerateData.isDegenerate()) {
                    return false;
                } else {
                    return compute_vectors(segments, fanPt, dir, vCount, iCount);
                }
            default:
                break;
        }
    }
}
356
// Vertex/index counts for one sub-draw; create_vertices() splits output into
// multiple Draws so indices stay within the 16-bit range.
struct Draw {
    int fVertexCnt = 0;
    int fIndexCnt = 0;
};
362
363 typedef SkTArray<Draw, true> DrawArray;
364
// Emits the triangle data for all segments into 'verts'/'idxs', recording one
// or more Draw entries in 'draws' so that a single draw never exceeds the
// 16-bit index range. Per-vertex layout: Position, Color, UV, D0, D1 (D0/D1
// are signed edge distances consumed by QuadEdgeEffect).
void create_vertices(const SegmentArray& segments,
                     const SkPoint& fanPt,
                     const GrVertexColor& color,
                     DrawArray* draws,
                     VertexWriter& verts,
                     uint16_t* idxs,
                     size_t vertexStride) {
    Draw* draw = &draws->push_back();
    // alias just to make vert/index assignments easier to read.
    int* v = &draw->fVertexCnt;
    int* i = &draw->fIndexCnt;

    int count = segments.count();
    for (int a = 0; a < count; ++a) {
        const Segment& sega = segments[a];
        int b = (a + 1) % count;
        const Segment& segb = segments[b];

        // Check whether adding the verts for this segment to the current draw would cause index
        // values to overflow.
        int vCount = 4;
        if (Segment::kLine == segb.fType) {
            vCount += 5;
        } else {
            vCount += 6;
        }
        if (draw->fVertexCnt + vCount > (1 << 16)) {
            // Start a new sub-draw; indices written below are relative to it.
            idxs += *i;
            draw = &draws->push_back();
            v = &draw->fVertexCnt;
            i = &draw->fIndexCnt;
        }

        const SkScalar negOneDists[2] = { -SK_Scalar1, -SK_Scalar1 };

        // Corner wedge between sega's ending normal and segb's starting normal.
        // FIXME: These tris are inset in the 1 unit arc around the corner
        SkPoint p0 = sega.endPt();
        // Position, Color, UV, D0, D1
        verts << p0 << color << SkPoint{0, 0} << negOneDists;
        verts << (p0 + sega.endNorm()) << color << SkPoint{0, -SK_Scalar1} << negOneDists;
        verts << (p0 + segb.fMid) << color << SkPoint{0, -SK_Scalar1} << negOneDists;
        verts << (p0 + segb.fNorms[0]) << color << SkPoint{0, -SK_Scalar1} << negOneDists;

        idxs[*i + 0] = *v + 0;
        idxs[*i + 1] = *v + 2;
        idxs[*i + 2] = *v + 1;
        idxs[*i + 3] = *v + 0;
        idxs[*i + 4] = *v + 3;
        idxs[*i + 5] = *v + 2;

        *v += 4;
        *i += 6;

        if (Segment::kLine == segb.fType) {
            // we draw the line edge as a degenerate quad (u is 0, v is the
            // signed distance to the edge)
            SkPoint v1Pos = sega.endPt();
            SkPoint v2Pos = segb.fPts[0];
            SkScalar dist = SkPointPriv::DistanceToLineBetween(fanPt, v1Pos, v2Pos);

            verts << fanPt << color << SkPoint{0, dist} << negOneDists;
            verts << v1Pos << color << SkPoint{0, 0} << negOneDists;
            verts << v2Pos << color << SkPoint{0, 0} << negOneDists;
            verts << (v1Pos + segb.fNorms[0]) << color << SkPoint{0, -SK_Scalar1} << negOneDists;
            verts << (v2Pos + segb.fNorms[0]) << color << SkPoint{0, -SK_Scalar1} << negOneDists;

            // Two triangles covering the edge's anti-aliased border quad.
            idxs[*i + 0] = *v + 3;
            idxs[*i + 1] = *v + 1;
            idxs[*i + 2] = *v + 2;

            idxs[*i + 3] = *v + 4;
            idxs[*i + 4] = *v + 3;
            idxs[*i + 5] = *v + 2;

            *i += 6;

            // Draw the interior fan if it exists.
            // TODO: Detect and combine colinear segments. This will ensure we catch every case
            // with no interior, and that the resulting shared edge uses the same endpoints.
            if (count >= 3) {
                idxs[*i + 0] = *v + 0;
                idxs[*i + 1] = *v + 2;
                idxs[*i + 2] = *v + 1;

                *i += 3;
            }

            *v += 5;
        } else {
            SkPoint qpts[] = {sega.endPt(), segb.fPts[0], segb.fPts[1]};

            // Edge-plane offsets for the two tangent lines of the quad.
            SkScalar c0 = segb.fNorms[0].dot(qpts[0]);
            SkScalar c1 = segb.fNorms[1].dot(qpts[2]);

            // We must transform the positions into UV in cpu memory and then copy them to the gpu
            // buffer. If we write the position first into the gpu buffer then calculate the UVs, it
            // will cause us to read from the GPU buffer which can be very slow.
            struct PosAndUV {
                SkPoint fPos;
                SkPoint fUV;
            };
            PosAndUV posAndUVPoints[6];
            posAndUVPoints[0].fPos = fanPt;
            posAndUVPoints[1].fPos = qpts[0];
            posAndUVPoints[2].fPos = qpts[2];
            posAndUVPoints[3].fPos = qpts[0] + segb.fNorms[0];
            posAndUVPoints[4].fPos = qpts[2] + segb.fNorms[1];
            SkVector midVec = segb.fNorms[0] + segb.fNorms[1];
            midVec.normalize();
            posAndUVPoints[5].fPos = qpts[1] + midVec;

            // Map each position into the quad's canonical (u, v) space.
            GrPathUtils::QuadUVMatrix toUV(qpts);
            toUV.apply(posAndUVPoints, 6, sizeof(PosAndUV), sizeof(SkPoint));

            verts << posAndUVPoints[0].fPos << color << posAndUVPoints[0].fUV
                  << (-segb.fNorms[0].dot(fanPt) + c0)
                  << (-segb.fNorms[1].dot(fanPt) + c1);

            verts << posAndUVPoints[1].fPos << color << posAndUVPoints[1].fUV
                  << 0.0f
                  << (-segb.fNorms[1].dot(qpts[0]) + c1);

            verts << posAndUVPoints[2].fPos << color << posAndUVPoints[2].fUV
                  << (-segb.fNorms[0].dot(qpts[2]) + c0)
                  << 0.0f;
            // We need a negative value that is very large that it won't effect results if it is
            // interpolated with. However, the value can't be too large of a negative that it
            // effects numerical precision on less powerful GPUs.
            static const SkScalar kStableLargeNegativeValue = -SK_ScalarMax/1000000;
            verts << posAndUVPoints[3].fPos << color << posAndUVPoints[3].fUV
                  << kStableLargeNegativeValue
                  << kStableLargeNegativeValue;

            verts << posAndUVPoints[4].fPos << color << posAndUVPoints[4].fUV
                  << kStableLargeNegativeValue
                  << kStableLargeNegativeValue;

            verts << posAndUVPoints[5].fPos << color << posAndUVPoints[5].fUV
                  << kStableLargeNegativeValue
                  << kStableLargeNegativeValue;

            // Three triangles covering the quad's anti-aliased border region.
            idxs[*i + 0] = *v + 3;
            idxs[*i + 1] = *v + 1;
            idxs[*i + 2] = *v + 2;
            idxs[*i + 3] = *v + 4;
            idxs[*i + 4] = *v + 3;
            idxs[*i + 5] = *v + 2;

            idxs[*i + 6] = *v + 5;
            idxs[*i + 7] = *v + 3;
            idxs[*i + 8] = *v + 4;

            *i += 9;

            // Draw the interior fan if it exists.
            // TODO: Detect and combine colinear segments. This will ensure we catch every case
            // with no interior, and that the resulting shared edge uses the same endpoints.
            if (count >= 3) {
                idxs[*i + 0] = *v + 0;
                idxs[*i + 1] = *v + 2;
                idxs[*i + 2] = *v + 1;

                *i += 3;
            }

            *v += 6;
        }
    }
}
534
535 ///////////////////////////////////////////////////////////////////////////////
536
537 /*
538 * Quadratic specified by 0=u^2-v canonical coords. u and v are the first
539 * two components of the vertex attribute. Coverage is based on signed
540 * distance with negative being inside, positive outside. The edge is specified in
541 * window space (y-down). If either the third or fourth component of the interpolated
542 * vertex coord is > 0 then the pixel is considered outside the edge. This is used to
543 * attempt to trim to a portion of the infinite quad.
544 * Requires shader derivative instruction support.
545 */
546
// Geometry processor that evaluates coverage for the quad/line edge data
// emitted by create_vertices(); see the canonical-coords comment above.
class QuadEdgeEffect : public GrGeometryProcessor {
public:
    // Arena-allocates an instance (GPs are arena-owned, never heap-owned).
    static GrGeometryProcessor* Make(SkArenaAlloc* arena,
                                     const SkMatrix& localMatrix,
                                     bool usesLocalCoords,
                                     bool wideColor) {
        return arena->make([&](void* ptr) {
            return new (ptr) QuadEdgeEffect(localMatrix, usesLocalCoords, wideColor);
        });
    }

    ~QuadEdgeEffect() override {}

    const char* name() const override { return "QuadEdge"; }

    // Identifies this shader configuration for debugging/diagnostics.
    SkString getShaderDfxInfo() const override {
        SkString format;
        format.printf("ShaderDfx_QuadEdgeEffect_%d_%d_%d_%d", fUsesLocalCoords,
            fLocalMatrix.isIdentity(), fLocalMatrix.isScaleTranslate(), fLocalMatrix.hasPerspective());
        return format;
    }

    // Everything that changes the generated program must be folded into the key.
    void addToKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override {
        b->addBool(fUsesLocalCoords, "usesLocalCoords");
        b->addBits(ProgramImpl::kMatrixKeyBits,
                   ProgramImpl::ComputeMatrixKey(caps, fLocalMatrix),
                   "localMatrixType");
    }

    std::unique_ptr<ProgramImpl> makeProgramImpl(const GrShaderCaps&) const override;

private:
    QuadEdgeEffect(const SkMatrix& localMatrix, bool usesLocalCoords, bool wideColor)
            : INHERITED(kQuadEdgeEffect_ClassID)
            , fLocalMatrix(localMatrix)
            , fUsesLocalCoords(usesLocalCoords) {
        fInPosition = {"inPosition", kFloat2_GrVertexAttribType, kFloat2_GrSLType};
        fInColor = MakeColorAttribute("inColor", wideColor);
        // GL on iOS 14 needs more precision for the quadedge attributes
        fInQuadEdge = {"inQuadEdge", kFloat4_GrVertexAttribType, kFloat4_GrSLType};
        // The three attributes above must stay contiguous for this call.
        this->setVertexAttributes(&fInPosition, 3);
    }

    Attribute fInPosition;
    Attribute fInColor;
    Attribute fInQuadEdge;

    SkMatrix fLocalMatrix;
    bool fUsesLocalCoords;

    GR_DECLARE_GEOMETRY_PROCESSOR_TEST

    using INHERITED = GrGeometryProcessor;
};
601
// Builds the vertex/fragment program for QuadEdgeEffect. The fragment shader
// computes coverage from the canonical quad coords (u, v) via derivatives,
// optionally clamped by the two trim edges carried in z/w.
std::unique_ptr<GrGeometryProcessor::ProgramImpl> QuadEdgeEffect::makeProgramImpl(
        const GrShaderCaps&) const {
    class Impl : public ProgramImpl {
    public:
        void setData(const GrGLSLProgramDataManager& pdman,
                     const GrShaderCaps& shaderCaps,
                     const GrGeometryProcessor& geomProc) override {
            const QuadEdgeEffect& qe = geomProc.cast<QuadEdgeEffect>();
            // Re-uploads the local matrix uniform only when it changed.
            SetTransform(pdman, shaderCaps, fLocalMatrixUniform, qe.fLocalMatrix, &fLocalMatrix);
        }

    private:
        void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
            const QuadEdgeEffect& qe = args.fGeomProc.cast<QuadEdgeEffect>();
            GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
            GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;
            GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
            GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;

            // emit attributes
            varyingHandler->emitAttributes(qe);

            // GL on iOS 14 needs more precision for the quadedge attributes
            // We might as well enable it everywhere
            GrGLSLVarying v(kFloat4_GrSLType);
            varyingHandler->addVarying("QuadEdge", &v);
            vertBuilder->codeAppendf("%s = %s;", v.vsOut(), qe.fInQuadEdge.name());

            // Setup pass through color
            fragBuilder->codeAppendf("half4 %s;", args.fOutputColor);
            varyingHandler->addPassThroughAttribute(qe.fInColor.asShaderVar(), args.fOutputColor);

            // Setup position
            WriteOutputPosition(vertBuilder, gpArgs, qe.fInPosition.name());
            if (qe.fUsesLocalCoords) {
                WriteLocalCoord(vertBuilder,
                                uniformHandler,
                                *args.fShaderCaps,
                                gpArgs,
                                qe.fInPosition.asShaderVar(),
                                qe.fLocalMatrix,
                                &fLocalMatrixUniform);
            }

            fragBuilder->codeAppendf("half edgeAlpha;");

            // keep the derivative instructions outside the conditional
            fragBuilder->codeAppendf("half2 duvdx = half2(dFdx(%s.xy));", v.fsIn());
            fragBuilder->codeAppendf("half2 duvdy = half2(dFdy(%s.xy));", v.fsIn());
            fragBuilder->codeAppendf("if (%s.z > 0.0 && %s.w > 0.0) {", v.fsIn(), v.fsIn());
            // today we know z and w are in device space. We could use derivatives
            fragBuilder->codeAppendf("edgeAlpha = half(min(min(%s.z, %s.w) + 0.5, 1.0));", v.fsIn(),
                                     v.fsIn());
            fragBuilder->codeAppendf ("} else {");
            // Gradient of the implicit function u^2 - v, used to convert its
            // value into an approximate screen-space distance.
            fragBuilder->codeAppendf("half2 gF = half2(half(2.0*%s.x*duvdx.x - duvdx.y),"
                                     " half(2.0*%s.x*duvdy.x - duvdy.y));",
                                     v.fsIn(), v.fsIn());
            fragBuilder->codeAppendf("edgeAlpha = half(%s.x*%s.x - %s.y);", v.fsIn(), v.fsIn(),
                                     v.fsIn());
            fragBuilder->codeAppendf("edgeAlpha = "
                                     "saturate(0.5 - edgeAlpha / length(gF));}");

            fragBuilder->codeAppendf("half4 %s = half4(edgeAlpha);", args.fOutputCoverage);
        }

    private:
        // CPU-side cache used by setData() to detect local-matrix changes.
        SkMatrix fLocalMatrix = SkMatrix::InvalidMatrix();

        UniformHandle fLocalMatrixUniform;
    };

    return std::make_unique<Impl>();
}
675
676 GR_DEFINE_GEOMETRY_PROCESSOR_TEST(QuadEdgeEffect);
677
678 #if GR_TEST_UTILS
// Creates a randomized QuadEdgeEffect for processor unit tests, or nullptr
// when the backend cannot run it.
GrGeometryProcessor* QuadEdgeEffect::TestCreate(GrProcessorTestData* d) {
    SkMatrix localMatrix = GrTest::TestMatrix(d->fRandom);
    bool usesLocalCoords = d->fRandom->nextBool();
    bool wideColor = d->fRandom->nextBool();
    // Doesn't work without derivative instructions.
    return d->caps()->shaderCaps()->shaderDerivativeSupport()
                   ? QuadEdgeEffect::Make(d->allocator(), localMatrix, usesLocalCoords, wideColor)
                   : nullptr;
}
688 #endif
689
690 class AAConvexPathOp final : public GrMeshDrawOp {
691 private:
692 using Helper = GrSimpleMeshDrawOpHelperWithStencil;
693
694 public:
695 DEFINE_OP_CLASS_ID
696
Make(GrRecordingContext* context, GrPaint&& paint, const SkMatrix& viewMatrix, const SkPath& path, const GrUserStencilSettings* stencilSettings)697 static GrOp::Owner Make(GrRecordingContext* context,
698 GrPaint&& paint,
699 const SkMatrix& viewMatrix,
700 const SkPath& path,
701 const GrUserStencilSettings* stencilSettings) {
702 return Helper::FactoryHelper<AAConvexPathOp>(context, std::move(paint), viewMatrix, path,
703 stencilSettings);
704 }
705
AAConvexPathOp(GrProcessorSet* processorSet, const SkPMColor4f& color, const SkMatrix& viewMatrix, const SkPath& path, const GrUserStencilSettings* stencilSettings)706 AAConvexPathOp(GrProcessorSet* processorSet, const SkPMColor4f& color,
707 const SkMatrix& viewMatrix, const SkPath& path,
708 const GrUserStencilSettings* stencilSettings)
709 : INHERITED(ClassID()), fHelper(processorSet, GrAAType::kCoverage, stencilSettings) {
710 fPaths.emplace_back(PathData{viewMatrix, path, color});
711 this->setTransformedBounds(path.getBounds(), viewMatrix, HasAABloat::kYes,
712 IsHairline::kNo);
713 }
714
715 const char* name() const override { return "AAConvexPathOp"; }
716
717 void visitProxies(const GrVisitProxyFunc& func) const override {
718 if (fProgramInfo) {
719 fProgramInfo->visitFPProxies(func);
720 } else {
721 fHelper.visitProxies(func);
722 }
723 }
724
725 FixedFunctionFlags fixedFunctionFlags() const override { return fHelper.fixedFunctionFlags(); }
726
727 GrProcessorSet::Analysis finalize(const GrCaps& caps, const GrAppliedClip* clip,
728 GrClampType clampType) override {
729 return fHelper.finalizeProcessors(
730 caps, clip, clampType, GrProcessorAnalysisCoverage::kSingleChannel,
731 &fPaths.back().fColor, &fWideColor);
732 }
733
734 private:
735 GrProgramInfo* programInfo() override { return fProgramInfo; }
736
737 void onCreateProgramInfo(const GrCaps* caps,
738 SkArenaAlloc* arena,
739 const GrSurfaceProxyView& writeView,
740 bool usesMSAASurface,
741 GrAppliedClip&& appliedClip,
742 const GrDstProxyView& dstProxyView,
743 GrXferBarrierFlags renderPassXferBarriers,
744 GrLoadOp colorLoadOp) override {
745 SkMatrix invert;
746 if (fHelper.usesLocalCoords() && !fPaths.back().fViewMatrix.invert(&invert)) {
747 return;
748 }
749
750 GrGeometryProcessor* quadProcessor = QuadEdgeEffect::Make(arena, invert,
751 fHelper.usesLocalCoords(),
752 fWideColor);
753
754 fProgramInfo = fHelper.createProgramInfoWithStencil(caps, arena, writeView, usesMSAASurface,
755 std::move(appliedClip),
756 dstProxyView, quadProcessor,
757 GrPrimitiveType::kTriangles,
758 renderPassXferBarriers, colorLoadOp);
759 }
760
761 void onPrepareDraws(GrMeshDrawTarget* target) override {
762 int instanceCount = fPaths.count();
763
764 if (!fProgramInfo) {
765 this->createProgramInfo(target);
766 if (!fProgramInfo) {
767 return;
768 }
769 }
770
771 const size_t kVertexStride = fProgramInfo->geomProc().vertexStride();
772
773 fDraws.reserve(instanceCount);
774
775 // TODO generate all segments for all paths and use one vertex buffer
776 for (int i = 0; i < instanceCount; i++) {
777 const PathData& args = fPaths[i];
778
779 // We use the fact that SkPath::transform path does subdivision based on
780 // perspective. Otherwise, we apply the view matrix when copying to the
781 // segment representation.
782 const SkMatrix* viewMatrix = &args.fViewMatrix;
783
784 // We avoid initializing the path unless we have to
785 const SkPath* pathPtr = &args.fPath;
786 SkTLazy<SkPath> tmpPath;
787 if (viewMatrix->hasPerspective()) {
788 SkPath* tmpPathPtr = tmpPath.init(*pathPtr);
789 tmpPathPtr->setIsVolatile(true);
790 tmpPathPtr->transform(*viewMatrix);
791 viewMatrix = &SkMatrix::I();
792 pathPtr = tmpPathPtr;
793 }
794
795 int vertexCount;
796 int indexCount;
797 enum {
798 kPreallocSegmentCnt = 512 / sizeof(Segment),
799 kPreallocDrawCnt = 4,
800 };
801 SkSTArray<kPreallocSegmentCnt, Segment, true> segments;
802 SkPoint fanPt;
803
804 if (!get_segments(*pathPtr, *viewMatrix, &segments, &fanPt, &vertexCount,
805 &indexCount)) {
806 continue;
807 }
808
809 sk_sp<const GrBuffer> vertexBuffer;
810 int firstVertex;
811
812 VertexWriter verts{target->makeVertexSpace(kVertexStride,
813 vertexCount,
814 &vertexBuffer,
815 &firstVertex)};
816
817 if (!verts) {
818 SkDebugf("Could not allocate vertices\n");
819 return;
820 }
821
822 sk_sp<const GrBuffer> indexBuffer;
823 int firstIndex;
824
825 uint16_t *idxs = target->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
826 if (!idxs) {
827 SkDebugf("Could not allocate indices\n");
828 return;
829 }
830
831 SkSTArray<kPreallocDrawCnt, Draw, true> draws;
832 GrVertexColor color(args.fColor, fWideColor);
833 create_vertices(segments, fanPt, color, &draws, verts, idxs, kVertexStride);
834
835 GrSimpleMesh* meshes = target->allocMeshes(draws.count());
836 for (int j = 0; j < draws.count(); ++j) {
837 const Draw& draw = draws[j];
838 meshes[j].setIndexed(indexBuffer, draw.fIndexCnt, firstIndex, 0,
839 draw.fVertexCnt - 1, GrPrimitiveRestart::kNo, vertexBuffer,
840 firstVertex);
841 firstIndex += draw.fIndexCnt;
842 firstVertex += draw.fVertexCnt;
843 }
844
845 fDraws.push_back({ meshes, draws.count() });
846 }
847 }
848
849 void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
850 if (!fProgramInfo || fDraws.isEmpty()) {
851 return;
852 }
853
854 flushState->bindPipelineAndScissorClip(*fProgramInfo, chainBounds);
855 flushState->bindTextures(fProgramInfo->geomProc(), nullptr, fProgramInfo->pipeline());
856 for (int i = 0; i < fDraws.count(); ++i) {
857 for (int j = 0; j < fDraws[i].fMeshCount; ++j) {
858 flushState->drawMesh(fDraws[i].fMeshes[j]);
859 }
860 }
861 }
862
863 CombineResult onCombineIfPossible(GrOp* t, SkArenaAlloc*, const GrCaps& caps) override {
864 AAConvexPathOp* that = t->cast<AAConvexPathOp>();
865 if (!fHelper.isCompatible(that->fHelper, caps, this->bounds(), that->bounds())) {
866 return CombineResult::kCannotCombine;
867 }
868 if (fHelper.usesLocalCoords() &&
869 !SkMatrixPriv::CheapEqual(fPaths[0].fViewMatrix, that->fPaths[0].fViewMatrix)) {
870 return CombineResult::kCannotCombine;
871 }
872
873 fPaths.push_back_n(that->fPaths.count(), that->fPaths.begin());
874 fWideColor |= that->fWideColor;
875 return CombineResult::kMerged;
876 }
877
878 #if GR_TEST_UTILS
879 SkString onDumpInfo() const override {
880 return SkStringPrintf("Count: %d\n%s", fPaths.count(), fHelper.dumpInfo().c_str());
881 }
882 #endif
883
884 struct PathData {
885 SkMatrix fViewMatrix;
886 SkPath fPath;
887 SkPMColor4f fColor;
888 };
889
890 Helper fHelper;
891 SkSTArray<1, PathData, true> fPaths;
892 bool fWideColor;
893
894 struct MeshDraw {
895 GrSimpleMesh* fMeshes;
896 int fMeshCount;
897 };
898
899 SkTDArray<MeshDraw> fDraws;
900 GrProgramInfo* fProgramInfo = nullptr;
901
902 using INHERITED = GrMeshDrawOp;
903 };
904
905 } // anonymous namespace
906
907 ///////////////////////////////////////////////////////////////////////////////
908
// Accepts only simple-filled, non-inverse convex paths with a known winding
// direction, on hardware with derivative support, when coverage AA is wanted.
PathRenderer::CanDrawPath AAConvexPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
    // This check requires convexity and known direction, since the direction is used to build
    // the geometry segments. Degenerate convex paths will fall through to some other path renderer.
    if (args.fCaps->shaderCaps()->shaderDerivativeSupport() &&
        (GrAAType::kCoverage == args.fAAType) && args.fShape->style().isSimpleFill() &&
        !args.fShape->inverseFilled() && args.fShape->knownToBeConvex() &&
        args.fShape->knownDirection()) {
        return CanDrawPath::kYes;
    }
    return CanDrawPath::kNo;
}
920
// Extracts the path from the shape and records an AAConvexPathOp for it.
// Preconditions (asserted): single-sampled target, non-empty shape.
bool AAConvexPathRenderer::onDrawPath(const DrawPathArgs& args) {
    GR_AUDIT_TRAIL_AUTO_FRAME(args.fContext->priv().auditTrail(),
                              "AAConvexPathRenderer::onDrawPath");
    SkASSERT(args.fSurfaceDrawContext->numSamples() <= 1);
    SkASSERT(!args.fShape->isEmpty());

    SkPath path;
    args.fShape->asPath(&path);

    GrOp::Owner op = AAConvexPathOp::Make(args.fContext, std::move(args.fPaint),
                                          *args.fViewMatrix,
                                          path, args.fUserStencilSettings);
    args.fSurfaceDrawContext->addDrawOp(args.fClip, std::move(op));
    return true;
}
936
937 } // namespace skgpu::v1
938
939 #if GR_TEST_UTILS
940
// Fuzz entry point: builds an AAConvexPathOp from a random invertible matrix,
// random convex path, and random stencil settings.
GR_DRAW_OP_TEST_DEFINE(AAConvexPathOp) {
    SkMatrix viewMatrix = GrTest::TestMatrixInvertible(random);
    const SkPath& path = GrTest::TestPathConvex(random);
    const GrUserStencilSettings* stencilSettings = GrGetRandomStencil(random, context);
    return skgpu::v1::AAConvexPathOp::Make(context, std::move(paint), viewMatrix, path,
                                           stencilSettings);
}
948
949 #endif
950