xref: /third_party/skia/src/gpu/vk/GrVkImage.cpp (revision cb93a386)
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkImage.h"

#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkImageView.h"
#include "src/gpu/vk/GrVkMemory.h"
#include "src/gpu/vk/GrVkTexture.h"
#include "src/gpu/vk/GrVkUtil.h"

#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
constexpr uint32_t VKIMAGE_LIMIT_SIZE = 10000 * 10000; // a VkImage must stay under 10000*10000 pixels

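// Factory for stencil attachment images: depth/stencil plus transfer-dst usage, a single mip
// level, never protected or memoryless, and always budgeted.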
sk_sp<GrVkImage> GrVkImage::MakeStencil(GrVkGpu* gpu,
                                        SkISize dimensions,
                                        int sampleCnt,
                                        VkFormat format) {
    VkImageUsageFlags vkUsageFlags =
            VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    return GrVkImage::Make(gpu,
                           dimensions,
                           UsageFlags::kStencilAttachment,
                           sampleCnt,
                           format,
                           /*mipLevels=*/1,
                           vkUsageFlags,
                           GrProtected::kNo,
                           GrMemoryless::kNo,
                           SkBudgeted::kYes);
}

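// Factory for MSAA color attachments. Memoryless attachments are marked transient so they can be
// backed by lazily allocated memory; otherwise the image also supports transfer src/dst.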
sk_sp<GrVkImage> GrVkImage::MakeMSAA(GrVkGpu* gpu,
                                     SkISize dimensions,
                                     int numSamples,
                                     VkFormat format,
                                     GrProtected isProtected,
                                     GrMemoryless memoryless) {
    SkASSERT(numSamples > 1);

    VkImageUsageFlags vkUsageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    if (memoryless == GrMemoryless::kYes) {
        vkUsageFlags |= VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT;
    } else {
        vkUsageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    }
    return GrVkImage::Make(gpu,
                           dimensions,
                           UsageFlags::kColorAttachment,
                           numSamples,
                           format,
                           /*mipLevels=*/1,
                           vkUsageFlags,
                           isProtected,
                           memoryless,
                           SkBudgeted::kYes);
}

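// Factory for sampled textures. Renderable textures additionally get color-attachment and
// input-attachment usage. Illustrative call only (the parameter values below are hypothetical,
// not taken from any caller in this file):
//   sk_sp<GrVkImage> tex = GrVkImage::MakeTexture(gpu, {256, 256}, VK_FORMAT_R8G8B8A8_UNORM,
//                                                 /*mipLevels=*/1, GrRenderable::kYes,
//                                                 /*numSamples=*/1, SkBudgeted::kYes,
//                                                 GrProtected::kNo);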
sk_sp<GrVkImage> GrVkImage::MakeTexture(GrVkGpu* gpu,
                                        SkISize dimensions,
                                        VkFormat format,
                                        uint32_t mipLevels,
                                        GrRenderable renderable,
                                        int numSamples,
                                        SkBudgeted budgeted,
                                        GrProtected isProtected) {
    UsageFlags usageFlags = UsageFlags::kTexture;
    VkImageUsageFlags vkUsageFlags = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                     VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    if (renderable == GrRenderable::kYes) {
        usageFlags |= UsageFlags::kColorAttachment;
        vkUsageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
        // We always make our render targets support being used as input attachments
        vkUsageFlags |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
    }

    return GrVkImage::Make(gpu,
                           dimensions,
                           usageFlags,
                           numSamples,
                           format,
                           mipLevels,
                           vkUsageFlags,
                           isProtected,
                           GrMemoryless::kNo,
                           budgeted);
}

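// Creates the views the requested usages need: a single-mip framebuffer view for color/stencil
// attachment use and a view covering every mip level for texturing.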
static bool make_views(GrVkGpu* gpu,
                       const GrVkImageInfo& info,
                       GrAttachment::UsageFlags attachmentUsages,
                       sk_sp<const GrVkImageView>* framebufferView,
                       sk_sp<const GrVkImageView>* textureView) {
    GrVkImageView::Type viewType;
    if (attachmentUsages & GrAttachment::UsageFlags::kStencilAttachment) {
        // If we have stencil usage then we shouldn't have any other usages
        SkASSERT(attachmentUsages == GrAttachment::UsageFlags::kStencilAttachment);
        viewType = GrVkImageView::kStencil_Type;
    } else {
        viewType = GrVkImageView::kColor_Type;
    }

    if (SkToBool(attachmentUsages & GrAttachment::UsageFlags::kStencilAttachment) ||
        SkToBool(attachmentUsages & GrAttachment::UsageFlags::kColorAttachment)) {
        // Attachments can only have a mip level of 1
        *framebufferView = GrVkImageView::Make(
                gpu, info.fImage, info.fFormat, viewType, 1, info.fYcbcrConversionInfo);
        if (!*framebufferView) {
            return false;
        }
    }

    if (attachmentUsages & GrAttachment::UsageFlags::kTexture) {
        *textureView = GrVkImageView::Make(gpu,
                                           info.fImage,
                                           info.fFormat,
                                           viewType,
                                           info.fLevelCount,
                                           info.fYcbcrConversionInfo);
        if (!*textureView) {
            return false;
        }
    }
    return true;
}

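// Allocates a new VkImage and its backing memory, builds the views the usages require, and wraps
// everything in a GrVkImage. Returns nullptr (and cleans up the image) on failure.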
sk_sp<GrVkImage> GrVkImage::Make(GrVkGpu* gpu,
                                 SkISize dimensions,
                                 UsageFlags attachmentUsages,
                                 int sampleCnt,
                                 VkFormat format,
                                 uint32_t mipLevels,
                                 VkImageUsageFlags vkUsageFlags,
                                 GrProtected isProtected,
                                 GrMemoryless memoryless,
                                 SkBudgeted budgeted) {
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = format;
    imageDesc.fWidth = dimensions.width();
    imageDesc.fHeight = dimensions.height();
    imageDesc.fLevels = mipLevels;
    imageDesc.fSamples = sampleCnt;
    imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = vkUsageFlags;
    imageDesc.fIsProtected = isProtected;

    GrVkImageInfo info;
    if (!GrVkImage::InitImageInfo(gpu, imageDesc, &info)) {
        return nullptr;
    }

    sk_sp<const GrVkImageView> framebufferView;
    sk_sp<const GrVkImageView> textureView;
    if (!make_views(gpu, info, attachmentUsages, &framebufferView, &textureView)) {
        GrVkImage::DestroyImageInfo(gpu, &info);
        return nullptr;
    }

    sk_sp<GrBackendSurfaceMutableStateImpl> mutableState(
            new GrBackendSurfaceMutableStateImpl(info.fImageLayout, info.fCurrentQueueFamily));
    return sk_sp<GrVkImage>(new GrVkImage(gpu,
                                          dimensions,
                                          attachmentUsages,
                                          info,
                                          std::move(mutableState),
                                          std::move(framebufferView),
                                          std::move(textureView),
                                          budgeted));
}

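// Wraps an externally provided VkImage (e.g. a swapchain or client image). View creation is
// skipped when the image will only be rendered to via an external secondary command buffer.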
sk_sp<GrVkImage> GrVkImage::MakeWrapped(GrVkGpu* gpu,
                                        SkISize dimensions,
                                        const GrVkImageInfo& info,
                                        sk_sp<GrBackendSurfaceMutableStateImpl> mutableState,
                                        UsageFlags attachmentUsages,
                                        GrWrapOwnership ownership,
                                        GrWrapCacheable cacheable,
                                        bool forSecondaryCB) {
    sk_sp<const GrVkImageView> framebufferView;
    sk_sp<const GrVkImageView> textureView;
    if (!forSecondaryCB) {
        if (!make_views(gpu, info, attachmentUsages, &framebufferView, &textureView)) {
            return nullptr;
        }
    }

    GrBackendObjectOwnership backendOwnership = kBorrow_GrWrapOwnership == ownership
                                                        ? GrBackendObjectOwnership::kBorrowed
                                                        : GrBackendObjectOwnership::kOwned;

    return sk_sp<GrVkImage>(new GrVkImage(gpu,
                                          dimensions,
                                          attachmentUsages,
                                          info,
                                          std::move(mutableState),
                                          std::move(framebufferView),
                                          std::move(textureView),
                                          backendOwnership,
                                          cacheable,
                                          forSecondaryCB));
}

// OH ISSUE: Integrate Destroy and Free
void GrVkImage::DestroyAndFreeImageMemory(const GrVkGpu* gpu, const GrVkAlloc& alloc, const VkImage& image)
{
    VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));
    GrVkMemory::FreeImageMemory(gpu, alloc);
}

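// Constructor for images Skia created and owns itself (see Make above); these are never borrowed.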
GrVkImage::GrVkImage(GrVkGpu* gpu,
                     SkISize dimensions,
                     UsageFlags supportedUsages,
                     const GrVkImageInfo& info,
                     sk_sp<GrBackendSurfaceMutableStateImpl> mutableState,
                     sk_sp<const GrVkImageView> framebufferView,
                     sk_sp<const GrVkImageView> textureView,
                     SkBudgeted budgeted)
        : GrAttachment(gpu,
                       dimensions,
                       supportedUsages,
                       info.fSampleCount,
                       info.fLevelCount > 1 ? GrMipmapped::kYes : GrMipmapped::kNo,
                       info.fProtected,
                       info.fAlloc.fFlags & GrVkAlloc::kLazilyAllocated_Flag ? GrMemoryless::kYes
                                                                             : GrMemoryless::kNo)
        , fInfo(info)
        , fInitialQueueFamily(info.fCurrentQueueFamily)
        , fMutableState(std::move(mutableState))
        , fFramebufferView(std::move(framebufferView))
        , fTextureView(std::move(textureView))
#ifdef SKIA_OHOS
        , fBudgeted(budgeted)
#endif
        , fIsBorrowed(false) {
    this->init(gpu, false);
    this->setRealAlloc(true); // OH ISSUE: set real alloc flag
    this->setRealAllocSize(dimensions.height() * dimensions.width() * 4); // OH ISSUE: set real alloc size (4 bytes per pixel)
    this->registerWithCache(budgeted);
}

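// Constructor for wrapped images; ownership, cacheability, and secondary-command-buffer use are
// dictated by the caller.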
GrVkImage::GrVkImage(GrVkGpu* gpu,
                     SkISize dimensions,
                     UsageFlags supportedUsages,
                     const GrVkImageInfo& info,
                     sk_sp<GrBackendSurfaceMutableStateImpl> mutableState,
                     sk_sp<const GrVkImageView> framebufferView,
                     sk_sp<const GrVkImageView> textureView,
                     GrBackendObjectOwnership ownership,
                     GrWrapCacheable cacheable,
                     bool forSecondaryCB)
        : GrAttachment(gpu,
                       dimensions,
                       supportedUsages,
                       info.fSampleCount,
                       info.fLevelCount > 1 ? GrMipmapped::kYes : GrMipmapped::kNo,
                       info.fProtected)
        , fInfo(info)
        , fInitialQueueFamily(info.fCurrentQueueFamily)
        , fMutableState(std::move(mutableState))
        , fFramebufferView(std::move(framebufferView))
        , fTextureView(std::move(textureView))
        , fIsBorrowed(GrBackendObjectOwnership::kBorrowed == ownership) {
    this->init(gpu, forSecondaryCB);
    this->registerWithCacheWrapped(cacheable);
}

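// Shared constructor tail: debug-checks the usage and queue-family invariants, then creates the
// ref-counted Resource that backs the image (a BorrowedResource for borrowed images, and none at
// all for secondary-command-buffer wrappers).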
void GrVkImage::init(GrVkGpu* gpu, bool forSecondaryCB) {
    SkASSERT(fMutableState->getImageLayout() == fInfo.fImageLayout);
    SkASSERT(fMutableState->getQueueFamilyIndex() == fInfo.fCurrentQueueFamily);
#ifdef SK_DEBUG
    if (fInfo.fImageUsageFlags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
        SkASSERT(SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT));
    } else {
        if (fInfo.fAlloc.fFlags & GrVkAlloc::kLazilyAllocated_Flag) {
            SkASSERT(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT);
            SkASSERT(!SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT) &&
                     !SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
        } else {
            SkASSERT(!SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT));
            SkASSERT(SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT) &&
                     SkToBool(fInfo.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
        }
    }
    // We can't transfer from the non-graphics queue to the graphics queue since we can't
    // release the image from the original queue without having that queue. This limits us in terms
    // of the types of queue indices we can handle.
    if (fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_IGNORED &&
        fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
        fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
        if (fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
            if (fInfo.fCurrentQueueFamily != gpu->queueIndex()) {
                SkASSERT(false);
            }
        } else {
            SkASSERT(false);
        }
    }
#endif
    if (forSecondaryCB) {
        fResource = nullptr;
    } else if (fIsBorrowed) {
        fResource = new BorrowedResource(gpu, fInfo.fImage, fInfo.fAlloc, fInfo.fImageTiling);
    } else {
        SkASSERT(VK_NULL_HANDLE != fInfo.fAlloc.fMemory);
        fResource = new Resource(gpu, fInfo.fImage, fInfo.fAlloc, fInfo.fImageTiling);
    }
}

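// Maps the current layout to the pipeline stage that must be used as srcStageMask when
// transitioning away from that layout.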
VkPipelineStageFlags GrVkImage::LayoutToPipelineSrcStageFlags(const VkImageLayout layout) {
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_TRANSFER_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        return VK_PIPELINE_STAGE_HOST_BIT;
    } else if (VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
        return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    }

    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
    return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
}

VkAccessFlags GrVkImage::LayoutToSrcAccessMask(const VkImageLayout layout) {
    // Currently we assume we will never be doing any explicit shader writes (this doesn't include
    // color attachment or depth/stencil writes). So we will ignore the
    // VK_MEMORY_OUTPUT_SHADER_WRITE_BIT.

    // We can only directly access the host memory if we are in preinitialized or general layout,
    // and the image is linear.
    // TODO: Add check for linear here so we are not always adding host to general, and we should
    //       only be in preinitialized if we are linear
    VkAccessFlags flags = 0;
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_TRANSFER_WRITE_BIT |
                VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        flags = VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
        // There are no writes that need to be made available
        flags = 0;
    }
    return flags;
}

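// Returns the aspect bits a barrier's subresource range must cover for the given format.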
VkImageAspectFlags vk_format_to_aspect_flags(VkFormat format) {
    switch (format) {
        case VK_FORMAT_S8_UINT:
            return VK_IMAGE_ASPECT_STENCIL_BIT;
        case VK_FORMAT_D24_UNORM_S8_UINT: // fallthrough
        case VK_FORMAT_D32_SFLOAT_S8_UINT:
            return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
        default:
            return VK_IMAGE_ASPECT_COLOR_BIT;
    }
}

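// Records a layout and/or queue-family transition by emitting a VkImageMemoryBarrier, then
// updates the tracked mutable state. Redundant transitions to read-only layouts are skipped.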
void GrVkImage::setImageLayoutAndQueueIndex(const GrVkGpu* gpu,
                                            VkImageLayout newLayout,
                                            VkAccessFlags dstAccessMask,
                                            VkPipelineStageFlags dstStageMask,
                                            bool byRegion,
                                            uint32_t newQueueFamilyIndex) {
// Enable the following block to test new devices to confirm their lazy images stay at 0 memory use.
#if 0
    if (fInfo.fAlloc.fFlags & GrVkAlloc::kLazilyAllocated_Flag) {
        VkDeviceSize size;
        VK_CALL(gpu, GetDeviceMemoryCommitment(gpu->device(), fInfo.fAlloc.fMemory, &size));

        SkDebugf("Lazy Image. This: %p, image: %d, size: %d\n", this, fInfo.fImage, size);
    }
#endif
    SkASSERT(!gpu->isDeviceLost());
    SkASSERT(newLayout == this->currentLayout() ||
             (VK_IMAGE_LAYOUT_UNDEFINED != newLayout &&
              VK_IMAGE_LAYOUT_PREINITIALIZED != newLayout));
    VkImageLayout currentLayout = this->currentLayout();
    uint32_t currentQueueIndex = this->currentQueueFamilyIndex();

#ifdef SK_DEBUG
    if (fInfo.fSharingMode == VK_SHARING_MODE_CONCURRENT) {
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     currentQueueIndex == VK_QUEUE_FAMILY_FOREIGN_EXT);
        } else {
            SkASSERT(newQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     newQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT);
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED);
        }
    } else {
        SkASSERT(fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE);
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED ||
            currentQueueIndex == gpu->queueIndex()) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                     currentQueueIndex == VK_QUEUE_FAMILY_FOREIGN_EXT ||
                     currentQueueIndex == gpu->queueIndex());
        } else if (newQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
                   newQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT) {
            SkASSERT(currentQueueIndex == VK_QUEUE_FAMILY_IGNORED ||
                     currentQueueIndex == gpu->queueIndex());
        }
    }
#endif

    if (fInfo.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
        if (newQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) {
            newQueueFamilyIndex = gpu->queueIndex();
        }
        if (currentQueueIndex == VK_QUEUE_FAMILY_IGNORED) {
            currentQueueIndex = gpu->queueIndex();
        }
    }

    // If the old and new layout are the same and the layout is a read only layout, there is no need
    // to put in a barrier unless we also need to switch queues.
    if (newLayout == currentLayout && currentQueueIndex == newQueueFamilyIndex &&
        (VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == currentLayout)) {
        return;
    }

    VkAccessFlags srcAccessMask = GrVkImage::LayoutToSrcAccessMask(currentLayout);
    VkPipelineStageFlags srcStageMask = GrVkImage::LayoutToPipelineSrcStageFlags(currentLayout);

    VkImageAspectFlags aspectFlags = vk_format_to_aspect_flags(fInfo.fFormat);

    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,          // sType
        nullptr,                                         // pNext
        srcAccessMask,                                   // srcAccessMask
        dstAccessMask,                                   // dstAccessMask
        currentLayout,                                   // oldLayout
        newLayout,                                       // newLayout
        currentQueueIndex,                               // srcQueueFamilyIndex
        newQueueFamilyIndex,                             // dstQueueFamilyIndex
        fInfo.fImage,                                    // image
        { aspectFlags, 0, fInfo.fLevelCount, 0, 1 }      // subresourceRange
    };
    SkASSERT(srcAccessMask == imageMemoryBarrier.srcAccessMask);
    gpu->addImageMemoryBarrier(this->resource(), srcStageMask, dstStageMask, byRegion,
                               &imageMemoryBarrier);

    this->updateImageLayout(newLayout);
    this->setQueueFamilyIndex(newQueueFamilyIndex);
}

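// Creates the VkImage described by imageDesc, allocates and binds its memory, and fills out
// *info. If memory allocation fails the image is destroyed again, so nothing leaks.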
bool GrVkImage::InitImageInfo(GrVkGpu* gpu, const ImageDesc& imageDesc, GrVkImageInfo* info) {
    if (0 == imageDesc.fWidth || 0 == imageDesc.fHeight) {
        return false;
    }
    if ((imageDesc.fIsProtected == GrProtected::kYes) && !gpu->vkCaps().supportsProtectedMemory()) {
        return false;
    }

    bool isLinear = VK_IMAGE_TILING_LINEAR == imageDesc.fImageTiling;
    VkImageLayout initialLayout = isLinear ? VK_IMAGE_LAYOUT_PREINITIALIZED
                                           : VK_IMAGE_LAYOUT_UNDEFINED;

    // Create Image
    VkSampleCountFlagBits vkSamples;
    if (!GrSampleCountToVkSampleCount(imageDesc.fSamples, &vkSamples)) {
        return false;
    }

    SkASSERT(VK_IMAGE_TILING_OPTIMAL == imageDesc.fImageTiling ||
             VK_SAMPLE_COUNT_1_BIT == vkSamples);

    VkImageCreateFlags createflags = 0;
    if (imageDesc.fIsProtected == GrProtected::kYes || gpu->protectedContext()) {
        createflags |= VK_IMAGE_CREATE_PROTECTED_BIT;
    }
    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,         // sType
        nullptr,                                     // pNext
        createflags,                                 // VkImageCreateFlags
        imageDesc.fImageType,                        // VkImageType
        imageDesc.fFormat,                           // VkFormat
        { imageDesc.fWidth, imageDesc.fHeight, 1 },  // VkExtent3D
        imageDesc.fLevels,                           // mipLevels
        1,                                           // arrayLayers
        vkSamples,                                   // samples
        imageDesc.fImageTiling,                      // VkImageTiling
        imageDesc.fUsageFlags,                       // VkImageUsageFlags
        VK_SHARING_MODE_EXCLUSIVE,                   // VkSharingMode
        0,                                           // queueFamilyIndexCount
        nullptr,                                     // pQueueFamilyIndices
        initialLayout                                // initialLayout
    };

    VkImage image = VK_NULL_HANDLE;
    VkResult result;
    if (imageDesc.fWidth * imageDesc.fHeight > VKIMAGE_LIMIT_SIZE) {
        SkDebugf("GrVkImage::InitImageInfo failed, image is too large, width:%u, height:%u\n",
            imageDesc.fWidth, imageDesc.fHeight);
        return false;
    }
    GR_VK_CALL_RESULT(gpu, result, CreateImage(gpu->device(), &imageCreateInfo, nullptr, &image));
    if (result != VK_SUCCESS) {
        return false;
    }

    GrMemoryless memoryless = imageDesc.fUsageFlags & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT
                                      ? GrMemoryless::kYes
                                      : GrMemoryless::kNo;
    GrVkAlloc alloc;
    if (!GrVkMemory::AllocAndBindImageMemory(gpu, image, memoryless, &alloc,
        imageDesc.fWidth * imageDesc.fHeight * 4) ||
        (memoryless == GrMemoryless::kYes &&
         !SkToBool(alloc.fFlags & GrVkAlloc::kLazilyAllocated_Flag))) {
        VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));
        return false;
    }

    info->fImage = image;
    info->fAlloc = alloc;
    info->fImageTiling = imageDesc.fImageTiling;
    info->fImageLayout = initialLayout;
    info->fFormat = imageDesc.fFormat;
    info->fImageUsageFlags = imageDesc.fUsageFlags;
    info->fSampleCount = imageDesc.fSamples;
    info->fLevelCount = imageDesc.fLevels;
    info->fCurrentQueueFamily = VK_QUEUE_FAMILY_IGNORED;
    info->fProtected =
            (createflags & VK_IMAGE_CREATE_PROTECTED_BIT) ? GrProtected::kYes : GrProtected::kNo;
    info->fSharingMode = VK_SHARING_MODE_EXCLUSIVE;
    return true;
}

void GrVkImage::DestroyImageInfo(const GrVkGpu* gpu, GrVkImageInfo* info) {
    DestroyAndFreeImageMemory(gpu, info->fAlloc, info->fImage);
}

GrVkImage::~GrVkImage() {
    // should have been released first
    SkASSERT(!fResource);
    SkASSERT(!fFramebufferView);
    SkASSERT(!fTextureView);
}

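// Transitions the image to PRESENT_SRC (when swapchains are supported and the image did not come
// from an external or foreign queue) and returns it to its initial queue family.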
void GrVkImage::prepareForPresent(GrVkGpu* gpu) {
    VkImageLayout layout = this->currentLayout();
    if (fInitialQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
        fInitialQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
        if (gpu->vkCaps().supportsSwapchain()) {
            layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
        }
    }
    this->setImageLayoutAndQueueIndex(gpu, layout, 0, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, false,
                                      fInitialQueueFamily);
}

void GrVkImage::prepareForExternal(GrVkGpu* gpu) {
    this->setImageLayoutAndQueueIndex(gpu, this->currentLayout(), 0,
                                      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, false,
                                      fInitialQueueFamily);
}

void GrVkImage::releaseImage() {
    if (fResource) {
        fResource->unref();
        fResource = nullptr;
    }
    fFramebufferView.reset();
    fTextureView.reset();
    fCachedBlendingInputDescSet.reset();
    fCachedMSAALoadInputDescSet.reset();
}

void GrVkImage::onRelease() {
    this->releaseImage();
    GrAttachment::onRelease();
}

void GrVkImage::onAbandon() {
    this->releaseImage();
    GrAttachment::onAbandon();
}

void GrVkImage::setResourceRelease(sk_sp<GrRefCntedCallback> releaseHelper) {
    SkASSERT(fResource);
    // Forward the release proc on to GrVkImage::Resource
    fResource->setRelease(std::move(releaseHelper));
}

void GrVkImage::Resource::freeGPUData() const {
    this->invokeReleaseProc();

    // OH ISSUE: hand the image and its memory to the async memory reclaimer when one is available
    auto reclaimer = fGpu->memoryReclaimer();
    if (reclaimer && reclaimer->addMemoryToWaitQueue(fGpu, fAlloc, fImage)) {
        return;
    }

    DestroyAndFreeImageMemory(fGpu, fAlloc, fImage);
}

void GrVkImage::BorrowedResource::freeGPUData() const {
    this->invokeReleaseProc();
}

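// Points the input-attachment binding of the given descriptor set at an image view and layout.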
static void write_input_desc_set(GrVkGpu* gpu,
                                 VkImageView view,
                                 VkImageLayout layout,
                                 VkDescriptorSet descSet) {
    VkDescriptorImageInfo imageInfo;
    memset(&imageInfo, 0, sizeof(VkDescriptorImageInfo));
    imageInfo.sampler = VK_NULL_HANDLE;
    imageInfo.imageView = view;
    imageInfo.imageLayout = layout;

    VkWriteDescriptorSet writeInfo;
    memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
    writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    writeInfo.pNext = nullptr;
    writeInfo.dstSet = descSet;
    writeInfo.dstBinding = GrVkUniformHandler::kInputBinding;
    writeInfo.dstArrayElement = 0;
    writeInfo.descriptorCount = 1;
    writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
    writeInfo.pImageInfo = &imageInfo;
    writeInfo.pBufferInfo = nullptr;
    writeInfo.pTexelBufferView = nullptr;

    GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(), 1, &writeInfo, 0, nullptr));
}

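// Lazily creates and caches the input-attachment descriptor set used to read the destination
// during blending; the view is bound in VK_IMAGE_LAYOUT_GENERAL.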
gr_rp<const GrVkDescriptorSet> GrVkImage::inputDescSetForBlending(GrVkGpu* gpu) {
    if (!this->supportsInputAttachmentUsage()) {
        return nullptr;
    }
    if (fCachedBlendingInputDescSet) {
        return fCachedBlendingInputDescSet;
    }

    fCachedBlendingInputDescSet.reset(gpu->resourceProvider().getInputDescriptorSet());
    if (!fCachedBlendingInputDescSet) {
        return nullptr;
    }

    write_input_desc_set(gpu,
                         this->framebufferView()->imageView(),
                         VK_IMAGE_LAYOUT_GENERAL,
                         *fCachedBlendingInputDescSet->descriptorSet());

    return fCachedBlendingInputDescSet;
}

gr_rp<const GrVkDescriptorSet> GrVkImage::inputDescSetForMSAALoad(GrVkGpu* gpu) {
    if (!this->supportsInputAttachmentUsage()) {
        return nullptr;
    }
    if (fCachedMSAALoadInputDescSet) {
        return fCachedMSAALoadInputDescSet;
    }

    fCachedMSAALoadInputDescSet.reset(gpu->resourceProvider().getInputDescriptorSet());
    if (!fCachedMSAALoadInputDescSet) {
        return nullptr;
    }

    write_input_desc_set(gpu,
                         this->framebufferView()->imageView(),
                         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                         *fCachedMSAALoadInputDescSet->descriptorSet());

    return fCachedMSAALoadInputDescSet;
}

GrVkGpu* GrVkImage::getVkGpu() const {
    SkASSERT(!this->wasDestroyed());
    return static_cast<GrVkGpu*>(this->getGpu());
}

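// Texturable images report a full surface size estimate (format, dimensions, mip chain);
// attachment-only images fall back to GrAttachment's default estimate.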
size_t GrVkImage::onGpuMemorySize() const
{
    if (supportedUsages() & UsageFlags::kTexture) {
        return GrSurface::ComputeSize(this->backendFormat(), this->dimensions(), 1, this->mipmapped());
    } else {
        return GrAttachment::onGpuMemorySize();
    }
}

#if GR_TEST_UTILS
void GrVkImage::setCurrentQueueFamilyToGraphicsQueue(GrVkGpu* gpu) {
    fMutableState->setQueueFamilyIndex(gpu->queueIndex());
}
#endif