/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "util/format/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "virgl_context.h"
#include "virgl_resource.h"
#include "virgl_screen.h"
#include "virgl_staging_mgr.h"
#include "virgl_encode.h" // for declaration of virgl_encode_copy_transfer

/* A (soft) limit for the amount of memory we want to allow for queued staging
 * resources. This is used to decide when we should force a flush, in order to
 * avoid exhausting virtio-gpu memory.
 */
#define VIRGL_QUEUED_STAGING_RES_SIZE_LIMIT (128 * 1024 * 1024)

enum virgl_transfer_map_type {
   VIRGL_TRANSFER_MAP_ERROR = -1,
   VIRGL_TRANSFER_MAP_HW_RES,

   /* Map a range of a staging buffer. The updated contents should be transferred
    * with a copy transfer.
    */
   VIRGL_TRANSFER_MAP_WRITE_TO_STAGING,

   /* Reallocate the underlying virgl_hw_res. */
   VIRGL_TRANSFER_MAP_REALLOC,
   /* Map type for reading back texture data from the host to the guest
    * through a staging buffer. */
   VIRGL_TRANSFER_MAP_READ_FROM_STAGING,
   /* Map type for writing texture data to the host through a staging
    * buffer that needs a readback first. */
   VIRGL_TRANSFER_MAP_WRITE_TO_STAGING_WITH_READBACK,
};

/* Check if a copy transfer from the host can be used (see
 * virgl_can_copy_transfer_from_host below):
 *  1. the resource is a texture,
 *  2. the renderer supports copy transfers from the host,
 *  3. the host is not GLES (no fake FP64), or
 *  4. the format can be rendered to and is a readback format,
 *     or the format is a scanout format and we can read back from scanout
 */
static bool virgl_can_readback_from_rendertarget(struct virgl_screen *vs,
                                                 struct virgl_resource *res)
{
   return res->b.nr_samples < 2 &&
         vs->base.is_format_supported(&vs->base, res->b.format, res->b.target,
                                      res->b.nr_samples, res->b.nr_samples,
                                      PIPE_BIND_RENDER_TARGET);
}

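/* Check whether the resource can be read back through the scanout path: the
 * host scanout is GBM-backed, the resource is bound for scanout, and the
 * format is a supported scanout format.
 */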
static bool virgl_can_readback_from_scanout(struct virgl_screen *vs,
                                            struct virgl_resource *res,
                                            int bind)
{
   return (vs->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_SCANOUT_USES_GBM) &&
         (bind & VIRGL_BIND_SCANOUT) &&
         virgl_has_scanout_format(vs, res->b.format, true);
}

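/* Staging-based copy transfers require host support for copy transfers in
 * both directions and are only used for non-buffer (texture) resources.
 */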
static bool virgl_can_use_staging(struct virgl_screen *vs,
                                  struct virgl_resource *res)
{
   return (vs->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_COPY_TRANSFER_BOTH_DIRECTIONS) &&
         (res->b.target != PIPE_BUFFER);
}

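/* Layered (array or 3D) resources with a stencil component cannot use the
 * copy-transfer-from-host path.
 */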
static bool is_stencil_array(struct virgl_resource *res)
{
   const struct util_format_description *descr = util_format_description(res->b.format);
   return (res->b.array_size > 1 || res->b.depth0 > 1) && util_format_has_stencil(descr);
}

static bool virgl_can_copy_transfer_from_host(struct virgl_screen *vs,
                                              struct virgl_resource *res,
                                              int bind)
{
   return virgl_can_use_staging(vs, res) &&
         !is_stencil_array(res) &&
         virgl_has_readback_format(&vs->base, pipe_to_virgl_format(res->b.format), false) &&
         ((!(vs->caps.caps.v2.capability_bits & VIRGL_CAP_FAKE_FP64)) ||
          virgl_can_readback_from_rendertarget(vs, res) ||
          virgl_can_readback_from_scanout(vs, res, bind));
}

/* We need to flush to properly sync the transfer with the current cmdbuf.
 * But there are cases where the flushing can be skipped:
 *
 *  - synchronization is disabled
 *  - the resource is not referenced by the current cmdbuf
 */
static bool virgl_res_needs_flush(struct virgl_context *vctx,
                                  struct virgl_transfer *trans)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res = virgl_resource(trans->base.resource);

   if (trans->base.usage & PIPE_MAP_UNSYNCHRONIZED)
      return false;

   if (!vws->res_is_referenced(vws, vctx->cbuf, res->hw_res))
      return false;

   return true;
}

/* We need to read back from the host storage to make sure the guest storage
 * is up-to-date.  But there are cases where the readback can be skipped:
 *
 *  - the content can be discarded
 *  - the host storage is read-only
 *
 * Note that PIPE_MAP_WRITE without discard bits requires readback.
 * PIPE_MAP_READ becomes irrelevant.  PIPE_MAP_UNSYNCHRONIZED and
 * PIPE_MAP_FLUSH_EXPLICIT are also irrelevant.
 */
static bool virgl_res_needs_readback(struct virgl_context *vctx,
                                     struct virgl_resource *res,
                                     unsigned usage, unsigned level)
{
   if (usage & (PIPE_MAP_DISCARD_RANGE |
                PIPE_MAP_DISCARD_WHOLE_RESOURCE))
      return false;

   if (res->clean_mask & (1 << level))
      return false;

   return true;
}

static enum virgl_transfer_map_type
virgl_resource_transfer_prepare(struct virgl_context *vctx,
                                struct virgl_transfer *xfer)
{
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);
   struct virgl_winsys *vws = vs->vws;
   struct virgl_resource *res = virgl_resource(xfer->base.resource);
   enum virgl_transfer_map_type map_type = VIRGL_TRANSFER_MAP_HW_RES;
   bool flush;
   bool readback;
   bool wait;

   /* there is no way to map the host storage currently */
   if (xfer->base.usage & PIPE_MAP_DIRECTLY)
      return VIRGL_TRANSFER_MAP_ERROR;

   /* We break the logic down into four steps
    *
    * step 1: determine the required operations independently
    * step 2: look for chances to skip the operations
    * step 3: resolve dependencies between the operations
    * step 4: execute the operations
    */

   flush = virgl_res_needs_flush(vctx, xfer);
   readback = virgl_res_needs_readback(vctx, res, xfer->base.usage,
                                       xfer->base.level);
   /* We need to wait for all cmdbufs, current or previous, that access the
    * resource to finish unless synchronization is disabled.
    */
   wait = !(xfer->base.usage & PIPE_MAP_UNSYNCHRONIZED);

   /* When the transfer range consists of only uninitialized data, we can
    * assume the GPU is not accessing the range and readback is unnecessary.
    * We can proceed as if PIPE_MAP_UNSYNCHRONIZED and
    * PIPE_MAP_DISCARD_RANGE are set.
    */
   if (res->b.target == PIPE_BUFFER &&
       !util_ranges_intersect(&res->valid_buffer_range, xfer->base.box.x,
                              xfer->base.box.x + xfer->base.box.width) &&
       likely(!(virgl_debug & VIRGL_DEBUG_XFER))) {
      flush = false;
      readback = false;
      wait = false;
   }

   /* When the resource is busy but its content can be discarded, we can
    * replace its HW resource or use a staging buffer to avoid waiting.
    */
   if (wait &&
       (xfer->base.usage & (PIPE_MAP_DISCARD_RANGE |
                            PIPE_MAP_DISCARD_WHOLE_RESOURCE)) &&
       likely(!(virgl_debug & VIRGL_DEBUG_XFER))) {
      bool can_realloc = false;

      /* A PIPE_MAP_DISCARD_WHOLE_RESOURCE transfer may be followed by
       * PIPE_MAP_UNSYNCHRONIZED transfers to non-overlapping regions.
       * It cannot be treated as a PIPE_MAP_DISCARD_RANGE transfer,
       * otherwise those following unsynchronized transfers may overwrite
       * valid data.
       */
      if (xfer->base.usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) {
         can_realloc = virgl_can_rebind_resource(vctx, &res->b);
      }

      /* discard implies no readback */
      assert(!readback);

      if (can_realloc || vctx->supports_staging) {
         /* Both map types have some costs.  Do them only when the resource is
          * (or will be) busy for real.  Otherwise, set wait to false.
          */
         wait = (flush || vws->resource_is_busy(vws, res->hw_res));
         if (wait) {
            map_type = (can_realloc) ?
               VIRGL_TRANSFER_MAP_REALLOC :
               VIRGL_TRANSFER_MAP_WRITE_TO_STAGING;

            wait = false;

            /* There is normally no need to flush either, unless the amount of
             * memory we are using for staging resources starts growing, in
             * which case we want to flush to keep our memory consumption in
             * check.
             */
            flush = (vctx->queued_staging_res_size >
               VIRGL_QUEUED_STAGING_RES_SIZE_LIMIT);
         }
      }
   }

   /* readback has some implications */
   if (readback) {
      /* If we are performing a readback of a texture and the renderer supports
       * copy_transfer_from_host, then we can return here with the proper map type.
       */
      if (res->use_staging) {
         if (xfer->base.usage & PIPE_MAP_READ)
            return VIRGL_TRANSFER_MAP_READ_FROM_STAGING;
         else
            return VIRGL_TRANSFER_MAP_WRITE_TO_STAGING_WITH_READBACK;
      }

      /* When the transfer queue has pending writes to this transfer's region,
       * we have to flush before readback.
       */
      if (!flush && virgl_transfer_queue_is_queued(&vctx->queue, xfer))
         flush = true;
   }

   if (flush)
      vctx->base.flush(&vctx->base, NULL, 0);

   /* If we are not allowed to block, and we know that we will have to wait,
    * either because the resource is busy, or because it will become busy due
    * to a readback, return early to avoid performing an incomplete
    * transfer_get. Such an incomplete transfer_get may finish at any time,
    * during which another unsynchronized map could write to the resource
    * contents, leaving the contents in an undefined state.
    */
   if ((xfer->base.usage & PIPE_MAP_DONTBLOCK) &&
       (readback || (wait && vws->resource_is_busy(vws, res->hw_res))))
      return VIRGL_TRANSFER_MAP_ERROR;

   if (readback) {
      /* Readback is yet another command and is transparent to the state
       * trackers.  It should be waited for in all cases, including when
       * PIPE_MAP_UNSYNCHRONIZED is set.
       */
      vws->resource_wait(vws, res->hw_res);
      vws->transfer_get(vws, res->hw_res, &xfer->base.box, xfer->base.stride,
                        xfer->l_stride, xfer->offset, xfer->base.level);
      /* transfer_get puts the resource into a maybe_busy state, so we will have
       * to wait another time if we want to use that resource. */
      wait = true;
   }

   if (wait)
      vws->resource_wait(vws, res->hw_res);

   if (res->use_staging) {
      map_type = VIRGL_TRANSFER_MAP_WRITE_TO_STAGING;
   }

   return map_type;
}

/* Calculate the minimum size of the memory required to service a resource
 * transfer map. Also return the stride and layer_stride for the corresponding
 * layout.
 */
static unsigned
virgl_transfer_map_size(struct virgl_transfer *vtransfer,
                        unsigned *out_stride,
                        unsigned *out_layer_stride)
{
   struct pipe_resource *pres = vtransfer->base.resource;
   struct pipe_box *box = &vtransfer->base.box;
   unsigned stride;
   unsigned layer_stride;
   unsigned size;

   assert(out_stride);
   assert(out_layer_stride);

   stride = util_format_get_stride(pres->format, box->width);
   layer_stride = util_format_get_2d_size(pres->format, stride, box->height);

   if (pres->target == PIPE_TEXTURE_CUBE ||
       pres->target == PIPE_TEXTURE_CUBE_ARRAY ||
       pres->target == PIPE_TEXTURE_3D ||
       pres->target == PIPE_TEXTURE_2D_ARRAY) {
      size = box->depth * layer_stride;
   } else if (pres->target == PIPE_TEXTURE_1D_ARRAY) {
      size = box->depth * stride;
   } else {
      size = layer_stride;
   }

   *out_stride = stride;
   *out_layer_stride = layer_stride;

   return size;
}

/* Maps a region from staging to service the transfer. */
static void *
virgl_staging_map(struct virgl_context *vctx,
                  struct virgl_transfer *vtransfer)
{
   struct virgl_resource *vres = virgl_resource(vtransfer->base.resource);
   unsigned size;
   unsigned align_offset;
   unsigned stride;
   unsigned layer_stride;
   void *map_addr;
   bool alloc_succeeded;

   assert(vctx->supports_staging);

   size = virgl_transfer_map_size(vtransfer, &stride, &layer_stride);

   /* For buffers we need to ensure that the start of the buffer would be
    * aligned to VIRGL_MAP_BUFFER_ALIGNMENT, even if our transfer doesn't
    * actually include it. To achieve this we may need to allocate a slightly
    * larger range from the upload buffer, and later update the uploader
    * resource offset and map address to point to the requested x coordinate
    * within that range.
    *
    * 0       A       2A      3A
    * |-------|---bbbb|bbbbb--|
    *             |--------|    ==> size
    *         |---|             ==> align_offset
    *         |------------|    ==> allocation of size + align_offset
    */
   align_offset = vres->b.target == PIPE_BUFFER ?
                  vtransfer->base.box.x % VIRGL_MAP_BUFFER_ALIGNMENT :
                  0;

   alloc_succeeded =
      virgl_staging_alloc(&vctx->staging, size + align_offset,
                          VIRGL_MAP_BUFFER_ALIGNMENT,
                          &vtransfer->copy_src_offset,
                          &vtransfer->copy_src_hw_res,
                          &map_addr);
   if (alloc_succeeded) {
      /* Update source offset and address to point to the requested x coordinate
       * if we have an align_offset (see above for more information). */
      vtransfer->copy_src_offset += align_offset;
      map_addr += align_offset;

      /* Mark as dirty, since we are updating the host side resource
       * without going through the corresponding guest side resource, and
       * hence the two will diverge.
       */
      virgl_resource_dirty(vres, vtransfer->base.level);

      /* We are using the minimum required size to hold the contents,
       * possibly using a layout different from the layout of the resource,
       * so update the transfer strides accordingly.
       */
      vtransfer->base.stride = stride;
      vtransfer->base.layer_stride = layer_stride;

      /* Track the total size of active staging resources. */
      vctx->queued_staging_res_size += size + align_offset;
   }

   return map_addr;
}

/* Maps a region from staging to service the transfer from host.
 * This function should be called only for texture readbacks
 * from host. */
static void *
virgl_staging_read_map(struct virgl_context *vctx,
                       struct virgl_transfer *vtransfer)
{
   struct virgl_screen *vscreen = virgl_screen(vctx->base.screen);
   struct virgl_winsys *vws = vscreen->vws;
   assert(vtransfer->base.resource->target != PIPE_BUFFER);
   void *map_addr;

   /* There are two ways to perform a readback:
    * a) calling transfer_get();
    * b) calling submit_cmd() with the transfer encoded inside the cmd.
    *
    * For b) we need to:
    *   1. select an offset in the staging buffer
    *   2. encode this transfer in the wire format
    *   3. flush the execbuffer to the host
    *   4. wait till the copy on the host is done
    */
   map_addr = virgl_staging_map(vctx, vtransfer);
   vtransfer->direction = VIRGL_TRANSFER_FROM_HOST;
   virgl_encode_copy_transfer(vctx, vtransfer);
   vctx->base.flush(&vctx->base, NULL, 0);
   vws->resource_wait(vws, vtransfer->copy_src_hw_res);
   return map_addr;
}

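/* Replace the backing virgl_hw_res with a freshly allocated one, so that a
 * busy resource whose contents can be discarded becomes writable without
 * waiting.  The new resource is then rebound to its current binding points.
 */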
static bool
virgl_resource_realloc(struct virgl_context *vctx, struct virgl_resource *res)
{
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);
   const struct pipe_resource *templ = &res->b;
   unsigned vbind, vflags;
   struct virgl_hw_res *hw_res;

   vbind = pipe_to_virgl_bind(vs, templ->bind);
   vflags = pipe_to_virgl_flags(vs, templ->flags);

   int alloc_size = res->use_staging ? 1 : res->metadata.total_size;

   hw_res = vs->vws->resource_create(vs->vws,
                                     templ->target,
                                     NULL,
                                     templ->format,
                                     vbind,
                                     templ->width0,
                                     templ->height0,
                                     templ->depth0,
                                     templ->array_size,
                                     templ->last_level,
                                     templ->nr_samples,
                                     vflags,
                                     alloc_size);
   if (!hw_res)
      return false;

   vs->vws->resource_reference(vs->vws, &res->hw_res, NULL);
   res->hw_res = hw_res;

   /* We can safely clear the range here, since it will be repopulated in the
    * following rebind operation, according to the active buffer binds.
    */
   util_range_set_empty(&res->valid_buffer_range);

   /* count toward the staging resource size limit */
   vctx->queued_staging_res_size += res->metadata.total_size;

   virgl_rebind_resource(vctx, &res->b);

   return true;
}

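/* Map a resource for CPU access.  The transfer is prepared first (flush,
 * readback and/or wait as needed), then serviced either by mapping the
 * hw_res directly or by going through a staging buffer.
 */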
void *
virgl_resource_transfer_map(struct pipe_context *ctx,
                            struct pipe_resource *resource,
                            unsigned level,
                            unsigned usage,
                            const struct pipe_box *box,
                            struct pipe_transfer **transfer)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *vscreen = virgl_screen(ctx->screen);
   struct virgl_winsys *vws = vscreen->vws;
   struct virgl_resource *vres = virgl_resource(resource);
   struct virgl_transfer *trans;
   enum virgl_transfer_map_type map_type;
   void *map_addr;

   /* Multisampled resources require resolve before mapping. */
   assert(resource->nr_samples <= 1);

   /* If the virgl resource was created with persistence and coherency flags,
    * then its memory mapping can only be made in accordance with these
    * flags. We record the "usage" flags in struct virgl_transfer and
    * then virgl_buffer_transfer_unmap() uses them to differentiate
    * unmapping of a host blob resource from a guest one.
    */
   if (resource->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
      usage |= PIPE_MAP_PERSISTENT;

   if (resource->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
      usage |= PIPE_MAP_COHERENT;

   trans = virgl_resource_create_transfer(vctx, resource,
                                          &vres->metadata, level, usage, box);

   map_type = virgl_resource_transfer_prepare(vctx, trans);
   switch (map_type) {
   case VIRGL_TRANSFER_MAP_REALLOC:
      if (!virgl_resource_realloc(vctx, vres)) {
         map_addr = NULL;
         break;
      }
      vws->resource_reference(vws, &trans->hw_res, vres->hw_res);
      FALLTHROUGH;
   case VIRGL_TRANSFER_MAP_HW_RES:
      trans->hw_res_map = vws->resource_map(vws, vres->hw_res);
      if (trans->hw_res_map)
         map_addr = trans->hw_res_map + trans->offset;
      else
         map_addr = NULL;
      break;
   case VIRGL_TRANSFER_MAP_WRITE_TO_STAGING:
      map_addr = virgl_staging_map(vctx, trans);
      /* Copy transfers don't make use of hw_res_map at the moment. */
      trans->hw_res_map = NULL;
      trans->direction = VIRGL_TRANSFER_TO_HOST;
      break;
   case VIRGL_TRANSFER_MAP_READ_FROM_STAGING:
      map_addr = virgl_staging_read_map(vctx, trans);
      /* Copy transfers don't make use of hw_res_map at the moment. */
      trans->hw_res_map = NULL;
      break;
   case VIRGL_TRANSFER_MAP_WRITE_TO_STAGING_WITH_READBACK:
      map_addr = virgl_staging_read_map(vctx, trans);
      /* Copy transfers don't make use of hw_res_map at the moment. */
      trans->hw_res_map = NULL;
      trans->direction = VIRGL_TRANSFER_TO_HOST;
      break;
   case VIRGL_TRANSFER_MAP_ERROR:
   default:
      trans->hw_res_map = NULL;
      map_addr = NULL;
      break;
   }

   if (!map_addr) {
      virgl_resource_destroy_transfer(vctx, trans);
      return NULL;
   }

   if (vres->b.target == PIPE_BUFFER) {
      /* For the checks below to be able to use 'usage', we assume that
       * transfer preparation doesn't affect the usage.
       */
      assert(usage == trans->base.usage);

      /* If we are doing a whole resource discard with a hw_res map, the buffer
       * storage can now be considered unused and we don't care about previous
       * contents.  We can thus mark the storage as uninitialized, but only if
       * the buffer is not host writable (in which case we can't clear the
       * valid range, since that would result in missed readbacks in future
       * transfers).  We only do this for VIRGL_TRANSFER_MAP_HW_RES, since for
       * VIRGL_TRANSFER_MAP_REALLOC we already take care of the buffer range
       * when reallocating and rebinding, and VIRGL_TRANSFER_MAP_WRITE_TO_STAGING
       * is not currently used for whole resource discards.
       */
      if (map_type == VIRGL_TRANSFER_MAP_HW_RES &&
          (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) &&
          (vres->clean_mask & 1)) {
         util_range_set_empty(&vres->valid_buffer_range);
      }

      if (usage & PIPE_MAP_WRITE)
         util_range_add(&vres->b, &vres->valid_buffer_range, box->x, box->x + box->width);
   }

   *transfer = &trans->base;
   return map_addr;
}

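/* Compute the per-level strides and offsets of the guest backing store, and
 * the total size required for it (0 for MSAA resources, which have no guest
 * backing store).
 */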
static void virgl_resource_layout(struct pipe_resource *pt,
                                  struct virgl_resource_metadata *metadata,
                                  uint32_t plane,
                                  uint32_t winsys_stride,
                                  uint32_t plane_offset,
                                  uint64_t modifier)
{
   unsigned level, nblocksy;
   unsigned width = pt->width0;
   unsigned height = pt->height0;
   unsigned depth = pt->depth0;
   unsigned buffer_size = 0;

   for (level = 0; level <= pt->last_level; level++) {
      unsigned slices;

      if (pt->target == PIPE_TEXTURE_CUBE)
         slices = 6;
      else if (pt->target == PIPE_TEXTURE_3D)
         slices = depth;
      else
         slices = pt->array_size;

      nblocksy = util_format_get_nblocksy(pt->format, height);
      metadata->stride[level] = winsys_stride ? winsys_stride :
                                util_format_get_stride(pt->format, width);
      metadata->layer_stride[level] = nblocksy * metadata->stride[level];
      metadata->level_offset[level] = buffer_size;

      buffer_size += slices * metadata->layer_stride[level];

      width = u_minify(width, 1);
      height = u_minify(height, 1);
      depth = u_minify(depth, 1);
   }

   metadata->plane = plane;
   metadata->plane_offset = plane_offset;
   metadata->modifier = modifier;
   if (pt->nr_samples <= 1)
      metadata->total_size = buffer_size;
   else /* don't create guest backing store for MSAA */
      metadata->total_size = 0;
}

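/* Create a resource.  When staging copy transfers from the host are usable,
 * only a minimum-size allocation is requested from the winsys; otherwise the
 * full guest backing store size is allocated.
 */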
static struct pipe_resource *virgl_resource_create_front(struct pipe_screen *screen,
                                                         const struct pipe_resource *templ,
                                                         const void *map_front_private)
{
   unsigned vbind, vflags;
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = CALLOC_STRUCT(virgl_resource);
   uint32_t alloc_size;

   res->b = *templ;
   res->b.screen = &vs->base;
   pipe_reference_init(&res->b.reference, 1);
   vbind = pipe_to_virgl_bind(vs, templ->bind);
   vflags = pipe_to_virgl_flags(vs, templ->flags);
   virgl_resource_layout(&res->b, &res->metadata, 0, 0, 0, 0);

   if ((vs->caps.caps.v2.capability_bits & VIRGL_CAP_APP_TWEAK_SUPPORT) &&
       vs->tweak_gles_emulate_bgra &&
       (templ->format == PIPE_FORMAT_B8G8R8A8_SRGB ||
        templ->format == PIPE_FORMAT_B8G8R8A8_UNORM ||
        templ->format == PIPE_FORMAT_B8G8R8X8_SRGB ||
        templ->format == PIPE_FORMAT_B8G8R8X8_UNORM)) {
      vbind |= VIRGL_BIND_PREFER_EMULATED_BGRA;
   }

   // If the renderer supports copy transfers from the host, then for textures
   // we only allocate a minimum-size bo for the guest storage.
   // This size is not passed to the host.
   res->use_staging = virgl_can_copy_transfer_from_host(vs, res, vbind);

   if (res->use_staging)
      alloc_size = 1;
   else
      alloc_size = res->metadata.total_size;

   res->hw_res = vs->vws->resource_create(vs->vws, templ->target,
                                          map_front_private,
                                          templ->format, vbind,
                                          templ->width0,
                                          templ->height0,
                                          templ->depth0,
                                          templ->array_size,
                                          templ->last_level,
                                          templ->nr_samples,
                                          vflags,
                                          alloc_size);
   if (!res->hw_res) {
      FREE(res);
      return NULL;
   }

   res->clean_mask = (1 << VR_MAX_TEXTURE_2D_LEVELS) - 1;

   if (templ->target == PIPE_BUFFER) {
      util_range_init(&res->valid_buffer_range);
      virgl_buffer_init(res);
   } else {
      virgl_texture_init(res);
   }

   return &res->b;
}

static struct pipe_resource *virgl_resource_create(struct pipe_screen *screen,
                                                   const struct pipe_resource *templ)
{
   return virgl_resource_create_front(screen, templ, NULL);
}

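/* Import a resource from a winsys handle.  Only textures are supported.
 * Untyped blob resources are assigned a type based on the template and the
 * metadata of all of their planes.
 */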
static struct pipe_resource *virgl_resource_from_handle(struct pipe_screen *screen,
                                                        const struct pipe_resource *templ,
                                                        struct winsys_handle *whandle,
                                                        unsigned usage)
{
   uint32_t winsys_stride, plane_offset, plane;
   uint64_t modifier;
   struct virgl_screen *vs = virgl_screen(screen);
   if (templ->target == PIPE_BUFFER)
      return NULL;

   struct virgl_resource *res = CALLOC_STRUCT(virgl_resource);
   res->b = *templ;
   res->b.screen = &vs->base;
   pipe_reference_init(&res->b.reference, 1);

   plane = winsys_stride = plane_offset = modifier = 0;
   res->hw_res = vs->vws->resource_create_from_handle(vs->vws, whandle,
                                                      &plane,
                                                      &winsys_stride,
                                                      &plane_offset,
                                                      &modifier,
                                                      &res->blob_mem);

   /* Do not use the winsys-returned values for the guest storage info of a
    * classic (non-blob) resource. */
   if (!res->blob_mem) {
      winsys_stride = 0;
      plane_offset = 0;
      modifier = 0;
   }

   virgl_resource_layout(&res->b, &res->metadata, plane, winsys_stride,
                         plane_offset, modifier);
   if (!res->hw_res) {
      FREE(res);
      return NULL;
   }

   /* assign blob resource a type in case it was created untyped */
   if (res->blob_mem && plane == 0 &&
       (vs->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_UNTYPED_RESOURCE)) {
      uint32_t plane_strides[VIRGL_MAX_PLANE_COUNT];
      uint32_t plane_offsets[VIRGL_MAX_PLANE_COUNT];
      uint32_t plane_count = 0;
      struct pipe_resource *iter = &res->b;

      do {
         struct virgl_resource *plane = virgl_resource(iter);

         /* must be a plain 2D texture sharing the same hw_res */
         if (plane->b.target != PIPE_TEXTURE_2D ||
             plane->b.depth0 != 1 ||
             plane->b.array_size != 1 ||
             plane->b.last_level != 0 ||
             plane->b.nr_samples > 1 ||
             plane->hw_res != res->hw_res ||
             plane_count >= VIRGL_MAX_PLANE_COUNT) {
            vs->vws->resource_reference(vs->vws, &res->hw_res, NULL);
            FREE(res);
            return NULL;
         }

         plane_strides[plane_count] = plane->metadata.stride[0];
         plane_offsets[plane_count] = plane->metadata.plane_offset;
         plane_count++;
         iter = iter->next;
      } while (iter);

      vs->vws->resource_set_type(vs->vws,
                                 res->hw_res,
                                 pipe_to_virgl_format(res->b.format),
                                 pipe_to_virgl_bind(vs, res->b.bind),
                                 res->b.width0,
                                 res->b.height0,
                                 usage,
                                 res->metadata.modifier,
                                 plane_count,
                                 plane_strides,
                                 plane_offsets);
   }

   virgl_texture_init(res);

   return &res->b;
}

void virgl_init_screen_resource_functions(struct pipe_screen *screen)
{
    screen->resource_create_front = virgl_resource_create_front;
    screen->resource_create = virgl_resource_create;
    screen->resource_from_handle = virgl_resource_from_handle;
    screen->resource_get_handle = virgl_resource_get_handle;
    screen->resource_destroy = virgl_resource_destroy;
}

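/* Upload buffer data, extending a queued transfer in the transfer queue when
 * possible and falling back to the default subdata path otherwise.
 */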
static void virgl_buffer_subdata(struct pipe_context *pipe,
                                 struct pipe_resource *resource,
                                 unsigned usage, unsigned offset,
                                 unsigned size, const void *data)
{
   struct virgl_context *vctx = virgl_context(pipe);
   struct virgl_resource *vbuf = virgl_resource(resource);

   /* We can try virgl_transfer_queue_extend_buffer when there is no
    * flush/readback/wait required.  Based on virgl_resource_transfer_prepare,
    * the simplest way to make sure that is the case is to check the valid
    * buffer range.
    */
   if (!util_ranges_intersect(&vbuf->valid_buffer_range,
                              offset, offset + size) &&
       likely(!(virgl_debug & VIRGL_DEBUG_XFER)) &&
       virgl_transfer_queue_extend_buffer(&vctx->queue,
                                          vbuf->hw_res, offset, size, data)) {
      util_range_add(&vbuf->b, &vbuf->valid_buffer_range, offset, offset + size);
      return;
   }

   u_default_buffer_subdata(pipe, resource, usage, offset, size, data);
}

void virgl_init_context_resource_functions(struct pipe_context *ctx)
{
    ctx->buffer_map = virgl_resource_transfer_map;
    ctx->texture_map = virgl_texture_transfer_map;
    ctx->transfer_flush_region = virgl_buffer_transfer_flush_region;
    ctx->buffer_unmap = virgl_buffer_transfer_unmap;
    ctx->texture_unmap = virgl_texture_transfer_unmap;
    ctx->buffer_subdata = virgl_buffer_subdata;
    ctx->texture_subdata = u_default_texture_subdata;
}

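/* Allocate and initialize a virgl_transfer for the given box, computing the
 * offset of the box within the guest backing store from the resource
 * metadata.
 */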
struct virgl_transfer *
virgl_resource_create_transfer(struct virgl_context *vctx,
                               struct pipe_resource *pres,
                               const struct virgl_resource_metadata *metadata,
                               unsigned level, unsigned usage,
                               const struct pipe_box *box)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_transfer *trans;
   enum pipe_format format = pres->format;
   const unsigned blocksy = box->y / util_format_get_blockheight(format);
   const unsigned blocksx = box->x / util_format_get_blockwidth(format);

   unsigned offset = metadata->plane_offset + metadata->level_offset[level];
   if (pres->target == PIPE_TEXTURE_CUBE ||
       pres->target == PIPE_TEXTURE_CUBE_ARRAY ||
       pres->target == PIPE_TEXTURE_3D ||
       pres->target == PIPE_TEXTURE_2D_ARRAY) {
      offset += box->z * metadata->layer_stride[level];
   } else if (pres->target == PIPE_TEXTURE_1D_ARRAY) {
      offset += box->z * metadata->stride[level];
      assert(box->y == 0);
   } else if (pres->target == PIPE_BUFFER) {
      assert(box->y == 0 && box->z == 0);
   } else {
      assert(box->z == 0);
   }

   offset += blocksy * metadata->stride[level];
   offset += blocksx * util_format_get_blocksize(format);

   trans = slab_zalloc(&vctx->transfer_pool);
   if (!trans)
      return NULL;

   pipe_resource_reference(&trans->base.resource, pres);
   vws->resource_reference(vws, &trans->hw_res, virgl_resource(pres)->hw_res);

   trans->base.level = level;
   trans->base.usage = usage;
   trans->base.box = *box;
   trans->base.stride = metadata->stride[level];
   trans->base.layer_stride = metadata->layer_stride[level];
   trans->offset = offset;
   util_range_init(&trans->range);

   if (trans->base.resource->target != PIPE_TEXTURE_3D &&
       trans->base.resource->target != PIPE_TEXTURE_CUBE &&
       trans->base.resource->target != PIPE_TEXTURE_1D_ARRAY &&
       trans->base.resource->target != PIPE_TEXTURE_2D_ARRAY &&
       trans->base.resource->target != PIPE_TEXTURE_CUBE_ARRAY)
      trans->l_stride = 0;
   else
      trans->l_stride = trans->base.layer_stride;

   return trans;
}

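/* Release all references held by a transfer and return it to the pool. */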
void virgl_resource_destroy_transfer(struct virgl_context *vctx,
                                     struct virgl_transfer *trans)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;

   vws->resource_reference(vws, &trans->copy_src_hw_res, NULL);

   util_range_destroy(&trans->range);
   vws->resource_reference(vws, &trans->hw_res, NULL);
   pipe_resource_reference(&trans->base.resource, NULL);
   slab_free(&vctx->transfer_pool, trans);
}

void virgl_resource_destroy(struct pipe_screen *screen,
                            struct pipe_resource *resource)
{
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = virgl_resource(resource);

   if (res->b.target == PIPE_BUFFER)
      util_range_destroy(&res->valid_buffer_range);

   vs->vws->resource_reference(vs->vws, &res->hw_res, NULL);
   FREE(res);
}

bool virgl_resource_get_handle(struct pipe_screen *screen,
                               struct pipe_context *context,
                               struct pipe_resource *resource,
                               struct winsys_handle *whandle,
                               unsigned usage)
{
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = virgl_resource(resource);

   if (res->b.target == PIPE_BUFFER)
      return false;

   return vs->vws->resource_get_handle(vs->vws, res->hw_res,
                                       res->metadata.stride[0],
                                       whandle);
}

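/* Mark a level (or the whole buffer) as dirty: the guest backing store no
 * longer matches the host storage, so a readback is needed before a
 * non-discarding CPU access.
 */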
void virgl_resource_dirty(struct virgl_resource *res, uint32_t level)
{
   if (res) {
      if (res->b.target == PIPE_BUFFER)
         res->clean_mask &= ~1;
      else
         res->clean_mask &= ~(1 << level);
   }
}