1/*
2 * Copyright © 2014-2018 NVIDIA Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#include <inttypes.h>
25#include <stdlib.h>
26
27#include "util/u_debug.h"
28#include "util/u_draw.h"
29#include "util/u_inlines.h"
30#include "util/u_upload_mgr.h"
31
32#include "tegra_context.h"
33#include "tegra_resource.h"
34#include "tegra_screen.h"
35
36static void
37tegra_destroy(struct pipe_context *pcontext)
38{
39   struct tegra_context *context = to_tegra_context(pcontext);
40
41   if (context->base.stream_uploader)
42      u_upload_destroy(context->base.stream_uploader);
43
44   context->gpu->destroy(context->gpu);
45   free(context);
46}
47
/* Forward a draw to the wrapped GPU context, unwrapping any wrapped
 * resources (indirect draw buffers, index buffer) it references. */
static void
tegra_draw_vbo(struct pipe_context *pcontext,
               const struct pipe_draw_info *pinfo,
               unsigned drawid_offset,
               const struct pipe_draw_indirect_info *pindirect,
               const struct pipe_draw_start_count_bias *draws,
               unsigned num_draws)
{
   /* Split multi-draws into single draws; each comes back through here. */
   if (num_draws > 1) {
      util_draw_multi(pcontext, pinfo, drawid_offset, pindirect, draws, num_draws);
      return;
   }

   /* Cull empty direct draws. Indirect draws take their counts from a
    * buffer, so they cannot be culled on the CPU here. */
   if (!pindirect && (!draws[0].count || !pinfo->instance_count))
      return;

   struct tegra_context *context = to_tegra_context(pcontext);
   struct pipe_draw_indirect_info indirect;
   struct pipe_draw_info info;

   /* Copy the caller's structs before rewriting resource pointers — the
    * inputs are const and may be reused by the state tracker. */
   if (pinfo && ((pindirect && pindirect->buffer) || pinfo->index_size)) {
      memcpy(&info, pinfo, sizeof(info));

      if (pindirect && pindirect->buffer) {
         memcpy(&indirect, pindirect, sizeof(indirect));
         indirect.buffer = tegra_resource_unwrap(pindirect->buffer);
         indirect.indirect_draw_count = tegra_resource_unwrap(pindirect->indirect_draw_count);
         pindirect = &indirect;
      }

      /* User-space index buffers live in CPU memory; nothing to unwrap. */
      if (pinfo->index_size && !pinfo->has_user_indices)
         info.index.resource = tegra_resource_unwrap(info.index.resource);

      pinfo = &info;
   }

   context->gpu->draw_vbo(context->gpu, pinfo, drawid_offset, pindirect, draws, num_draws);
}
86
87static void
88tegra_render_condition(struct pipe_context *pcontext,
89                       struct pipe_query *query,
90                       bool condition,
91                       unsigned int mode)
92{
93   struct tegra_context *context = to_tegra_context(pcontext);
94
95   context->gpu->render_condition(context->gpu, query, condition, mode);
96}
97
98static struct pipe_query *
99tegra_create_query(struct pipe_context *pcontext, unsigned int query_type,
100                   unsigned int index)
101{
102   struct tegra_context *context = to_tegra_context(pcontext);
103
104   return context->gpu->create_query(context->gpu, query_type, index);
105}
106
107static struct pipe_query *
108tegra_create_batch_query(struct pipe_context *pcontext,
109                         unsigned int num_queries,
110                         unsigned int *queries)
111{
112   struct tegra_context *context = to_tegra_context(pcontext);
113
114   return context->gpu->create_batch_query(context->gpu, num_queries,
115                                           queries);
116}
117
118static void
119tegra_destroy_query(struct pipe_context *pcontext, struct pipe_query *query)
120{
121   struct tegra_context *context = to_tegra_context(pcontext);
122
123   context->gpu->destroy_query(context->gpu, query);
124}
125
126static bool
127tegra_begin_query(struct pipe_context *pcontext, struct pipe_query *query)
128{
129   struct tegra_context *context = to_tegra_context(pcontext);
130
131   return context->gpu->begin_query(context->gpu, query);
132}
133
134static bool
135tegra_end_query(struct pipe_context *pcontext, struct pipe_query *query)
136{
137   struct tegra_context *context = to_tegra_context(pcontext);
138
139   return context->gpu->end_query(context->gpu, query);
140}
141
142static bool
143tegra_get_query_result(struct pipe_context *pcontext,
144                       struct pipe_query *query,
145                       bool wait,
146                       union pipe_query_result *result)
147{
148   struct tegra_context *context = to_tegra_context(pcontext);
149
150   return context->gpu->get_query_result(context->gpu, query, wait,
151                     result);
152}
153
154static void
155tegra_get_query_result_resource(struct pipe_context *pcontext,
156                                struct pipe_query *query,
157                                enum pipe_query_flags flags,
158                                enum pipe_query_value_type result_type,
159                                int index,
160                                struct pipe_resource *resource,
161                                unsigned int offset)
162{
163   struct tegra_context *context = to_tegra_context(pcontext);
164
165   context->gpu->get_query_result_resource(context->gpu, query, flags,
166                                           result_type, index, resource,
167                                           offset);
168}
169
170static void
171tegra_set_active_query_state(struct pipe_context *pcontext, bool enable)
172{
173   struct tegra_context *context = to_tegra_context(pcontext);
174
175   context->gpu->set_active_query_state(context->gpu, enable);
176}
177
178static void *
179tegra_create_blend_state(struct pipe_context *pcontext,
180                         const struct pipe_blend_state *cso)
181{
182   struct tegra_context *context = to_tegra_context(pcontext);
183
184   return context->gpu->create_blend_state(context->gpu, cso);
185}
186
187static void
188tegra_bind_blend_state(struct pipe_context *pcontext, void *so)
189{
190   struct tegra_context *context = to_tegra_context(pcontext);
191
192   context->gpu->bind_blend_state(context->gpu, so);
193}
194
195static void
196tegra_delete_blend_state(struct pipe_context *pcontext, void *so)
197{
198   struct tegra_context *context = to_tegra_context(pcontext);
199
200   context->gpu->delete_blend_state(context->gpu, so);
201}
202
203static void *
204tegra_create_sampler_state(struct pipe_context *pcontext,
205                           const struct pipe_sampler_state *cso)
206{
207   struct tegra_context *context = to_tegra_context(pcontext);
208
209   return context->gpu->create_sampler_state(context->gpu, cso);
210}
211
212static void
213tegra_bind_sampler_states(struct pipe_context *pcontext, unsigned shader,
214                          unsigned start_slot, unsigned num_samplers,
215                          void **samplers)
216{
217   struct tegra_context *context = to_tegra_context(pcontext);
218
219   context->gpu->bind_sampler_states(context->gpu, shader, start_slot,
220                                     num_samplers, samplers);
221}
222
223static void
224tegra_delete_sampler_state(struct pipe_context *pcontext, void *so)
225{
226   struct tegra_context *context = to_tegra_context(pcontext);
227
228   context->gpu->delete_sampler_state(context->gpu, so);
229}
230
231static void *
232tegra_create_rasterizer_state(struct pipe_context *pcontext,
233                              const struct pipe_rasterizer_state *cso)
234{
235   struct tegra_context *context = to_tegra_context(pcontext);
236
237   return context->gpu->create_rasterizer_state(context->gpu, cso);
238}
239
240static void
241tegra_bind_rasterizer_state(struct pipe_context *pcontext, void *so)
242{
243   struct tegra_context *context = to_tegra_context(pcontext);
244
245   context->gpu->bind_rasterizer_state(context->gpu, so);
246}
247
248static void
249tegra_delete_rasterizer_state(struct pipe_context *pcontext, void *so)
250{
251   struct tegra_context *context = to_tegra_context(pcontext);
252
253   context->gpu->delete_rasterizer_state(context->gpu, so);
254}
255
256static void *
257tegra_create_depth_stencil_alpha_state(struct pipe_context *pcontext,
258                                       const struct pipe_depth_stencil_alpha_state *cso)
259{
260   struct tegra_context *context = to_tegra_context(pcontext);
261
262   return context->gpu->create_depth_stencil_alpha_state(context->gpu, cso);
263}
264
265static void
266tegra_bind_depth_stencil_alpha_state(struct pipe_context *pcontext, void *so)
267{
268   struct tegra_context *context = to_tegra_context(pcontext);
269
270   context->gpu->bind_depth_stencil_alpha_state(context->gpu, so);
271}
272
273static void
274tegra_delete_depth_stencil_alpha_state(struct pipe_context *pcontext, void *so)
275{
276   struct tegra_context *context = to_tegra_context(pcontext);
277
278   context->gpu->delete_depth_stencil_alpha_state(context->gpu, so);
279}
280
281static void *
282tegra_create_fs_state(struct pipe_context *pcontext,
283                      const struct pipe_shader_state *cso)
284{
285   struct tegra_context *context = to_tegra_context(pcontext);
286
287   return context->gpu->create_fs_state(context->gpu, cso);
288}
289
290static void
291tegra_bind_fs_state(struct pipe_context *pcontext, void *so)
292{
293   struct tegra_context *context = to_tegra_context(pcontext);
294
295   context->gpu->bind_fs_state(context->gpu, so);
296}
297
298static void
299tegra_delete_fs_state(struct pipe_context *pcontext, void *so)
300{
301   struct tegra_context *context = to_tegra_context(pcontext);
302
303   context->gpu->delete_fs_state(context->gpu, so);
304}
305
306static void *
307tegra_create_vs_state(struct pipe_context *pcontext,
308                      const struct pipe_shader_state *cso)
309{
310   struct tegra_context *context = to_tegra_context(pcontext);
311
312   return context->gpu->create_vs_state(context->gpu, cso);
313}
314
315static void
316tegra_bind_vs_state(struct pipe_context *pcontext, void *so)
317{
318   struct tegra_context *context = to_tegra_context(pcontext);
319
320   context->gpu->bind_vs_state(context->gpu, so);
321}
322
323static void
324tegra_delete_vs_state(struct pipe_context *pcontext, void *so)
325{
326   struct tegra_context *context = to_tegra_context(pcontext);
327
328   context->gpu->delete_vs_state(context->gpu, so);
329}
330
331static void *
332tegra_create_gs_state(struct pipe_context *pcontext,
333                      const struct pipe_shader_state *cso)
334{
335   struct tegra_context *context = to_tegra_context(pcontext);
336
337   return context->gpu->create_gs_state(context->gpu, cso);
338}
339
340static void
341tegra_bind_gs_state(struct pipe_context *pcontext, void *so)
342{
343   struct tegra_context *context = to_tegra_context(pcontext);
344
345   context->gpu->bind_gs_state(context->gpu, so);
346}
347
348static void
349tegra_delete_gs_state(struct pipe_context *pcontext, void *so)
350{
351   struct tegra_context *context = to_tegra_context(pcontext);
352
353   context->gpu->delete_gs_state(context->gpu, so);
354}
355
356static void *
357tegra_create_tcs_state(struct pipe_context *pcontext,
358                       const struct pipe_shader_state *cso)
359{
360   struct tegra_context *context = to_tegra_context(pcontext);
361
362   return context->gpu->create_tcs_state(context->gpu, cso);
363}
364
365static void
366tegra_bind_tcs_state(struct pipe_context *pcontext, void *so)
367{
368   struct tegra_context *context = to_tegra_context(pcontext);
369
370   context->gpu->bind_tcs_state(context->gpu, so);
371}
372
373static void
374tegra_delete_tcs_state(struct pipe_context *pcontext, void *so)
375{
376   struct tegra_context *context = to_tegra_context(pcontext);
377
378   context->gpu->delete_tcs_state(context->gpu, so);
379}
380
381static void *
382tegra_create_tes_state(struct pipe_context *pcontext,
383                       const struct pipe_shader_state *cso)
384{
385   struct tegra_context *context = to_tegra_context(pcontext);
386
387   return context->gpu->create_tes_state(context->gpu, cso);
388}
389
390static void
391tegra_bind_tes_state(struct pipe_context *pcontext, void *so)
392{
393   struct tegra_context *context = to_tegra_context(pcontext);
394
395   context->gpu->bind_tes_state(context->gpu, so);
396}
397
398static void
399tegra_delete_tes_state(struct pipe_context *pcontext, void *so)
400{
401   struct tegra_context *context = to_tegra_context(pcontext);
402
403   context->gpu->delete_tes_state(context->gpu, so);
404}
405
406static void *
407tegra_create_vertex_elements_state(struct pipe_context *pcontext,
408                                   unsigned num_elements,
409                                   const struct pipe_vertex_element *elements)
410{
411   struct tegra_context *context = to_tegra_context(pcontext);
412
413   return context->gpu->create_vertex_elements_state(context->gpu,
414                                                     num_elements,
415                                                     elements);
416}
417
418static void
419tegra_bind_vertex_elements_state(struct pipe_context *pcontext, void *so)
420{
421   struct tegra_context *context = to_tegra_context(pcontext);
422
423   context->gpu->bind_vertex_elements_state(context->gpu, so);
424}
425
426static void
427tegra_delete_vertex_elements_state(struct pipe_context *pcontext, void *so)
428{
429   struct tegra_context *context = to_tegra_context(pcontext);
430
431   context->gpu->delete_vertex_elements_state(context->gpu, so);
432}
433
434static void
435tegra_set_blend_color(struct pipe_context *pcontext,
436                      const struct pipe_blend_color *color)
437{
438   struct tegra_context *context = to_tegra_context(pcontext);
439
440   context->gpu->set_blend_color(context->gpu, color);
441}
442
443static void
444tegra_set_stencil_ref(struct pipe_context *pcontext,
445                      const struct pipe_stencil_ref ref)
446{
447   struct tegra_context *context = to_tegra_context(pcontext);
448
449   context->gpu->set_stencil_ref(context->gpu, ref);
450}
451
452static void
453tegra_set_sample_mask(struct pipe_context *pcontext, unsigned int mask)
454{
455   struct tegra_context *context = to_tegra_context(pcontext);
456
457   context->gpu->set_sample_mask(context->gpu, mask);
458}
459
460static void
461tegra_set_min_samples(struct pipe_context *pcontext, unsigned int samples)
462{
463   struct tegra_context *context = to_tegra_context(pcontext);
464
465   context->gpu->set_min_samples(context->gpu, samples);
466}
467
468static void
469tegra_set_clip_state(struct pipe_context *pcontext,
470                     const struct pipe_clip_state *state)
471{
472   struct tegra_context *context = to_tegra_context(pcontext);
473
474   context->gpu->set_clip_state(context->gpu, state);
475}
476
477static void
478tegra_set_constant_buffer(struct pipe_context *pcontext, unsigned int shader,
479                          unsigned int index, bool take_ownership,
480                          const struct pipe_constant_buffer *buf)
481{
482   struct tegra_context *context = to_tegra_context(pcontext);
483   struct pipe_constant_buffer buffer;
484
485   if (buf && buf->buffer) {
486      memcpy(&buffer, buf, sizeof(buffer));
487      buffer.buffer = tegra_resource_unwrap(buffer.buffer);
488      buf = &buffer;
489   }
490
491   context->gpu->set_constant_buffer(context->gpu, shader, index, take_ownership, buf);
492}
493
494static void
495tegra_set_framebuffer_state(struct pipe_context *pcontext,
496                            const struct pipe_framebuffer_state *fb)
497{
498   struct tegra_context *context = to_tegra_context(pcontext);
499   struct pipe_framebuffer_state state;
500   unsigned i;
501
502   if (fb) {
503      memcpy(&state, fb, sizeof(state));
504
505      for (i = 0; i < fb->nr_cbufs; i++)
506         state.cbufs[i] = tegra_surface_unwrap(fb->cbufs[i]);
507
508      while (i < PIPE_MAX_COLOR_BUFS)
509         state.cbufs[i++] = NULL;
510
511      state.zsbuf = tegra_surface_unwrap(fb->zsbuf);
512
513      fb = &state;
514   }
515
516   context->gpu->set_framebuffer_state(context->gpu, fb);
517}
518
519static void
520tegra_set_polygon_stipple(struct pipe_context *pcontext,
521                          const struct pipe_poly_stipple *stipple)
522{
523   struct tegra_context *context = to_tegra_context(pcontext);
524
525   context->gpu->set_polygon_stipple(context->gpu, stipple);
526}
527
528static void
529tegra_set_scissor_states(struct pipe_context *pcontext, unsigned start_slot,
530                         unsigned num_scissors,
531                         const struct pipe_scissor_state *scissors)
532{
533   struct tegra_context *context = to_tegra_context(pcontext);
534
535   context->gpu->set_scissor_states(context->gpu, start_slot, num_scissors,
536                                    scissors);
537}
538
539static void
540tegra_set_window_rectangles(struct pipe_context *pcontext, bool include,
541                            unsigned int num_rectangles,
542                            const struct pipe_scissor_state *rectangles)
543{
544   struct tegra_context *context = to_tegra_context(pcontext);
545
546   context->gpu->set_window_rectangles(context->gpu, include, num_rectangles,
547                                       rectangles);
548}
549
550static void
551tegra_set_viewport_states(struct pipe_context *pcontext, unsigned start_slot,
552                          unsigned num_viewports,
553                          const struct pipe_viewport_state *viewports)
554{
555   struct tegra_context *context = to_tegra_context(pcontext);
556
557   context->gpu->set_viewport_states(context->gpu, start_slot, num_viewports,
558                                     viewports);
559}
560
/* Bind sampler views, unwrapping each wrapper view to the GPU driver's
 * view and maintaining the wrapper's private reference pool. */
static void
tegra_set_sampler_views(struct pipe_context *pcontext, unsigned shader,
                        unsigned start_slot, unsigned num_views,
                        unsigned unbind_num_trailing_slots,
                        bool take_ownership,
                        struct pipe_sampler_view **pviews)
{
   struct pipe_sampler_view *views[PIPE_MAX_SHADER_SAMPLER_VIEWS];
   struct tegra_context *context = to_tegra_context(pcontext);
   struct tegra_sampler_view *view;
   unsigned i;

   for (i = 0; i < num_views; i++) {
      /* adjust private reference count: one reference from the pool taken
       * in tegra_create_sampler_view is consumed per bind; when the pool
       * runs out, grant the GPU view another batch of 100000000 references
       * and refill the pool. NOTE(review): presumably this offsets
       * references the GPU driver takes/drops on the views passed below —
       * confirm against the GPU driver's set_sampler_views semantics. */
      view = to_tegra_sampler_view(pviews[i]);
      if (view) {
         view->refcount--;
         if (!view->refcount) {
            view->refcount = 100000000;
            p_atomic_add(&view->gpu->reference.count, view->refcount);
         }
      }

      views[i] = tegra_sampler_view_unwrap(pviews[i]);
   }

   context->gpu->set_sampler_views(context->gpu, shader, start_slot,
                                   num_views, unbind_num_trailing_slots,
                                   take_ownership, views);
}
591
592static void
593tegra_set_tess_state(struct pipe_context *pcontext,
594                     const float default_outer_level[4],
595                     const float default_inner_level[2])
596{
597   struct tegra_context *context = to_tegra_context(pcontext);
598
599   context->gpu->set_tess_state(context->gpu, default_outer_level,
600                                default_inner_level);
601}
602
603static void
604tegra_set_debug_callback(struct pipe_context *pcontext,
605                         const struct util_debug_callback *callback)
606{
607   struct tegra_context *context = to_tegra_context(pcontext);
608
609   context->gpu->set_debug_callback(context->gpu, callback);
610}
611
612static void
613tegra_set_shader_buffers(struct pipe_context *pcontext, unsigned int shader,
614                         unsigned start, unsigned count,
615                         const struct pipe_shader_buffer *buffers,
616                         unsigned writable_bitmask)
617{
618   struct tegra_context *context = to_tegra_context(pcontext);
619
620   context->gpu->set_shader_buffers(context->gpu, shader, start, count,
621                                    buffers, writable_bitmask);
622}
623
624static void
625tegra_set_shader_images(struct pipe_context *pcontext, unsigned int shader,
626                        unsigned start, unsigned count,
627                        unsigned unbind_num_trailing_slots,
628                        const struct pipe_image_view *images)
629{
630   struct tegra_context *context = to_tegra_context(pcontext);
631
632   context->gpu->set_shader_images(context->gpu, shader, start, count,
633                                   unbind_num_trailing_slots, images);
634}
635
636static void
637tegra_set_vertex_buffers(struct pipe_context *pcontext, unsigned start_slot,
638                         unsigned num_buffers, unsigned unbind_num_trailing_slots,
639                         bool take_ownership,
640                         const struct pipe_vertex_buffer *buffers)
641{
642   struct tegra_context *context = to_tegra_context(pcontext);
643   struct pipe_vertex_buffer buf[PIPE_MAX_SHADER_INPUTS];
644   unsigned i;
645
646   if (num_buffers && buffers) {
647      memcpy(buf, buffers, num_buffers * sizeof(struct pipe_vertex_buffer));
648
649      for (i = 0; i < num_buffers; i++) {
650         if (!buf[i].is_user_buffer)
651            buf[i].buffer.resource = tegra_resource_unwrap(buf[i].buffer.resource);
652      }
653
654      buffers = buf;
655   }
656
657   context->gpu->set_vertex_buffers(context->gpu, start_slot, num_buffers,
658                                    unbind_num_trailing_slots,
659                                    take_ownership, buffers);
660}
661
662static struct pipe_stream_output_target *
663tegra_create_stream_output_target(struct pipe_context *pcontext,
664                                  struct pipe_resource *presource,
665                                  unsigned buffer_offset,
666                                  unsigned buffer_size)
667{
668   struct tegra_resource *resource = to_tegra_resource(presource);
669   struct tegra_context *context = to_tegra_context(pcontext);
670
671   return context->gpu->create_stream_output_target(context->gpu,
672                                                    resource->gpu,
673                                                    buffer_offset,
674                                                    buffer_size);
675}
676
677static void
678tegra_stream_output_target_destroy(struct pipe_context *pcontext,
679                                   struct pipe_stream_output_target *target)
680{
681   struct tegra_context *context = to_tegra_context(pcontext);
682
683   context->gpu->stream_output_target_destroy(context->gpu, target);
684}
685
686static void
687tegra_set_stream_output_targets(struct pipe_context *pcontext,
688                                unsigned num_targets,
689                                struct pipe_stream_output_target **targets,
690                                const unsigned *offsets)
691{
692   struct tegra_context *context = to_tegra_context(pcontext);
693
694   context->gpu->set_stream_output_targets(context->gpu, num_targets,
695                                           targets, offsets);
696}
697
698static void
699tegra_resource_copy_region(struct pipe_context *pcontext,
700                           struct pipe_resource *pdst,
701                           unsigned int dst_level,
702                           unsigned int dstx,
703                           unsigned int dsty,
704                           unsigned int dstz,
705                           struct pipe_resource *psrc,
706                           unsigned int src_level,
707                           const struct pipe_box *src_box)
708{
709   struct tegra_context *context = to_tegra_context(pcontext);
710   struct tegra_resource *dst = to_tegra_resource(pdst);
711   struct tegra_resource *src = to_tegra_resource(psrc);
712
713   context->gpu->resource_copy_region(context->gpu, dst->gpu, dst_level, dstx,
714                                      dsty, dstz, src->gpu, src_level,
715                                      src_box);
716}
717
718static void
719tegra_blit(struct pipe_context *pcontext, const struct pipe_blit_info *pinfo)
720{
721   struct tegra_context *context = to_tegra_context(pcontext);
722   struct pipe_blit_info info;
723
724   if (pinfo) {
725      memcpy(&info, pinfo, sizeof(info));
726      info.dst.resource = tegra_resource_unwrap(info.dst.resource);
727      info.src.resource = tegra_resource_unwrap(info.src.resource);
728      pinfo = &info;
729   }
730
731   context->gpu->blit(context->gpu, pinfo);
732}
733
734static void
735tegra_clear(struct pipe_context *pcontext, unsigned buffers, const struct pipe_scissor_state *scissor_state,
736            const union pipe_color_union *color, double depth,
737            unsigned stencil)
738{
739   struct tegra_context *context = to_tegra_context(pcontext);
740
741   context->gpu->clear(context->gpu, buffers, NULL, color, depth, stencil);
742}
743
744static void
745tegra_clear_render_target(struct pipe_context *pcontext,
746                          struct pipe_surface *pdst,
747                          const union pipe_color_union *color,
748                          unsigned int dstx,
749                          unsigned int dsty,
750                          unsigned int width,
751                          unsigned int height,
752                          bool render_condition)
753{
754   struct tegra_context *context = to_tegra_context(pcontext);
755   struct tegra_surface *dst = to_tegra_surface(pdst);
756
757   context->gpu->clear_render_target(context->gpu, dst->gpu, color, dstx,
758                                     dsty, width, height, render_condition);
759}
760
761static void
762tegra_clear_depth_stencil(struct pipe_context *pcontext,
763                          struct pipe_surface *pdst,
764                          unsigned int flags,
765                          double depth,
766                          unsigned int stencil,
767                          unsigned int dstx,
768                          unsigned int dsty,
769                          unsigned int width,
770                          unsigned int height,
771                          bool render_condition)
772{
773   struct tegra_context *context = to_tegra_context(pcontext);
774   struct tegra_surface *dst = to_tegra_surface(pdst);
775
776   context->gpu->clear_depth_stencil(context->gpu, dst->gpu, flags, depth,
777                                     stencil, dstx, dsty, width, height,
778                                     render_condition);
779}
780
781static void
782tegra_clear_texture(struct pipe_context *pcontext,
783                    struct pipe_resource *presource,
784                    unsigned int level,
785                    const struct pipe_box *box,
786                    const void *data)
787{
788   struct tegra_resource *resource = to_tegra_resource(presource);
789   struct tegra_context *context = to_tegra_context(pcontext);
790
791   context->gpu->clear_texture(context->gpu, resource->gpu, level, box, data);
792}
793
794static void
795tegra_clear_buffer(struct pipe_context *pcontext,
796                   struct pipe_resource *presource,
797                   unsigned int offset,
798                   unsigned int size,
799                   const void *value,
800                   int value_size)
801{
802   struct tegra_resource *resource = to_tegra_resource(presource);
803   struct tegra_context *context = to_tegra_context(pcontext);
804
805   context->gpu->clear_buffer(context->gpu, resource->gpu, offset, size,
806                              value, value_size);
807}
808
809static void
810tegra_flush(struct pipe_context *pcontext, struct pipe_fence_handle **fence,
811            unsigned flags)
812{
813   struct tegra_context *context = to_tegra_context(pcontext);
814
815   context->gpu->flush(context->gpu, fence, flags);
816}
817
818static void
819tegra_create_fence_fd(struct pipe_context *pcontext,
820                      struct pipe_fence_handle **fence,
821                      int fd, enum pipe_fd_type type)
822{
823   struct tegra_context *context = to_tegra_context(pcontext);
824
825   assert(type == PIPE_FD_TYPE_NATIVE_SYNC);
826   context->gpu->create_fence_fd(context->gpu, fence, fd, type);
827}
828
829static void
830tegra_fence_server_sync(struct pipe_context *pcontext,
831                        struct pipe_fence_handle *fence)
832{
833   struct tegra_context *context = to_tegra_context(pcontext);
834
835   context->gpu->fence_server_sync(context->gpu, fence);
836}
837
838static struct pipe_sampler_view *
839tegra_create_sampler_view(struct pipe_context *pcontext,
840                          struct pipe_resource *presource,
841                          const struct pipe_sampler_view *template)
842{
843   struct tegra_resource *resource = to_tegra_resource(presource);
844   struct tegra_context *context = to_tegra_context(pcontext);
845   struct tegra_sampler_view *view;
846
847   view = calloc(1, sizeof(*view));
848   if (!view)
849      return NULL;
850
851   view->base = *template;
852   view->base.context = pcontext;
853   /* overwrite to prevent reference from being released */
854   view->base.texture = NULL;
855   pipe_reference_init(&view->base.reference, 1);
856   pipe_resource_reference(&view->base.texture, presource);
857
858   view->gpu = context->gpu->create_sampler_view(context->gpu, resource->gpu,
859                                                 template);
860
861   /* use private reference count */
862   view->gpu->reference.count += 100000000;
863   view->refcount = 100000000;
864
865   return &view->base;
866}
867
/* Destroy a wrapper sampler view created by tegra_create_sampler_view. */
static void
tegra_sampler_view_destroy(struct pipe_context *pcontext,
                           struct pipe_sampler_view *pview)
{
   struct tegra_sampler_view *view = to_tegra_sampler_view(pview);

   /* release the wrapper's reference on the wrapped resource */
   pipe_resource_reference(&view->base.texture, NULL);
   /* adjust private reference count: return the unused portion of the
    * private reference pool before dropping the final reference below */
   p_atomic_add(&view->gpu->reference.count, -view->refcount);
   pipe_sampler_view_reference(&view->gpu, NULL);
   free(view);
}
880
881static struct pipe_surface *
882tegra_create_surface(struct pipe_context *pcontext,
883                     struct pipe_resource *presource,
884                     const struct pipe_surface *template)
885{
886   struct tegra_resource *resource = to_tegra_resource(presource);
887   struct tegra_context *context = to_tegra_context(pcontext);
888   struct tegra_surface *surface;
889
890   surface = calloc(1, sizeof(*surface));
891   if (!surface)
892      return NULL;
893
894   surface->gpu = context->gpu->create_surface(context->gpu, resource->gpu,
895                                               template);
896   if (!surface->gpu) {
897      free(surface);
898      return NULL;
899   }
900
901   memcpy(&surface->base, surface->gpu, sizeof(*surface->gpu));
902   /* overwrite to prevent reference from being released */
903   surface->base.texture = NULL;
904
905   pipe_reference_init(&surface->base.reference, 1);
906   pipe_resource_reference(&surface->base.texture, presource);
907   surface->base.context = &context->base;
908
909   return &surface->base;
910}
911
912static void
913tegra_surface_destroy(struct pipe_context *pcontext,
914                      struct pipe_surface *psurface)
915{
916   struct tegra_surface *surface = to_tegra_surface(psurface);
917
918   pipe_resource_reference(&surface->base.texture, NULL);
919   pipe_surface_reference(&surface->gpu, NULL);
920   free(surface);
921}
922
923static void *
924tegra_transfer_map(struct pipe_context *pcontext,
925                   struct pipe_resource *presource,
926                   unsigned level, unsigned usage,
927                   const struct pipe_box *box,
928                   struct pipe_transfer **ptransfer)
929{
930   struct tegra_resource *resource = to_tegra_resource(presource);
931   struct tegra_context *context = to_tegra_context(pcontext);
932   struct tegra_transfer *transfer;
933
934   transfer = calloc(1, sizeof(*transfer));
935   if (!transfer)
936      return NULL;
937
938   if (presource->target == PIPE_BUFFER) {
939      transfer->map = context->gpu->buffer_map(context->gpu, resource->gpu,
940                                                 level, usage, box,
941                                                 &transfer->gpu);
942   } else {
943      transfer->map = context->gpu->texture_map(context->gpu, resource->gpu,
944                                                 level, usage, box,
945                                                 &transfer->gpu);
946   }
947   memcpy(&transfer->base, transfer->gpu, sizeof(*transfer->gpu));
948   transfer->base.resource = NULL;
949   pipe_resource_reference(&transfer->base.resource, presource);
950
951   *ptransfer = &transfer->base;
952
953   return transfer->map;
954}
955
956static void
957tegra_transfer_flush_region(struct pipe_context *pcontext,
958                            struct pipe_transfer *ptransfer,
959                            const struct pipe_box *box)
960{
961   struct tegra_transfer *transfer = to_tegra_transfer(ptransfer);
962   struct tegra_context *context = to_tegra_context(pcontext);
963
964   context->gpu->transfer_flush_region(context->gpu, transfer->gpu, box);
965}
966
967static void
968tegra_transfer_unmap(struct pipe_context *pcontext,
969                     struct pipe_transfer *ptransfer)
970{
971   struct tegra_transfer *transfer = to_tegra_transfer(ptransfer);
972   struct tegra_context *context = to_tegra_context(pcontext);
973
974   if (ptransfer->resource->target == PIPE_BUFFER)
975      context->gpu->buffer_unmap(context->gpu, transfer->gpu);
976   else
977      context->gpu->texture_unmap(context->gpu, transfer->gpu);
978   pipe_resource_reference(&transfer->base.resource, NULL);
979   free(transfer);
980}
981
982static void
983tegra_buffer_subdata(struct pipe_context *pcontext,
984                     struct pipe_resource *presource,
985                     unsigned usage, unsigned offset,
986                     unsigned size, const void *data)
987{
988   struct tegra_resource *resource = to_tegra_resource(presource);
989   struct tegra_context *context = to_tegra_context(pcontext);
990
991   context->gpu->buffer_subdata(context->gpu, resource->gpu, usage, offset,
992                                size, data);
993}
994
995static void
996tegra_texture_subdata(struct pipe_context *pcontext,
997                      struct pipe_resource *presource,
998                      unsigned level,
999                      unsigned usage,
1000                      const struct pipe_box *box,
1001                      const void *data,
1002                      unsigned stride,
1003                      unsigned layer_stride)
1004{
1005   struct tegra_resource *resource = to_tegra_resource(presource);
1006   struct tegra_context *context = to_tegra_context(pcontext);
1007
1008   context->gpu->texture_subdata(context->gpu, resource->gpu, level, usage,
1009                                 box, data, stride, layer_stride);
1010}
1011
1012static void
1013tegra_texture_barrier(struct pipe_context *pcontext, unsigned int flags)
1014{
1015   struct tegra_context *context = to_tegra_context(pcontext);
1016
1017   context->gpu->texture_barrier(context->gpu, flags);
1018}
1019
1020static void
1021tegra_memory_barrier(struct pipe_context *pcontext, unsigned int flags)
1022{
1023   struct tegra_context *context = to_tegra_context(pcontext);
1024
1025   if (!(flags & ~PIPE_BARRIER_UPDATE))
1026      return;
1027
1028   context->gpu->memory_barrier(context->gpu, flags);
1029}
1030
1031static struct pipe_video_codec *
1032tegra_create_video_codec(struct pipe_context *pcontext,
1033                         const struct pipe_video_codec *template)
1034{
1035   struct tegra_context *context = to_tegra_context(pcontext);
1036
1037   return context->gpu->create_video_codec(context->gpu, template);
1038}
1039
1040static struct pipe_video_buffer *
1041tegra_create_video_buffer(struct pipe_context *pcontext,
1042                          const struct pipe_video_buffer *template)
1043{
1044   struct tegra_context *context = to_tegra_context(pcontext);
1045
1046   return context->gpu->create_video_buffer(context->gpu, template);
1047}
1048
1049static void *
1050tegra_create_compute_state(struct pipe_context *pcontext,
1051                           const struct pipe_compute_state *template)
1052{
1053   struct tegra_context *context = to_tegra_context(pcontext);
1054
1055   return context->gpu->create_compute_state(context->gpu, template);
1056}
1057
1058static void
1059tegra_bind_compute_state(struct pipe_context *pcontext, void *so)
1060{
1061   struct tegra_context *context = to_tegra_context(pcontext);
1062
1063   context->gpu->bind_compute_state(context->gpu, so);
1064}
1065
1066static void
1067tegra_delete_compute_state(struct pipe_context *pcontext, void *so)
1068{
1069   struct tegra_context *context = to_tegra_context(pcontext);
1070
1071   context->gpu->delete_compute_state(context->gpu, so);
1072}
1073
1074static void
1075tegra_set_compute_resources(struct pipe_context *pcontext,
1076                            unsigned int start, unsigned int count,
1077                            struct pipe_surface **resources)
1078{
1079   struct tegra_context *context = to_tegra_context(pcontext);
1080
1081   /* XXX unwrap resources */
1082
1083   context->gpu->set_compute_resources(context->gpu, start, count, resources);
1084}
1085
1086static void
1087tegra_set_global_binding(struct pipe_context *pcontext, unsigned int first,
1088                         unsigned int count, struct pipe_resource **resources,
1089                         uint32_t **handles)
1090{
1091   struct tegra_context *context = to_tegra_context(pcontext);
1092
1093   /* XXX unwrap resources */
1094
1095   context->gpu->set_global_binding(context->gpu, first, count, resources,
1096                                    handles);
1097}
1098
1099static void
1100tegra_launch_grid(struct pipe_context *pcontext,
1101                  const struct pipe_grid_info *info)
1102{
1103   struct tegra_context *context = to_tegra_context(pcontext);
1104
1105   /* XXX unwrap info->indirect? */
1106
1107   context->gpu->launch_grid(context->gpu, info);
1108}
1109
1110static void
1111tegra_get_sample_position(struct pipe_context *pcontext, unsigned int count,
1112                          unsigned int index, float *value)
1113{
1114   struct tegra_context *context = to_tegra_context(pcontext);
1115
1116   context->gpu->get_sample_position(context->gpu, count, index, value);
1117}
1118
1119static uint64_t
1120tegra_get_timestamp(struct pipe_context *pcontext)
1121{
1122   struct tegra_context *context = to_tegra_context(pcontext);
1123
1124   return context->gpu->get_timestamp(context->gpu);
1125}
1126
1127static void
1128tegra_flush_resource(struct pipe_context *pcontext,
1129                     struct pipe_resource *presource)
1130{
1131   struct tegra_resource *resource = to_tegra_resource(presource);
1132   struct tegra_context *context = to_tegra_context(pcontext);
1133
1134   context->gpu->flush_resource(context->gpu, resource->gpu);
1135}
1136
1137static void
1138tegra_invalidate_resource(struct pipe_context *pcontext,
1139                          struct pipe_resource *presource)
1140{
1141   struct tegra_resource *resource = to_tegra_resource(presource);
1142   struct tegra_context *context = to_tegra_context(pcontext);
1143
1144   context->gpu->invalidate_resource(context->gpu, resource->gpu);
1145}
1146
1147static enum pipe_reset_status
1148tegra_get_device_reset_status(struct pipe_context *pcontext)
1149{
1150   struct tegra_context *context = to_tegra_context(pcontext);
1151
1152   return context->gpu->get_device_reset_status(context->gpu);
1153}
1154
1155static void
1156tegra_set_device_reset_callback(struct pipe_context *pcontext,
1157                                const struct pipe_device_reset_callback *cb)
1158{
1159   struct tegra_context *context = to_tegra_context(pcontext);
1160
1161   context->gpu->set_device_reset_callback(context->gpu, cb);
1162}
1163
1164static void
1165tegra_dump_debug_state(struct pipe_context *pcontext, FILE *stream,
1166                       unsigned int flags)
1167{
1168   struct tegra_context *context = to_tegra_context(pcontext);
1169
1170   context->gpu->dump_debug_state(context->gpu, stream, flags);
1171}
1172
1173static void
1174tegra_emit_string_marker(struct pipe_context *pcontext, const char *string,
1175                         int length)
1176{
1177   struct tegra_context *context = to_tegra_context(pcontext);
1178
1179   context->gpu->emit_string_marker(context->gpu, string, length);
1180}
1181
1182static bool
1183tegra_generate_mipmap(struct pipe_context *pcontext,
1184                      struct pipe_resource *presource,
1185                      enum pipe_format format,
1186                      unsigned int base_level,
1187                      unsigned int last_level,
1188                      unsigned int first_layer,
1189                      unsigned int last_layer)
1190{
1191   struct tegra_resource *resource = to_tegra_resource(presource);
1192   struct tegra_context *context = to_tegra_context(pcontext);
1193
1194   return context->gpu->generate_mipmap(context->gpu, resource->gpu, format,
1195                                        base_level, last_level, first_layer,
1196                                        last_layer);
1197}
1198
1199static uint64_t
1200tegra_create_texture_handle(struct pipe_context *pcontext,
1201                            struct pipe_sampler_view *view,
1202                            const struct pipe_sampler_state *state)
1203{
1204   struct tegra_context *context = to_tegra_context(pcontext);
1205
1206   return context->gpu->create_texture_handle(context->gpu, view, state);
1207}
1208
1209static void tegra_delete_texture_handle(struct pipe_context *pcontext,
1210                                        uint64_t handle)
1211{
1212   struct tegra_context *context = to_tegra_context(pcontext);
1213
1214   context->gpu->delete_texture_handle(context->gpu, handle);
1215}
1216
1217static void tegra_make_texture_handle_resident(struct pipe_context *pcontext,
1218                                               uint64_t handle, bool resident)
1219{
1220   struct tegra_context *context = to_tegra_context(pcontext);
1221
1222   context->gpu->make_texture_handle_resident(context->gpu, handle, resident);
1223}
1224
1225static uint64_t tegra_create_image_handle(struct pipe_context *pcontext,
1226                                          const struct pipe_image_view *image)
1227{
1228   struct tegra_context *context = to_tegra_context(pcontext);
1229
1230   return context->gpu->create_image_handle(context->gpu, image);
1231}
1232
1233static void tegra_delete_image_handle(struct pipe_context *pcontext,
1234                                      uint64_t handle)
1235{
1236   struct tegra_context *context = to_tegra_context(pcontext);
1237
1238   context->gpu->delete_image_handle(context->gpu, handle);
1239}
1240
1241static void tegra_make_image_handle_resident(struct pipe_context *pcontext,
1242                                             uint64_t handle, unsigned access,
1243                                             bool resident)
1244{
1245   struct tegra_context *context = to_tegra_context(pcontext);
1246
1247   context->gpu->make_image_handle_resident(context->gpu, handle, access,
1248                                            resident);
1249}
1250
/*
 * Create a Tegra wrapper context on top of a GPU (nouveau) context.
 *
 * Allocates the wrapper, creates the wrapped GPU context from the wrapped
 * screen, sets up uploaders and fills in the pipe_context vtable with the
 * tegra_* pass-through implementations. Returns NULL on failure (the
 * wrapper and any partially-created state are cleaned up via the goto
 * labels at the bottom).
 */
struct pipe_context *
tegra_screen_context_create(struct pipe_screen *pscreen, void *priv,
                            unsigned int flags)
{
   struct tegra_screen *screen = to_tegra_screen(pscreen);
   struct tegra_context *context;

   context = calloc(1, sizeof(*context));
   if (!context)
      return NULL;

   /* the wrapped GPU context does the actual rendering work */
   context->gpu = screen->gpu->context_create(screen->gpu, priv, flags);
   if (!context->gpu) {
      debug_error("failed to create GPU context\n");
      goto free;
   }

   context->base.screen = &screen->base;
   context->base.priv = priv;

   /*
    * Create custom stream and const uploaders. Note that technically nouveau
    * already creates uploaders that could be reused, but that would make the
    * resource unwrapping rather complicated. The reason for that is that both
    * uploaders create resources based on the context that they were created
    * from, which means that nouveau's uploader will use the nouveau context
    * which means that those resources must not be unwrapped. So before each
    * resource is unwrapped, the code would need to check that it does not
    * correspond to the uploaders' buffers.
    *
    * However, duplicating the uploaders here sounds worse than it is. The
    * default implementation that nouveau uses allocates buffers lazily, and
    * since it is never used, no buffers will ever be allocated and the only
    * memory wasted is that occupied by the nouveau uploader itself.
    */
   context->base.stream_uploader = u_upload_create_default(&context->base);
   if (!context->base.stream_uploader)
      goto destroy;

   /* const uploads share the stream uploader; tegra_destroy() frees it once */
   context->base.const_uploader = context->base.stream_uploader;

   context->base.destroy = tegra_destroy;

   context->base.draw_vbo = tegra_draw_vbo;

   context->base.render_condition = tegra_render_condition;

   /* queries */
   context->base.create_query = tegra_create_query;
   context->base.create_batch_query = tegra_create_batch_query;
   context->base.destroy_query = tegra_destroy_query;
   context->base.begin_query = tegra_begin_query;
   context->base.end_query = tegra_end_query;
   context->base.get_query_result = tegra_get_query_result;
   context->base.get_query_result_resource = tegra_get_query_result_resource;
   context->base.set_active_query_state = tegra_set_active_query_state;

   /* CSO state objects */
   context->base.create_blend_state = tegra_create_blend_state;
   context->base.bind_blend_state = tegra_bind_blend_state;
   context->base.delete_blend_state = tegra_delete_blend_state;

   context->base.create_sampler_state = tegra_create_sampler_state;
   context->base.bind_sampler_states = tegra_bind_sampler_states;
   context->base.delete_sampler_state = tegra_delete_sampler_state;

   context->base.create_rasterizer_state = tegra_create_rasterizer_state;
   context->base.bind_rasterizer_state = tegra_bind_rasterizer_state;
   context->base.delete_rasterizer_state = tegra_delete_rasterizer_state;

   context->base.create_depth_stencil_alpha_state = tegra_create_depth_stencil_alpha_state;
   context->base.bind_depth_stencil_alpha_state = tegra_bind_depth_stencil_alpha_state;
   context->base.delete_depth_stencil_alpha_state = tegra_delete_depth_stencil_alpha_state;

   /* shader stages */
   context->base.create_fs_state = tegra_create_fs_state;
   context->base.bind_fs_state = tegra_bind_fs_state;
   context->base.delete_fs_state = tegra_delete_fs_state;

   context->base.create_vs_state = tegra_create_vs_state;
   context->base.bind_vs_state = tegra_bind_vs_state;
   context->base.delete_vs_state = tegra_delete_vs_state;

   context->base.create_gs_state = tegra_create_gs_state;
   context->base.bind_gs_state = tegra_bind_gs_state;
   context->base.delete_gs_state = tegra_delete_gs_state;

   context->base.create_tcs_state = tegra_create_tcs_state;
   context->base.bind_tcs_state = tegra_bind_tcs_state;
   context->base.delete_tcs_state = tegra_delete_tcs_state;

   context->base.create_tes_state = tegra_create_tes_state;
   context->base.bind_tes_state = tegra_bind_tes_state;
   context->base.delete_tes_state = tegra_delete_tes_state;

   context->base.create_vertex_elements_state = tegra_create_vertex_elements_state;
   context->base.bind_vertex_elements_state = tegra_bind_vertex_elements_state;
   context->base.delete_vertex_elements_state = tegra_delete_vertex_elements_state;

   /* parameter state */
   context->base.set_blend_color = tegra_set_blend_color;
   context->base.set_stencil_ref = tegra_set_stencil_ref;
   context->base.set_sample_mask = tegra_set_sample_mask;
   context->base.set_min_samples = tegra_set_min_samples;
   context->base.set_clip_state = tegra_set_clip_state;

   context->base.set_constant_buffer = tegra_set_constant_buffer;
   context->base.set_framebuffer_state = tegra_set_framebuffer_state;
   context->base.set_polygon_stipple = tegra_set_polygon_stipple;
   context->base.set_scissor_states = tegra_set_scissor_states;
   context->base.set_window_rectangles = tegra_set_window_rectangles;
   context->base.set_viewport_states = tegra_set_viewport_states;
   context->base.set_sampler_views = tegra_set_sampler_views;
   context->base.set_tess_state = tegra_set_tess_state;

   context->base.set_debug_callback = tegra_set_debug_callback;

   context->base.set_shader_buffers = tegra_set_shader_buffers;
   context->base.set_shader_images = tegra_set_shader_images;
   context->base.set_vertex_buffers = tegra_set_vertex_buffers;

   /* stream output */
   context->base.create_stream_output_target = tegra_create_stream_output_target;
   context->base.stream_output_target_destroy = tegra_stream_output_target_destroy;
   context->base.set_stream_output_targets = tegra_set_stream_output_targets;

   /* blit/clear/flush */
   context->base.resource_copy_region = tegra_resource_copy_region;
   context->base.blit = tegra_blit;
   context->base.clear = tegra_clear;
   context->base.clear_render_target = tegra_clear_render_target;
   context->base.clear_depth_stencil = tegra_clear_depth_stencil;
   context->base.clear_texture = tegra_clear_texture;
   context->base.clear_buffer = tegra_clear_buffer;
   context->base.flush = tegra_flush;

   /* fences */
   context->base.create_fence_fd = tegra_create_fence_fd;
   context->base.fence_server_sync = tegra_fence_server_sync;

   /* views and surfaces */
   context->base.create_sampler_view = tegra_create_sampler_view;
   context->base.sampler_view_destroy = tegra_sampler_view_destroy;

   context->base.create_surface = tegra_create_surface;
   context->base.surface_destroy = tegra_surface_destroy;

   /* transfers: buffer and texture maps share the same implementation */
   context->base.buffer_map = tegra_transfer_map;
   context->base.texture_map = tegra_transfer_map;
   context->base.transfer_flush_region = tegra_transfer_flush_region;
   context->base.buffer_unmap = tegra_transfer_unmap;
   context->base.texture_unmap = tegra_transfer_unmap;
   context->base.buffer_subdata = tegra_buffer_subdata;
   context->base.texture_subdata = tegra_texture_subdata;

   context->base.texture_barrier = tegra_texture_barrier;
   context->base.memory_barrier = tegra_memory_barrier;

   /* video */
   context->base.create_video_codec = tegra_create_video_codec;
   context->base.create_video_buffer = tegra_create_video_buffer;

   /* compute */
   context->base.create_compute_state = tegra_create_compute_state;
   context->base.bind_compute_state = tegra_bind_compute_state;
   context->base.delete_compute_state = tegra_delete_compute_state;
   context->base.set_compute_resources = tegra_set_compute_resources;
   context->base.set_global_binding = tegra_set_global_binding;
   context->base.launch_grid = tegra_launch_grid;
   context->base.get_sample_position = tegra_get_sample_position;
   context->base.get_timestamp = tegra_get_timestamp;

   context->base.flush_resource = tegra_flush_resource;
   context->base.invalidate_resource = tegra_invalidate_resource;

   /* debug/diagnostics */
   context->base.get_device_reset_status = tegra_get_device_reset_status;
   context->base.set_device_reset_callback = tegra_set_device_reset_callback;
   context->base.dump_debug_state = tegra_dump_debug_state;
   context->base.emit_string_marker = tegra_emit_string_marker;

   context->base.generate_mipmap = tegra_generate_mipmap;

   /* bindless handles */
   context->base.create_texture_handle = tegra_create_texture_handle;
   context->base.delete_texture_handle = tegra_delete_texture_handle;
   context->base.make_texture_handle_resident = tegra_make_texture_handle_resident;
   context->base.create_image_handle = tegra_create_image_handle;
   context->base.delete_image_handle = tegra_delete_image_handle;
   context->base.make_image_handle_resident = tegra_make_image_handle_resident;

   return &context->base;

destroy:
   /* uploader creation failed: tear down the GPU context first */
   context->gpu->destroy(context->gpu);
free:
   free(context);
   return NULL;
}
1438