1// SPDX-License-Identifier: GPL-2.0 OR MIT
2/**************************************************************************
3 *
4 * Copyright 2009 - 2023 VMware, Inc., Palo Alto, CA., USA
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27#include "vmwgfx_binding.h"
28#include "vmwgfx_bo.h"
29#include "vmwgfx_drv.h"
30#include "vmwgfx_mksstat.h"
31#include "vmwgfx_so.h"
32
33#include <drm/ttm/ttm_bo.h>
34#include <drm/ttm/ttm_placement.h>
35
36#include <linux/sync_file.h>
37#include <linux/hashtable.h>
38
39/*
40 * Helper macro to get dx_ctx_node if available otherwise print an error
41 * message. This is for use in command verifier function where if dx_ctx_node
42 * is not set then command is invalid.
43 */
44#define VMW_GET_CTX_NODE(__sw_context)                                        \
45({                                                                            \
46	__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({            \
47		VMW_DEBUG_USER("SM context is not set at %s\n", __func__);    \
48		__sw_context->dx_ctx_node;                                    \
49	});                                                                   \
50})
51
52#define VMW_DECLARE_CMD_VAR(__var, __type)                                    \
53	struct {                                                              \
54		SVGA3dCmdHeader header;                                       \
55		__type body;                                                  \
56	} __var
57
58/**
59 * struct vmw_relocation - Buffer object relocation
60 *
61 * @head: List head for the command submission context's relocation list
62 * @vbo: Non ref-counted pointer to buffer object
63 * @mob_loc: Pointer to location for mob id to be modified
64 * @location: Pointer to location for guest pointer to be modified
65 */
66struct vmw_relocation {
67	struct list_head head;
68	struct vmw_bo *vbo;
69	union {
70		SVGAMobId *mob_loc;
71		SVGAGuestPtr *location;
72	};
73};
74
75/**
76 * enum vmw_resource_relocation_type - Relocation type for resources
77 *
78 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
79 * command stream is replaced with the actual id after validation.
80 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
81 * with a NOP.
82 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
83 * validation is -1, the command is replaced with a NOP. Otherwise no action.
84 * @vmw_res_rel_max: Last value in the enum - used for error checking
85*/
86enum vmw_resource_relocation_type {
87	vmw_res_rel_normal,
88	vmw_res_rel_nop,
89	vmw_res_rel_cond_nop,
90	vmw_res_rel_max
91};
92
93/**
94 * struct vmw_resource_relocation - Relocation info for resources
95 *
96 * @head: List head for the software context's relocation list.
97 * @res: Non-ref-counted pointer to the resource.
98 * @offset: Offset of single byte entries into the command buffer where the id
99 * that needs fixup is located.
100 * @rel_type: Type of relocation.
101 */
102struct vmw_resource_relocation {
103	struct list_head head;
104	const struct vmw_resource *res;
105	u32 offset:29;
106	enum vmw_resource_relocation_type rel_type:3;
107};
108
109/**
110 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
111 *
112 * @head: List head of context list
113 * @ctx: The context resource
114 * @cur: The context's persistent binding state
115 * @staged: The binding state changes of this command buffer
116 */
117struct vmw_ctx_validation_info {
118	struct list_head head;
119	struct vmw_resource *ctx;
120	struct vmw_ctx_binding_state *cur;
121	struct vmw_ctx_binding_state *staged;
122};
123
124/**
125 * struct vmw_cmd_entry - Describe a command for the verifier
126 *
127 * @func: Call-back to handle the command.
128 * @user_allow: Whether allowed from the execbuf ioctl.
129 * @gb_disable: Whether disabled if guest-backed objects are available.
130 * @gb_enable: Whether enabled iff guest-backed objects are available.
131 * @cmd_name: Name of the command.
132 */
133struct vmw_cmd_entry {
134	int (*func) (struct vmw_private *, struct vmw_sw_context *,
135		     SVGA3dCmdHeader *);
136	bool user_allow;
137	bool gb_disable;
138	bool gb_enable;
139	const char *cmd_name;
140};
141
142#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
143	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
144				       (_gb_disable), (_gb_enable), #_cmd}
145
146static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
147					struct vmw_sw_context *sw_context,
148					struct vmw_resource *ctx);
149static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
150				 struct vmw_sw_context *sw_context,
151				 SVGAMobId *id,
152				 struct vmw_bo **vmw_bo_p);
153/**
154 * vmw_ptr_diff - Compute the offset from a to b in bytes
155 *
156 * @a: A starting pointer.
157 * @b: A pointer offset in the same address space.
158 *
159 * Returns: The offset in bytes between the two pointers.
160 */
161static size_t vmw_ptr_diff(void *a, void *b)
162{
163	return (unsigned long) b - (unsigned long) a;
164}
165
166/**
167 * vmw_execbuf_bindings_commit - Commit modified binding state
168 *
169 * @sw_context: The command submission context
170 * @backoff: Whether this is part of the error path and binding state changes
171 * should be ignored
172 */
173static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
174					bool backoff)
175{
176	struct vmw_ctx_validation_info *entry;
177
178	list_for_each_entry(entry, &sw_context->ctx_list, head) {
179		if (!backoff)
180			vmw_binding_state_commit(entry->cur, entry->staged);
181
182		if (entry->staged != sw_context->staged_bindings)
183			vmw_binding_state_free(entry->staged);
184		else
185			sw_context->staged_bindings_inuse = false;
186	}
187
188	/* List entries are freed with the validation context */
189	INIT_LIST_HEAD(&sw_context->ctx_list);
190}
191
192/**
193 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
194 *
195 * @sw_context: The command submission context
196 */
197static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
198{
199	if (sw_context->dx_query_mob)
200		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
201					  sw_context->dx_query_mob);
202}
203
204/**
205 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
206 * the validate list.
207 *
208 * @dev_priv: Pointer to the device private:
209 * @sw_context: The command submission context
210 * @res: Pointer to the resource
211 * @node: The validation node holding the context resource metadata
212 */
213static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
214				   struct vmw_sw_context *sw_context,
215				   struct vmw_resource *res,
216				   struct vmw_ctx_validation_info *node)
217{
218	int ret;
219
220	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
221	if (unlikely(ret != 0))
222		goto out_err;
223
224	if (!sw_context->staged_bindings) {
225		sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
226		if (IS_ERR(sw_context->staged_bindings)) {
227			ret = PTR_ERR(sw_context->staged_bindings);
228			sw_context->staged_bindings = NULL;
229			goto out_err;
230		}
231	}
232
233	if (sw_context->staged_bindings_inuse) {
234		node->staged = vmw_binding_state_alloc(dev_priv);
235		if (IS_ERR(node->staged)) {
236			ret = PTR_ERR(node->staged);
237			node->staged = NULL;
238			goto out_err;
239		}
240	} else {
241		node->staged = sw_context->staged_bindings;
242		sw_context->staged_bindings_inuse = true;
243	}
244
245	node->ctx = res;
246	node->cur = vmw_context_binding_state(res);
247	list_add_tail(&node->head, &sw_context->ctx_list);
248
249	return 0;
250
251out_err:
252	return ret;
253}
254
255/**
256 * vmw_execbuf_res_size - calculate extra size fore the resource validation node
257 *
258 * @dev_priv: Pointer to the device private struct.
259 * @res_type: The resource type.
260 *
261 * Guest-backed contexts and DX contexts require extra size to store execbuf
262 * private information in the validation node. Typically the binding manager
263 * associated data structures.
264 *
265 * Returns: The extra size requirement based on resource type.
266 */
267static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
268					 enum vmw_res_type res_type)
269{
270	return (res_type == vmw_res_dx_context ||
271		(res_type == vmw_res_context && dev_priv->has_mob)) ?
272		sizeof(struct vmw_ctx_validation_info) : 0;
273}
274
275/**
276 * vmw_execbuf_rcache_update - Update a resource-node cache entry
277 *
278 * @rcache: Pointer to the entry to update.
279 * @res: Pointer to the resource.
280 * @private: Pointer to the execbuf-private space in the resource validation
281 * node.
282 */
283static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
284				      struct vmw_resource *res,
285				      void *private)
286{
287	rcache->res = res;
288	rcache->private = private;
289	rcache->valid = 1;
290	rcache->valid_handle = 0;
291}
292
293enum vmw_val_add_flags {
294	vmw_val_add_flag_none  =      0,
295	vmw_val_add_flag_noctx = 1 << 0,
296};
297
298/**
299 * vmw_execbuf_res_val_add - Add a resource to the validation list.
300 *
301 * @sw_context: Pointer to the software context.
302 * @res: Unreferenced rcu-protected pointer to the resource.
303 * @dirty: Whether to change dirty status.
304 * @flags: specifies whether to use the context or not
305 *
306 * Returns: 0 on success. Negative error code on failure. Typical error codes
307 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
308 */
309static int vmw_execbuf_res_val_add(struct vmw_sw_context *sw_context,
310				   struct vmw_resource *res,
311				   u32 dirty,
312				   u32 flags)
313{
314	struct vmw_private *dev_priv = res->dev_priv;
315	int ret;
316	enum vmw_res_type res_type = vmw_res_type(res);
317	struct vmw_res_cache_entry *rcache;
318	struct vmw_ctx_validation_info *ctx_info;
319	bool first_usage;
320	unsigned int priv_size;
321
322	rcache = &sw_context->res_cache[res_type];
323	if (likely(rcache->valid && rcache->res == res)) {
324		if (dirty)
325			vmw_validation_res_set_dirty(sw_context->ctx,
326						     rcache->private, dirty);
327		return 0;
328	}
329
330	if ((flags & vmw_val_add_flag_noctx) != 0) {
331		ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
332						  (void **)&ctx_info, NULL);
333		if (ret)
334			return ret;
335
336	} else {
337		priv_size = vmw_execbuf_res_size(dev_priv, res_type);
338		ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
339						  dirty, (void **)&ctx_info,
340						  &first_usage);
341		if (ret)
342			return ret;
343
344		if (priv_size && first_usage) {
345			ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
346						      ctx_info);
347			if (ret) {
348				VMW_DEBUG_USER("Failed first usage context setup.\n");
349				return ret;
350			}
351		}
352	}
353
354	vmw_execbuf_rcache_update(rcache, res, ctx_info);
355	return 0;
356}
357
358/**
359 * vmw_view_res_val_add - Add a view and the surface it's pointing to to the
360 * validation list
361 *
362 * @sw_context: The software context holding the validation list.
363 * @view: Pointer to the view resource.
364 *
365 * Returns 0 if success, negative error code otherwise.
366 */
367static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
368				struct vmw_resource *view)
369{
370	int ret;
371
372	/*
373	 * First add the resource the view is pointing to, otherwise it may be
374	 * swapped out when the view is validated.
375	 */
376	ret = vmw_execbuf_res_val_add(sw_context, vmw_view_srf(view),
377				      vmw_view_dirtying(view), vmw_val_add_flag_noctx);
378	if (ret)
379		return ret;
380
381	return vmw_execbuf_res_val_add(sw_context, view, VMW_RES_DIRTY_NONE,
382				       vmw_val_add_flag_noctx);
383}
384
385/**
386 * vmw_view_id_val_add - Look up a view and add it and the surface it's pointing
387 * to to the validation list.
388 *
389 * @sw_context: The software context holding the validation list.
390 * @view_type: The view type to look up.
391 * @id: view id of the view.
392 *
393 * The view is represented by a view id and the DX context it's created on, or
394 * scheduled for creation on. If there is no DX context set, the function will
395 * return an -EINVAL error pointer.
396 *
397 * Returns: Unreferenced pointer to the resource on success, negative error
398 * pointer on failure.
399 */
400static struct vmw_resource *
401vmw_view_id_val_add(struct vmw_sw_context *sw_context,
402		    enum vmw_view_type view_type, u32 id)
403{
404	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
405	struct vmw_resource *view;
406	int ret;
407
408	if (!ctx_node)
409		return ERR_PTR(-EINVAL);
410
411	view = vmw_view_lookup(sw_context->man, view_type, id);
412	if (IS_ERR(view))
413		return view;
414
415	ret = vmw_view_res_val_add(sw_context, view);
416	if (ret)
417		return ERR_PTR(ret);
418
419	return view;
420}
421
422/**
423 * vmw_resource_context_res_add - Put resources previously bound to a context on
424 * the validation list
425 *
426 * @dev_priv: Pointer to a device private structure
427 * @sw_context: Pointer to a software context used for this command submission
428 * @ctx: Pointer to the context resource
429 *
430 * This function puts all resources that were previously bound to @ctx on the
431 * resource validation list. This is part of the context state reemission
432 */
433static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
434					struct vmw_sw_context *sw_context,
435					struct vmw_resource *ctx)
436{
437	struct list_head *binding_list;
438	struct vmw_ctx_bindinfo *entry;
439	int ret = 0;
440	struct vmw_resource *res;
441	u32 i;
442	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
443		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
444
445	/* Add all cotables to the validation list. */
446	if (has_sm4_context(dev_priv) &&
447	    vmw_res_type(ctx) == vmw_res_dx_context) {
448		for (i = 0; i < cotable_max; ++i) {
449			res = vmw_context_cotable(ctx, i);
450			if (IS_ERR_OR_NULL(res))
451				continue;
452
453			ret = vmw_execbuf_res_val_add(sw_context, res,
454						      VMW_RES_DIRTY_SET,
455						      vmw_val_add_flag_noctx);
456			if (unlikely(ret != 0))
457				return ret;
458		}
459	}
460
461	/* Add all resources bound to the context to the validation list */
462	mutex_lock(&dev_priv->binding_mutex);
463	binding_list = vmw_context_binding_list(ctx);
464
465	list_for_each_entry(entry, binding_list, ctx_list) {
466		if (vmw_res_type(entry->res) == vmw_res_view)
467			ret = vmw_view_res_val_add(sw_context, entry->res);
468		else
469			ret = vmw_execbuf_res_val_add(sw_context, entry->res,
470						      vmw_binding_dirtying(entry->bt),
471						      vmw_val_add_flag_noctx);
472		if (unlikely(ret != 0))
473			break;
474	}
475
476	if (has_sm4_context(dev_priv) &&
477	    vmw_res_type(ctx) == vmw_res_dx_context) {
478		struct vmw_bo *dx_query_mob;
479
480		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
481		if (dx_query_mob) {
482			vmw_bo_placement_set(dx_query_mob,
483					     VMW_BO_DOMAIN_MOB,
484					     VMW_BO_DOMAIN_MOB);
485			ret = vmw_validation_add_bo(sw_context->ctx,
486						    dx_query_mob);
487		}
488	}
489
490	mutex_unlock(&dev_priv->binding_mutex);
491	return ret;
492}
493
494/**
495 * vmw_resource_relocation_add - Add a relocation to the relocation list
496 *
497 * @sw_context: Pointer to the software context.
498 * @res: The resource.
499 * @offset: Offset into the command buffer currently being parsed where the id
500 * that needs fixup is located. Granularity is one byte.
501 * @rel_type: Relocation type.
502 */
503static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
504				       const struct vmw_resource *res,
505				       unsigned long offset,
506				       enum vmw_resource_relocation_type
507				       rel_type)
508{
509	struct vmw_resource_relocation *rel;
510
511	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
512	if (unlikely(!rel)) {
513		VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
514		return -ENOMEM;
515	}
516
517	rel->res = res;
518	rel->offset = offset;
519	rel->rel_type = rel_type;
520	list_add_tail(&rel->head, &sw_context->res_relocations);
521
522	return 0;
523}
524
525/**
526 * vmw_resource_relocations_free - Free all relocations on a list
527 *
528 * @list: Pointer to the head of the relocation list
529 */
530static void vmw_resource_relocations_free(struct list_head *list)
531{
532	/* Memory is validation context memory, so no need to free it */
533	INIT_LIST_HEAD(list);
534}
535
536/**
537 * vmw_resource_relocations_apply - Apply all relocations on a list
538 *
539 * @cb: Pointer to the start of the command buffer bein patch. This need not be
540 * the same buffer as the one being parsed when the relocation list was built,
541 * but the contents must be the same modulo the resource ids.
542 * @list: Pointer to the head of the relocation list.
543 */
544static void vmw_resource_relocations_apply(uint32_t *cb,
545					   struct list_head *list)
546{
547	struct vmw_resource_relocation *rel;
548
549	/* Validate the struct vmw_resource_relocation member size */
550	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
551	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));
552
553	list_for_each_entry(rel, list, head) {
554		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
555		switch (rel->rel_type) {
556		case vmw_res_rel_normal:
557			*addr = rel->res->id;
558			break;
559		case vmw_res_rel_nop:
560			*addr = SVGA_3D_CMD_NOP;
561			break;
562		default:
563			if (rel->res->id == -1)
564				*addr = SVGA_3D_CMD_NOP;
565			break;
566		}
567	}
568}
569
570static int vmw_cmd_invalid(struct vmw_private *dev_priv,
571			   struct vmw_sw_context *sw_context,
572			   SVGA3dCmdHeader *header)
573{
574	return -EINVAL;
575}
576
577static int vmw_cmd_ok(struct vmw_private *dev_priv,
578		      struct vmw_sw_context *sw_context,
579		      SVGA3dCmdHeader *header)
580{
581	return 0;
582}
583
584/**
585 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
586 * list.
587 *
588 * @sw_context: Pointer to the software context.
589 *
590 * Note that since vmware's command submission currently is protected by the
591 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
592 * only a single thread at once will attempt this.
593 */
594static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
595{
596	int ret;
597
598	ret = vmw_validation_res_reserve(sw_context->ctx, true);
599	if (ret)
600		return ret;
601
602	if (sw_context->dx_query_mob) {
603		struct vmw_bo *expected_dx_query_mob;
604
605		expected_dx_query_mob =
606			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
607		if (expected_dx_query_mob &&
608		    expected_dx_query_mob != sw_context->dx_query_mob) {
609			ret = -EINVAL;
610		}
611	}
612
613	return ret;
614}
615
616/**
617 * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
618 * resource validate list unless it's already there.
619 *
620 * @dev_priv: Pointer to a device private structure.
621 * @sw_context: Pointer to the software context.
622 * @res_type: Resource type.
623 * @dirty: Whether to change dirty status.
624 * @converter: User-space visisble type specific information.
625 * @id_loc: Pointer to the location in the command buffer currently being parsed
626 * from where the user-space resource id handle is located.
627 * @p_res: Pointer to pointer to resource validalidation node. Populated on
628 * exit.
629 */
630static int
631vmw_cmd_res_check(struct vmw_private *dev_priv,
632		  struct vmw_sw_context *sw_context,
633		  enum vmw_res_type res_type,
634		  u32 dirty,
635		  const struct vmw_user_resource_conv *converter,
636		  uint32_t *id_loc,
637		  struct vmw_resource **p_res)
638{
639	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
640	struct vmw_resource *res;
641	int ret = 0;
642	bool needs_unref = false;
643
644	if (p_res)
645		*p_res = NULL;
646
647	if (*id_loc == SVGA3D_INVALID_ID) {
648		if (res_type == vmw_res_context) {
649			VMW_DEBUG_USER("Illegal context invalid id.\n");
650			return -EINVAL;
651		}
652		return 0;
653	}
654
655	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
656		res = rcache->res;
657		if (dirty)
658			vmw_validation_res_set_dirty(sw_context->ctx,
659						     rcache->private, dirty);
660	} else {
661		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);
662
663		ret = vmw_validation_preload_res(sw_context->ctx, size);
664		if (ret)
665			return ret;
666
667		ret = vmw_user_resource_lookup_handle
668			(dev_priv, sw_context->fp->tfile, *id_loc, converter, &res);
669		if (ret != 0) {
670			VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
671				       (unsigned int) *id_loc);
672			return ret;
673		}
674		needs_unref = true;
675
676		ret = vmw_execbuf_res_val_add(sw_context, res, dirty, vmw_val_add_flag_none);
677		if (unlikely(ret != 0))
678			goto res_check_done;
679
680		if (rcache->valid && rcache->res == res) {
681			rcache->valid_handle = true;
682			rcache->handle = *id_loc;
683		}
684	}
685
686	ret = vmw_resource_relocation_add(sw_context, res,
687					  vmw_ptr_diff(sw_context->buf_start,
688						       id_loc),
689					  vmw_res_rel_normal);
690	if (p_res)
691		*p_res = res;
692
693res_check_done:
694	if (needs_unref)
695		vmw_resource_unreference(&res);
696
697	return ret;
698}
699
700/**
701 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
702 *
703 * @ctx_res: context the query belongs to
704 *
705 * This function assumes binding_mutex is held.
706 */
707static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
708{
709	struct vmw_private *dev_priv = ctx_res->dev_priv;
710	struct vmw_bo *dx_query_mob;
711	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);
712
713	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
714
715	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
716		return 0;
717
718	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), ctx_res->id);
719	if (cmd == NULL)
720		return -ENOMEM;
721
722	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
723	cmd->header.size = sizeof(cmd->body);
724	cmd->body.cid = ctx_res->id;
725	cmd->body.mobid = dx_query_mob->tbo.resource->start;
726	vmw_cmd_commit(dev_priv, sizeof(*cmd));
727
728	vmw_context_bind_dx_query(ctx_res, dx_query_mob);
729
730	return 0;
731}
732
733/**
734 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
735 * contexts.
736 *
737 * @sw_context: Pointer to the software context.
738 *
739 * Rebind context binding points that have been scrubbed because of eviction.
740 */
741static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
742{
743	struct vmw_ctx_validation_info *val;
744	int ret;
745
746	list_for_each_entry(val, &sw_context->ctx_list, head) {
747		ret = vmw_binding_rebind_all(val->cur);
748		if (unlikely(ret != 0)) {
749			if (ret != -ERESTARTSYS)
750				VMW_DEBUG_USER("Failed to rebind context.\n");
751			return ret;
752		}
753
754		ret = vmw_rebind_all_dx_query(val->ctx);
755		if (ret != 0) {
756			VMW_DEBUG_USER("Failed to rebind queries.\n");
757			return ret;
758		}
759	}
760
761	return 0;
762}
763
764/**
765 * vmw_view_bindings_add - Add an array of view bindings to a context binding
766 * state tracker.
767 *
768 * @sw_context: The execbuf state used for this command.
769 * @view_type: View type for the bindings.
770 * @binding_type: Binding type for the bindings.
771 * @shader_slot: The shader slot to user for the bindings.
772 * @view_ids: Array of view ids to be bound.
773 * @num_views: Number of view ids in @view_ids.
774 * @first_slot: The binding slot to be used for the first view id in @view_ids.
775 */
776static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
777				 enum vmw_view_type view_type,
778				 enum vmw_ctx_binding_type binding_type,
779				 uint32 shader_slot,
780				 uint32 view_ids[], u32 num_views,
781				 u32 first_slot)
782{
783	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
784	u32 i;
785
786	if (!ctx_node)
787		return -EINVAL;
788
789	for (i = 0; i < num_views; ++i) {
790		struct vmw_ctx_bindinfo_view binding;
791		struct vmw_resource *view = NULL;
792
793		if (view_ids[i] != SVGA3D_INVALID_ID) {
794			view = vmw_view_id_val_add(sw_context, view_type,
795						   view_ids[i]);
796			if (IS_ERR(view)) {
797				VMW_DEBUG_USER("View not found.\n");
798				return PTR_ERR(view);
799			}
800		}
801		binding.bi.ctx = ctx_node->ctx;
802		binding.bi.res = view;
803		binding.bi.bt = binding_type;
804		binding.shader_slot = shader_slot;
805		binding.slot = first_slot + i;
806		vmw_binding_add(ctx_node->staged, &binding.bi,
807				shader_slot, binding.slot);
808	}
809
810	return 0;
811}
812
813/**
814 * vmw_cmd_cid_check - Check a command header for valid context information.
815 *
816 * @dev_priv: Pointer to a device private structure.
817 * @sw_context: Pointer to the software context.
818 * @header: A command header with an embedded user-space context handle.
819 *
820 * Convenience function: Call vmw_cmd_res_check with the user-space context
821 * handle embedded in @header.
822 */
823static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
824			     struct vmw_sw_context *sw_context,
825			     SVGA3dCmdHeader *header)
826{
827	VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
828		container_of(header, typeof(*cmd), header);
829
830	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
831				 VMW_RES_DIRTY_SET, user_context_converter,
832				 &cmd->body, NULL);
833}
834
835/**
836 * vmw_execbuf_info_from_res - Get the private validation metadata for a
837 * recently validated resource
838 *
839 * @sw_context: Pointer to the command submission context
840 * @res: The resource
841 *
842 * The resource pointed to by @res needs to be present in the command submission
843 * context's resource cache and hence the last resource of that type to be
844 * processed by the validation code.
845 *
846 * Return: a pointer to the private metadata of the resource, or NULL if it
847 * wasn't found
848 */
849static struct vmw_ctx_validation_info *
850vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
851			  struct vmw_resource *res)
852{
853	struct vmw_res_cache_entry *rcache =
854		&sw_context->res_cache[vmw_res_type(res)];
855
856	if (rcache->valid && rcache->res == res)
857		return rcache->private;
858
859	WARN_ON_ONCE(true);
860	return NULL;
861}
862
863static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
864					   struct vmw_sw_context *sw_context,
865					   SVGA3dCmdHeader *header)
866{
867	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
868	struct vmw_resource *ctx;
869	struct vmw_resource *res;
870	int ret;
871
872	cmd = container_of(header, typeof(*cmd), header);
873
874	if (cmd->body.type >= SVGA3D_RT_MAX) {
875		VMW_DEBUG_USER("Illegal render target type %u.\n",
876			       (unsigned int) cmd->body.type);
877		return -EINVAL;
878	}
879
880	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
881				VMW_RES_DIRTY_SET, user_context_converter,
882				&cmd->body.cid, &ctx);
883	if (unlikely(ret != 0))
884		return ret;
885
886	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
887				VMW_RES_DIRTY_SET, user_surface_converter,
888				&cmd->body.target.sid, &res);
889	if (unlikely(ret))
890		return ret;
891
892	if (dev_priv->has_mob) {
893		struct vmw_ctx_bindinfo_view binding;
894		struct vmw_ctx_validation_info *node;
895
896		node = vmw_execbuf_info_from_res(sw_context, ctx);
897		if (!node)
898			return -EINVAL;
899
900		binding.bi.ctx = ctx;
901		binding.bi.res = res;
902		binding.bi.bt = vmw_ctx_binding_rt;
903		binding.slot = cmd->body.type;
904		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
905	}
906
907	return 0;
908}
909
910static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
911				      struct vmw_sw_context *sw_context,
912				      SVGA3dCmdHeader *header)
913{
914	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
915	int ret;
916
917	cmd = container_of(header, typeof(*cmd), header);
918
919	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
920				VMW_RES_DIRTY_NONE, user_surface_converter,
921				&cmd->body.src.sid, NULL);
922	if (ret)
923		return ret;
924
925	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
926				 VMW_RES_DIRTY_SET, user_surface_converter,
927				 &cmd->body.dest.sid, NULL);
928}
929
930static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
931				     struct vmw_sw_context *sw_context,
932				     SVGA3dCmdHeader *header)
933{
934	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
935	int ret;
936
937	cmd = container_of(header, typeof(*cmd), header);
938	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
939				VMW_RES_DIRTY_NONE, user_surface_converter,
940				&cmd->body.src, NULL);
941	if (ret != 0)
942		return ret;
943
944	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
945				 VMW_RES_DIRTY_SET, user_surface_converter,
946				 &cmd->body.dest, NULL);
947}
948
949static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
950				   struct vmw_sw_context *sw_context,
951				   SVGA3dCmdHeader *header)
952{
953	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
954	int ret;
955
956	cmd = container_of(header, typeof(*cmd), header);
957	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
958				VMW_RES_DIRTY_NONE, user_surface_converter,
959				&cmd->body.srcSid, NULL);
960	if (ret != 0)
961		return ret;
962
963	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
964				 VMW_RES_DIRTY_SET, user_surface_converter,
965				 &cmd->body.dstSid, NULL);
966}
967
968static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
969				     struct vmw_sw_context *sw_context,
970				     SVGA3dCmdHeader *header)
971{
972	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
973	int ret;
974
975	cmd = container_of(header, typeof(*cmd), header);
976	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
977				VMW_RES_DIRTY_NONE, user_surface_converter,
978				&cmd->body.src.sid, NULL);
979	if (unlikely(ret != 0))
980		return ret;
981
982	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
983				 VMW_RES_DIRTY_SET, user_surface_converter,
984				 &cmd->body.dest.sid, NULL);
985}
986
987static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
988					 struct vmw_sw_context *sw_context,
989					 SVGA3dCmdHeader *header)
990{
991	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
992		container_of(header, typeof(*cmd), header);
993
994	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
995				 VMW_RES_DIRTY_NONE, user_surface_converter,
996				 &cmd->body.srcImage.sid, NULL);
997}
998
999static int vmw_cmd_present_check(struct vmw_private *dev_priv,
1000				 struct vmw_sw_context *sw_context,
1001				 SVGA3dCmdHeader *header)
1002{
1003	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
1004		container_of(header, typeof(*cmd), header);
1005
1006	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1007				 VMW_RES_DIRTY_NONE, user_surface_converter,
1008				 &cmd->body.sid, NULL);
1009}
1010
1011/**
1012 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
1013 *
1014 * @dev_priv: The device private structure.
1015 * @new_query_bo: The new buffer holding query results.
1016 * @sw_context: The software context used for this command submission.
1017 *
1018 * This function checks whether @new_query_bo is suitable for holding query
1019 * results, and if another buffer currently is pinned for query results. If so,
1020 * the function prepares the state of @sw_context for switching pinned buffers
1021 * after successful submission of the current command batch.
1022 */
1023static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
1024				       struct vmw_bo *new_query_bo,
1025				       struct vmw_sw_context *sw_context)
1026{
1027	struct vmw_res_cache_entry *ctx_entry =
1028		&sw_context->res_cache[vmw_res_context];
1029	int ret;
1030
1031	BUG_ON(!ctx_entry->valid);
1032	sw_context->last_query_ctx = ctx_entry->res;
1033
1034	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
1035
1036		if (unlikely(PFN_UP(new_query_bo->tbo.resource->size) > 4)) {
1037			VMW_DEBUG_USER("Query buffer too large.\n");
1038			return -EINVAL;
1039		}
1040
1041		if (unlikely(sw_context->cur_query_bo != NULL)) {
1042			sw_context->needs_post_query_barrier = true;
1043			vmw_bo_placement_set_default_accelerated(sw_context->cur_query_bo);
1044			ret = vmw_validation_add_bo(sw_context->ctx,
1045						    sw_context->cur_query_bo);
1046			if (unlikely(ret != 0))
1047				return ret;
1048		}
1049		sw_context->cur_query_bo = new_query_bo;
1050
1051		vmw_bo_placement_set_default_accelerated(dev_priv->dummy_query_bo);
1052		ret = vmw_validation_add_bo(sw_context->ctx,
1053					    dev_priv->dummy_query_bo);
1054		if (unlikely(ret != 0))
1055			return ret;
1056	}
1057
1058	return 0;
1059}
1060
1061/**
1062 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
1063 *
1064 * @dev_priv: The device private structure.
1065 * @sw_context: The software context used for this command submission batch.
1066 *
1067 * This function will check if we're switching query buffers, and will then,
1068 * issue a dummy occlusion query wait used as a query barrier. When the fence
1069 * object following that query wait has signaled, we are sure that all preceding
1070 * queries have finished, and the old query buffer can be unpinned. However,
1071 * since both the new query buffer and the old one are fenced with that fence,
1072 * we can do an asynchronus unpin now, and be sure that the old query buffer
1073 * won't be moved until the fence has signaled.
1074 *
1075 * As mentioned above, both the new - and old query buffers need to be fenced
1076 * using a sequence emitted *after* calling this function.
1077 */
1078static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
1079				     struct vmw_sw_context *sw_context)
1080{
1081	/*
1082	 * The validate list should still hold references to all
1083	 * contexts here.
1084	 */
1085	if (sw_context->needs_post_query_barrier) {
1086		struct vmw_res_cache_entry *ctx_entry =
1087			&sw_context->res_cache[vmw_res_context];
1088		struct vmw_resource *ctx;
1089		int ret;
1090
1091		BUG_ON(!ctx_entry->valid);
1092		ctx = ctx_entry->res;
1093
1094		ret = vmw_cmd_emit_dummy_query(dev_priv, ctx->id);
1095
1096		if (unlikely(ret != 0))
1097			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
1098	}
1099
1100	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
1101		if (dev_priv->pinned_bo) {
1102			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
1103			vmw_bo_unreference(&dev_priv->pinned_bo);
1104		}
1105
1106		if (!sw_context->needs_post_query_barrier) {
1107			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
1108
1109			/*
1110			 * We pin also the dummy_query_bo buffer so that we
1111			 * don't need to validate it when emitting dummy queries
1112			 * in context destroy paths.
1113			 */
1114			if (!dev_priv->dummy_query_bo_pinned) {
1115				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
1116						    true);
1117				dev_priv->dummy_query_bo_pinned = true;
1118			}
1119
1120			BUG_ON(sw_context->last_query_ctx == NULL);
1121			dev_priv->query_cid = sw_context->last_query_ctx->id;
1122			dev_priv->query_cid_valid = true;
1123			dev_priv->pinned_bo =
1124				vmw_bo_reference(sw_context->cur_query_bo);
1125		}
1126	}
1127}
1128
1129/**
1130 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
1131 * to a MOB id.
1132 *
1133 * @dev_priv: Pointer to a device private structure.
1134 * @sw_context: The software context used for this command batch validation.
1135 * @id: Pointer to the user-space handle to be translated.
1136 * @vmw_bo_p: Points to a location that, on successful return will carry a
1137 * non-reference-counted pointer to the buffer object identified by the
1138 * user-space handle in @id.
1139 *
1140 * This function saves information needed to translate a user-space buffer
1141 * handle to a MOB id. The translation does not take place immediately, but
1142 * during a call to vmw_apply_relocations().
1143 *
1144 * This function builds a relocation list and a list of buffers to validate. The
1145 * former needs to be freed using either vmw_apply_relocations() or
1146 * vmw_free_relocations(). The latter needs to be freed using
1147 * vmw_clear_validations.
1148 */
1149static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
1150				 struct vmw_sw_context *sw_context,
1151				 SVGAMobId *id,
1152				 struct vmw_bo **vmw_bo_p)
1153{
1154	struct vmw_bo *vmw_bo, *tmp_bo;
1155	uint32_t handle = *id;
1156	struct vmw_relocation *reloc;
1157	int ret;
1158
1159	vmw_validation_preload_bo(sw_context->ctx);
1160	ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
1161	if (ret != 0) {
1162		drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n");
1163		return PTR_ERR(vmw_bo);
1164	}
1165	vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
1166	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
1167	tmp_bo = vmw_bo;
1168	vmw_user_bo_unref(&tmp_bo);
1169	if (unlikely(ret != 0))
1170		return ret;
1171
1172	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
1173	if (!reloc)
1174		return -ENOMEM;
1175
1176	reloc->mob_loc = id;
1177	reloc->vbo = vmw_bo;
1178
1179	*vmw_bo_p = vmw_bo;
1180	list_add_tail(&reloc->head, &sw_context->bo_relocations);
1181
1182	return 0;
1183}
1184
1185/**
1186 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
1187 * to a valid SVGAGuestPtr
1188 *
1189 * @dev_priv: Pointer to a device private structure.
1190 * @sw_context: The software context used for this command batch validation.
1191 * @ptr: Pointer to the user-space handle to be translated.
1192 * @vmw_bo_p: Points to a location that, on successful return will carry a
1193 * non-reference-counted pointer to the DMA buffer identified by the user-space
1194 * handle in @id.
1195 *
1196 * This function saves information needed to translate a user-space buffer
1197 * handle to a valid SVGAGuestPtr. The translation does not take place
1198 * immediately, but during a call to vmw_apply_relocations().
1199 *
1200 * This function builds a relocation list and a list of buffers to validate.
1201 * The former needs to be freed using either vmw_apply_relocations() or
1202 * vmw_free_relocations(). The latter needs to be freed using
1203 * vmw_clear_validations.
1204 */
1205static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
1206				   struct vmw_sw_context *sw_context,
1207				   SVGAGuestPtr *ptr,
1208				   struct vmw_bo **vmw_bo_p)
1209{
1210	struct vmw_bo *vmw_bo, *tmp_bo;
1211	uint32_t handle = ptr->gmrId;
1212	struct vmw_relocation *reloc;
1213	int ret;
1214
1215	vmw_validation_preload_bo(sw_context->ctx);
1216	ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
1217	if (ret != 0) {
1218		drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n");
1219		return PTR_ERR(vmw_bo);
1220	}
1221	vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
1222			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
1223	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
1224	tmp_bo = vmw_bo;
1225	vmw_user_bo_unref(&tmp_bo);
1226	if (unlikely(ret != 0))
1227		return ret;
1228
1229	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
1230	if (!reloc)
1231		return -ENOMEM;
1232
1233	reloc->location = ptr;
1234	reloc->vbo = vmw_bo;
1235	*vmw_bo_p = vmw_bo;
1236	list_add_tail(&reloc->head, &sw_context->bo_relocations);
1237
1238	return 0;
1239}
1240
1241/**
1242 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
1243 *
1244 * @dev_priv: Pointer to a device private struct.
1245 * @sw_context: The software context used for this command submission.
1246 * @header: Pointer to the command header in the command stream.
1247 *
1248 * This function adds the new query into the query COTABLE
1249 */
1250static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
1251				   struct vmw_sw_context *sw_context,
1252				   SVGA3dCmdHeader *header)
1253{
1254	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
1255	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
1256	struct vmw_resource *cotable_res;
1257	int ret;
1258
1259	if (!ctx_node)
1260		return -EINVAL;
1261
1262	cmd = container_of(header, typeof(*cmd), header);
1263
1264	if (cmd->body.type <  SVGA3D_QUERYTYPE_MIN ||
1265	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
1266		return -EINVAL;
1267
1268	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
1269	if (IS_ERR_OR_NULL(cotable_res))
1270		return cotable_res ? PTR_ERR(cotable_res) : -EINVAL;
1271	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);
1272
1273	return ret;
1274}
1275
1276/**
1277 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
1278 *
1279 * @dev_priv: Pointer to a device private struct.
1280 * @sw_context: The software context used for this command submission.
1281 * @header: Pointer to the command header in the command stream.
1282 *
1283 * The query bind operation will eventually associate the query ID with its
1284 * backing MOB.  In this function, we take the user mode MOB ID and use
1285 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
1286 */
1287static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
1288				 struct vmw_sw_context *sw_context,
1289				 SVGA3dCmdHeader *header)
1290{
1291	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
1292	struct vmw_bo *vmw_bo;
1293	int ret;
1294
1295	cmd = container_of(header, typeof(*cmd), header);
1296
1297	/*
1298	 * Look up the buffer pointed to by q.mobid, put it on the relocation
1299	 * list so its kernel mode MOB ID can be filled in later
1300	 */
1301	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1302				    &vmw_bo);
1303
1304	if (ret != 0)
1305		return ret;
1306
1307	sw_context->dx_query_mob = vmw_bo;
1308	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
1309	return 0;
1310}
1311
1312/**
1313 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
1314 *
1315 * @dev_priv: Pointer to a device private struct.
1316 * @sw_context: The software context used for this command submission.
1317 * @header: Pointer to the command header in the command stream.
1318 */
1319static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
1320				  struct vmw_sw_context *sw_context,
1321				  SVGA3dCmdHeader *header)
1322{
1323	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
1324		container_of(header, typeof(*cmd), header);
1325
1326	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1327				 VMW_RES_DIRTY_SET, user_context_converter,
1328				 &cmd->body.cid, NULL);
1329}
1330
1331/**
1332 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
1333 *
1334 * @dev_priv: Pointer to a device private struct.
1335 * @sw_context: The software context used for this command submission.
1336 * @header: Pointer to the command header in the command stream.
1337 */
1338static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
1339			       struct vmw_sw_context *sw_context,
1340			       SVGA3dCmdHeader *header)
1341{
1342	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
1343		container_of(header, typeof(*cmd), header);
1344
1345	if (unlikely(dev_priv->has_mob)) {
1346		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);
1347
1348		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1349
1350		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
1351		gb_cmd.header.size = cmd->header.size;
1352		gb_cmd.body.cid = cmd->body.cid;
1353		gb_cmd.body.type = cmd->body.type;
1354
1355		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1356		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
1357	}
1358
1359	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1360				 VMW_RES_DIRTY_SET, user_context_converter,
1361				 &cmd->body.cid, NULL);
1362}
1363
1364/**
1365 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
1366 *
1367 * @dev_priv: Pointer to a device private struct.
1368 * @sw_context: The software context used for this command submission.
1369 * @header: Pointer to the command header in the command stream.
1370 */
1371static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1372				struct vmw_sw_context *sw_context,
1373				SVGA3dCmdHeader *header)
1374{
1375	struct vmw_bo *vmw_bo;
1376	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
1377	int ret;
1378
1379	cmd = container_of(header, typeof(*cmd), header);
1380	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1381	if (unlikely(ret != 0))
1382		return ret;
1383
1384	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1385				    &vmw_bo);
1386	if (unlikely(ret != 0))
1387		return ret;
1388
1389	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1390
1391	return ret;
1392}
1393
1394/**
1395 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
1396 *
1397 * @dev_priv: Pointer to a device private struct.
1398 * @sw_context: The software context used for this command submission.
1399 * @header: Pointer to the command header in the command stream.
1400 */
1401static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1402			     struct vmw_sw_context *sw_context,
1403			     SVGA3dCmdHeader *header)
1404{
1405	struct vmw_bo *vmw_bo;
1406	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
1407	int ret;
1408
1409	cmd = container_of(header, typeof(*cmd), header);
1410	if (dev_priv->has_mob) {
1411		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);
1412
1413		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1414
1415		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1416		gb_cmd.header.size = cmd->header.size;
1417		gb_cmd.body.cid = cmd->body.cid;
1418		gb_cmd.body.type = cmd->body.type;
1419		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
1420		gb_cmd.body.offset = cmd->body.guestResult.offset;
1421
1422		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1423		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1424	}
1425
1426	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1427	if (unlikely(ret != 0))
1428		return ret;
1429
1430	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1431				      &cmd->body.guestResult, &vmw_bo);
1432	if (unlikely(ret != 0))
1433		return ret;
1434
1435	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1436
1437	return ret;
1438}
1439
1440/**
1441 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
1442 *
1443 * @dev_priv: Pointer to a device private struct.
1444 * @sw_context: The software context used for this command submission.
1445 * @header: Pointer to the command header in the command stream.
1446 */
1447static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1448				 struct vmw_sw_context *sw_context,
1449				 SVGA3dCmdHeader *header)
1450{
1451	struct vmw_bo *vmw_bo;
1452	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
1453	int ret;
1454
1455	cmd = container_of(header, typeof(*cmd), header);
1456	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1457	if (unlikely(ret != 0))
1458		return ret;
1459
1460	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1461				    &vmw_bo);
1462	if (unlikely(ret != 0))
1463		return ret;
1464
1465	return 0;
1466}
1467
1468/**
1469 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
1470 *
1471 * @dev_priv: Pointer to a device private struct.
1472 * @sw_context: The software context used for this command submission.
1473 * @header: Pointer to the command header in the command stream.
1474 */
1475static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1476			      struct vmw_sw_context *sw_context,
1477			      SVGA3dCmdHeader *header)
1478{
1479	struct vmw_bo *vmw_bo;
1480	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
1481	int ret;
1482
1483	cmd = container_of(header, typeof(*cmd), header);
1484	if (dev_priv->has_mob) {
1485		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);
1486
1487		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1488
1489		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1490		gb_cmd.header.size = cmd->header.size;
1491		gb_cmd.body.cid = cmd->body.cid;
1492		gb_cmd.body.type = cmd->body.type;
1493		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
1494		gb_cmd.body.offset = cmd->body.guestResult.offset;
1495
1496		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1497		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1498	}
1499
1500	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1501	if (unlikely(ret != 0))
1502		return ret;
1503
1504	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1505				      &cmd->body.guestResult, &vmw_bo);
1506	if (unlikely(ret != 0))
1507		return ret;
1508
1509	return 0;
1510}
1511
1512static int vmw_cmd_dma(struct vmw_private *dev_priv,
1513		       struct vmw_sw_context *sw_context,
1514		       SVGA3dCmdHeader *header)
1515{
1516	struct vmw_bo *vmw_bo = NULL;
1517	struct vmw_surface *srf = NULL;
1518	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
1519	int ret;
1520	SVGA3dCmdSurfaceDMASuffix *suffix;
1521	uint32_t bo_size;
1522	bool dirty;
1523
1524	cmd = container_of(header, typeof(*cmd), header);
1525	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
1526					       header->size - sizeof(*suffix));
1527
1528	/* Make sure device and verifier stays in sync. */
1529	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1530		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
1531		return -EINVAL;
1532	}
1533
1534	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1535				      &cmd->body.guest.ptr, &vmw_bo);
1536	if (unlikely(ret != 0))
1537		return ret;
1538
1539	/* Make sure DMA doesn't cross BO boundaries. */
1540	bo_size = vmw_bo->tbo.base.size;
1541	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
1542		VMW_DEBUG_USER("Invalid DMA offset.\n");
1543		return -EINVAL;
1544	}
1545
1546	bo_size -= cmd->body.guest.ptr.offset;
1547	if (unlikely(suffix->maximumOffset > bo_size))
1548		suffix->maximumOffset = bo_size;
1549
1550	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
1551		VMW_RES_DIRTY_SET : 0;
1552	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1553				dirty, user_surface_converter,
1554				&cmd->body.host.sid, NULL);
1555	if (unlikely(ret != 0)) {
1556		if (unlikely(ret != -ERESTARTSYS))
1557			VMW_DEBUG_USER("could not find surface for DMA.\n");
1558		return ret;
1559	}
1560
1561	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1562
1563	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo, header);
1564
1565	return 0;
1566}
1567
1568static int vmw_cmd_draw(struct vmw_private *dev_priv,
1569			struct vmw_sw_context *sw_context,
1570			SVGA3dCmdHeader *header)
1571{
1572	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
1573	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1574		(unsigned long)header + sizeof(*cmd));
1575	SVGA3dPrimitiveRange *range;
1576	uint32_t i;
1577	uint32_t maxnum;
1578	int ret;
1579
1580	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1581	if (unlikely(ret != 0))
1582		return ret;
1583
1584	cmd = container_of(header, typeof(*cmd), header);
1585	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1586
1587	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1588		VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
1589		return -EINVAL;
1590	}
1591
1592	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1593		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1594					VMW_RES_DIRTY_NONE,
1595					user_surface_converter,
1596					&decl->array.surfaceId, NULL);
1597		if (unlikely(ret != 0))
1598			return ret;
1599	}
1600
1601	maxnum = (header->size - sizeof(cmd->body) -
1602		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1603	if (unlikely(cmd->body.numRanges > maxnum)) {
1604		VMW_DEBUG_USER("Illegal number of index ranges.\n");
1605		return -EINVAL;
1606	}
1607
1608	range = (SVGA3dPrimitiveRange *) decl;
1609	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1610		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1611					VMW_RES_DIRTY_NONE,
1612					user_surface_converter,
1613					&range->indexArray.surfaceId, NULL);
1614		if (unlikely(ret != 0))
1615			return ret;
1616	}
1617	return 0;
1618}
1619
1620static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1621			     struct vmw_sw_context *sw_context,
1622			     SVGA3dCmdHeader *header)
1623{
1624	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
1625	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1626	  ((unsigned long) header + header->size + sizeof(*header));
1627	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1628		((unsigned long) header + sizeof(*cmd));
1629	struct vmw_resource *ctx;
1630	struct vmw_resource *res;
1631	int ret;
1632
1633	cmd = container_of(header, typeof(*cmd), header);
1634
1635	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1636				VMW_RES_DIRTY_SET, user_context_converter,
1637				&cmd->body.cid, &ctx);
1638	if (unlikely(ret != 0))
1639		return ret;
1640
1641	for (; cur_state < last_state; ++cur_state) {
1642		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1643			continue;
1644
1645		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1646			VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
1647				       (unsigned int) cur_state->stage);
1648			return -EINVAL;
1649		}
1650
1651		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1652					VMW_RES_DIRTY_NONE,
1653					user_surface_converter,
1654					&cur_state->value, &res);
1655		if (unlikely(ret != 0))
1656			return ret;
1657
1658		if (dev_priv->has_mob) {
1659			struct vmw_ctx_bindinfo_tex binding;
1660			struct vmw_ctx_validation_info *node;
1661
1662			node = vmw_execbuf_info_from_res(sw_context, ctx);
1663			if (!node)
1664				return -EINVAL;
1665
1666			binding.bi.ctx = ctx;
1667			binding.bi.res = res;
1668			binding.bi.bt = vmw_ctx_binding_tex;
1669			binding.texture_stage = cur_state->stage;
1670			vmw_binding_add(node->staged, &binding.bi, 0,
1671					binding.texture_stage);
1672		}
1673	}
1674
1675	return 0;
1676}
1677
1678static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1679				      struct vmw_sw_context *sw_context,
1680				      void *buf)
1681{
1682	struct vmw_bo *vmw_bo;
1683
1684	struct {
1685		uint32_t header;
1686		SVGAFifoCmdDefineGMRFB body;
1687	} *cmd = buf;
1688
1689	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
1690				       &vmw_bo);
1691}
1692
1693/**
1694 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1695 * switching
1696 *
1697 * @dev_priv: Pointer to a device private struct.
1698 * @sw_context: The software context being used for this batch.
1699 * @res: Pointer to the resource.
1700 * @buf_id: Pointer to the user-space backup buffer handle in the command
1701 * stream.
1702 * @backup_offset: Offset of backup into MOB.
1703 *
1704 * This function prepares for registering a switch of backup buffers in the
1705 * resource metadata just prior to unreserving. It's basically a wrapper around
1706 * vmw_cmd_res_switch_backup with a different interface.
1707 */
1708static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1709				     struct vmw_sw_context *sw_context,
1710				     struct vmw_resource *res, uint32_t *buf_id,
1711				     unsigned long backup_offset)
1712{
1713	struct vmw_bo *vbo;
1714	void *info;
1715	int ret;
1716
1717	info = vmw_execbuf_info_from_res(sw_context, res);
1718	if (!info)
1719		return -EINVAL;
1720
1721	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
1722	if (ret)
1723		return ret;
1724
1725	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
1726					 backup_offset);
1727	return 0;
1728}
1729
1730/**
1731 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1732 *
1733 * @dev_priv: Pointer to a device private struct.
1734 * @sw_context: The software context being used for this batch.
1735 * @res_type: The resource type.
1736 * @converter: Information about user-space binding for this resource type.
1737 * @res_id: Pointer to the user-space resource handle in the command stream.
1738 * @buf_id: Pointer to the user-space backup buffer handle in the command
1739 * stream.
1740 * @backup_offset: Offset of backup into MOB.
1741 *
1742 * This function prepares for registering a switch of backup buffers in the
1743 * resource metadata just prior to unreserving. It's basically a wrapper around
1744 * vmw_cmd_res_switch_backup with a different interface.
1745 */
1746static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1747				 struct vmw_sw_context *sw_context,
1748				 enum vmw_res_type res_type,
1749				 const struct vmw_user_resource_conv
1750				 *converter, uint32_t *res_id, uint32_t *buf_id,
1751				 unsigned long backup_offset)
1752{
1753	struct vmw_resource *res;
1754	int ret;
1755
1756	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1757				VMW_RES_DIRTY_NONE, converter, res_id, &res);
1758	if (ret)
1759		return ret;
1760
1761	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
1762					 backup_offset);
1763}
1764
1765/**
1766 * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
1767 *
1768 * @dev_priv: Pointer to a device private struct.
1769 * @sw_context: The software context being used for this batch.
1770 * @header: Pointer to the command header in the command stream.
1771 */
1772static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1773				   struct vmw_sw_context *sw_context,
1774				   SVGA3dCmdHeader *header)
1775{
1776	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
1777		container_of(header, typeof(*cmd), header);
1778
1779	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1780				     user_surface_converter, &cmd->body.sid,
1781				     &cmd->body.mobid, 0);
1782}
1783
1784/**
1785 * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
1786 *
1787 * @dev_priv: Pointer to a device private struct.
1788 * @sw_context: The software context being used for this batch.
1789 * @header: Pointer to the command header in the command stream.
1790 */
1791static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1792				   struct vmw_sw_context *sw_context,
1793				   SVGA3dCmdHeader *header)
1794{
1795	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
1796		container_of(header, typeof(*cmd), header);
1797
1798	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1799				 VMW_RES_DIRTY_NONE, user_surface_converter,
1800				 &cmd->body.image.sid, NULL);
1801}
1802
1803/**
1804 * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
1805 *
1806 * @dev_priv: Pointer to a device private struct.
1807 * @sw_context: The software context being used for this batch.
1808 * @header: Pointer to the command header in the command stream.
1809 */
1810static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1811				     struct vmw_sw_context *sw_context,
1812				     SVGA3dCmdHeader *header)
1813{
1814	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
1815		container_of(header, typeof(*cmd), header);
1816
1817	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1818				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1819				 &cmd->body.sid, NULL);
1820}
1821
1822/**
1823 * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
1824 *
1825 * @dev_priv: Pointer to a device private struct.
1826 * @sw_context: The software context being used for this batch.
1827 * @header: Pointer to the command header in the command stream.
1828 */
1829static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1830				     struct vmw_sw_context *sw_context,
1831				     SVGA3dCmdHeader *header)
1832{
1833	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
1834		container_of(header, typeof(*cmd), header);
1835
1836	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1837				 VMW_RES_DIRTY_NONE, user_surface_converter,
1838				 &cmd->body.image.sid, NULL);
1839}
1840
1841/**
1842 * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
1843 * command
1844 *
1845 * @dev_priv: Pointer to a device private struct.
1846 * @sw_context: The software context being used for this batch.
1847 * @header: Pointer to the command header in the command stream.
1848 */
1849static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1850				       struct vmw_sw_context *sw_context,
1851				       SVGA3dCmdHeader *header)
1852{
1853	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
1854		container_of(header, typeof(*cmd), header);
1855
1856	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1857				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1858				 &cmd->body.sid, NULL);
1859}
1860
1861/**
1862 * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1863 * command
1864 *
1865 * @dev_priv: Pointer to a device private struct.
1866 * @sw_context: The software context being used for this batch.
1867 * @header: Pointer to the command header in the command stream.
1868 */
1869static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1870				       struct vmw_sw_context *sw_context,
1871				       SVGA3dCmdHeader *header)
1872{
1873	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
1874		container_of(header, typeof(*cmd), header);
1875
1876	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1877				 VMW_RES_DIRTY_NONE, user_surface_converter,
1878				 &cmd->body.image.sid, NULL);
1879}
1880
1881/**
1882 * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
1883 * command
1884 *
1885 * @dev_priv: Pointer to a device private struct.
1886 * @sw_context: The software context being used for this batch.
1887 * @header: Pointer to the command header in the command stream.
1888 */
1889static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1890					 struct vmw_sw_context *sw_context,
1891					 SVGA3dCmdHeader *header)
1892{
1893	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
1894		container_of(header, typeof(*cmd), header);
1895
1896	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1897				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1898				 &cmd->body.sid, NULL);
1899}
1900
1901/**
1902 * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
1903 *
1904 * @dev_priv: Pointer to a device private struct.
1905 * @sw_context: The software context being used for this batch.
1906 * @header: Pointer to the command header in the command stream.
1907 */
1908static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1909				 struct vmw_sw_context *sw_context,
1910				 SVGA3dCmdHeader *header)
1911{
1912	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
1913	int ret;
1914	size_t size;
1915	struct vmw_resource *ctx;
1916
1917	cmd = container_of(header, typeof(*cmd), header);
1918
1919	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1920				VMW_RES_DIRTY_SET, user_context_converter,
1921				&cmd->body.cid, &ctx);
1922	if (unlikely(ret != 0))
1923		return ret;
1924
1925	if (unlikely(!dev_priv->has_mob))
1926		return 0;
1927
1928	size = cmd->header.size - sizeof(cmd->body);
1929	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
1930				    cmd->body.shid, cmd + 1, cmd->body.type,
1931				    size, &sw_context->staged_cmd_res);
1932	if (unlikely(ret != 0))
1933		return ret;
1934
1935	return vmw_resource_relocation_add(sw_context, NULL,
1936					   vmw_ptr_diff(sw_context->buf_start,
1937							&cmd->header.id),
1938					   vmw_res_rel_nop);
1939}
1940
1941/**
1942 * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
1943 *
1944 * @dev_priv: Pointer to a device private struct.
1945 * @sw_context: The software context being used for this batch.
1946 * @header: Pointer to the command header in the command stream.
1947 */
1948static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1949				  struct vmw_sw_context *sw_context,
1950				  SVGA3dCmdHeader *header)
1951{
1952	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
1953	int ret;
1954	struct vmw_resource *ctx;
1955
1956	cmd = container_of(header, typeof(*cmd), header);
1957
1958	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1959				VMW_RES_DIRTY_SET, user_context_converter,
1960				&cmd->body.cid, &ctx);
1961	if (unlikely(ret != 0))
1962		return ret;
1963
1964	if (unlikely(!dev_priv->has_mob))
1965		return 0;
1966
1967	ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
1968				cmd->body.type, &sw_context->staged_cmd_res);
1969	if (unlikely(ret != 0))
1970		return ret;
1971
1972	return vmw_resource_relocation_add(sw_context, NULL,
1973					   vmw_ptr_diff(sw_context->buf_start,
1974							&cmd->header.id),
1975					   vmw_res_rel_nop);
1976}
1977
1978/**
1979 * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
1980 *
1981 * @dev_priv: Pointer to a device private struct.
1982 * @sw_context: The software context being used for this batch.
1983 * @header: Pointer to the command header in the command stream.
1984 */
1985static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1986			      struct vmw_sw_context *sw_context,
1987			      SVGA3dCmdHeader *header)
1988{
1989	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
1990	struct vmw_ctx_bindinfo_shader binding;
1991	struct vmw_resource *ctx, *res = NULL;
1992	struct vmw_ctx_validation_info *ctx_info;
1993	int ret;
1994
1995	cmd = container_of(header, typeof(*cmd), header);
1996
1997	if (!vmw_shadertype_is_valid(VMW_SM_LEGACY, cmd->body.type)) {
1998		VMW_DEBUG_USER("Illegal shader type %u.\n",
1999			       (unsigned int) cmd->body.type);
2000		return -EINVAL;
2001	}
2002
2003	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2004				VMW_RES_DIRTY_SET, user_context_converter,
2005				&cmd->body.cid, &ctx);
2006	if (unlikely(ret != 0))
2007		return ret;
2008
2009	if (!dev_priv->has_mob)
2010		return 0;
2011
2012	if (cmd->body.shid != SVGA3D_INVALID_ID) {
2013		/*
2014		 * This is the compat shader path - Per device guest-backed
2015		 * shaders, but user-space thinks it's per context host-
2016		 * backed shaders.
2017		 */
2018		res = vmw_shader_lookup(vmw_context_res_man(ctx),
2019					cmd->body.shid, cmd->body.type);
2020		if (!IS_ERR(res)) {
2021			ret = vmw_execbuf_res_val_add(sw_context, res,
2022						      VMW_RES_DIRTY_NONE,
2023						      vmw_val_add_flag_noctx);
2024			if (unlikely(ret != 0))
2025				return ret;
2026
2027			ret = vmw_resource_relocation_add
2028				(sw_context, res,
2029				 vmw_ptr_diff(sw_context->buf_start,
2030					      &cmd->body.shid),
2031				 vmw_res_rel_normal);
2032			if (unlikely(ret != 0))
2033				return ret;
2034		}
2035	}
2036
2037	if (IS_ERR_OR_NULL(res)) {
2038		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
2039					VMW_RES_DIRTY_NONE,
2040					user_shader_converter, &cmd->body.shid,
2041					&res);
2042		if (unlikely(ret != 0))
2043			return ret;
2044	}
2045
2046	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
2047	if (!ctx_info)
2048		return -EINVAL;
2049
2050	binding.bi.ctx = ctx;
2051	binding.bi.res = res;
2052	binding.bi.bt = vmw_ctx_binding_shader;
2053	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2054	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);
2055
2056	return 0;
2057}
2058
2059/**
2060 * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
2061 *
2062 * @dev_priv: Pointer to a device private struct.
2063 * @sw_context: The software context being used for this batch.
2064 * @header: Pointer to the command header in the command stream.
2065 */
2066static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2067				    struct vmw_sw_context *sw_context,
2068				    SVGA3dCmdHeader *header)
2069{
2070	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
2071	int ret;
2072
2073	cmd = container_of(header, typeof(*cmd), header);
2074
2075	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2076				VMW_RES_DIRTY_SET, user_context_converter,
2077				&cmd->body.cid, NULL);
2078	if (unlikely(ret != 0))
2079		return ret;
2080
2081	if (dev_priv->has_mob)
2082		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2083
2084	return 0;
2085}
2086
2087/**
2088 * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
2089 *
2090 * @dev_priv: Pointer to a device private struct.
2091 * @sw_context: The software context being used for this batch.
2092 * @header: Pointer to the command header in the command stream.
2093 */
2094static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2095				  struct vmw_sw_context *sw_context,
2096				  SVGA3dCmdHeader *header)
2097{
2098	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
2099		container_of(header, typeof(*cmd), header);
2100
2101	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2102				     user_shader_converter, &cmd->body.shid,
2103				     &cmd->body.mobid, cmd->body.offsetInBytes);
2104}
2105
2106/**
2107 * vmw_cmd_dx_set_single_constant_buffer - Validate
2108 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2109 *
2110 * @dev_priv: Pointer to a device private struct.
2111 * @sw_context: The software context being used for this batch.
2112 * @header: Pointer to the command header in the command stream.
2113 */
2114static int
2115vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2116				      struct vmw_sw_context *sw_context,
2117				      SVGA3dCmdHeader *header)
2118{
2119	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
2120
2121	struct vmw_resource *res = NULL;
2122	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2123	struct vmw_ctx_bindinfo_cb binding;
2124	int ret;
2125
2126	if (!ctx_node)
2127		return -EINVAL;
2128
2129	cmd = container_of(header, typeof(*cmd), header);
2130	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2131				VMW_RES_DIRTY_NONE, user_surface_converter,
2132				&cmd->body.sid, &res);
2133	if (unlikely(ret != 0))
2134		return ret;
2135
2136	if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type) ||
2137	    cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2138		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
2139			       (unsigned int) cmd->body.type,
2140			       (unsigned int) cmd->body.slot);
2141		return -EINVAL;
2142	}
2143
2144	binding.bi.ctx = ctx_node->ctx;
2145	binding.bi.res = res;
2146	binding.bi.bt = vmw_ctx_binding_cb;
2147	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2148	binding.offset = cmd->body.offsetInBytes;
2149	binding.size = cmd->body.sizeInBytes;
2150	binding.slot = cmd->body.slot;
2151
2152	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
2153			binding.slot);
2154
2155	return 0;
2156}
2157
2158/**
2159 * vmw_cmd_dx_set_constant_buffer_offset - Validate
2160 * SVGA_3D_CMD_DX_SET_VS/PS/GS/HS/DS/CS_CONSTANT_BUFFER_OFFSET command.
2161 *
2162 * @dev_priv: Pointer to a device private struct.
2163 * @sw_context: The software context being used for this batch.
2164 * @header: Pointer to the command header in the command stream.
2165 */
2166static int
2167vmw_cmd_dx_set_constant_buffer_offset(struct vmw_private *dev_priv,
2168				      struct vmw_sw_context *sw_context,
2169				      SVGA3dCmdHeader *header)
2170{
2171	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetConstantBufferOffset);
2172
2173	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2174	u32 shader_slot;
2175
2176	if (!has_sm5_context(dev_priv))
2177		return -EINVAL;
2178
2179	if (!ctx_node)
2180		return -EINVAL;
2181
2182	cmd = container_of(header, typeof(*cmd), header);
2183	if (cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2184		VMW_DEBUG_USER("Illegal const buffer slot %u.\n",
2185			       (unsigned int) cmd->body.slot);
2186		return -EINVAL;
2187	}
2188
2189	shader_slot = cmd->header.id - SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET;
2190	vmw_binding_cb_offset_update(ctx_node->staged, shader_slot,
2191				     cmd->body.slot, cmd->body.offsetInBytes);
2192
2193	return 0;
2194}
2195
2196/**
2197 * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
2198 * command
2199 *
2200 * @dev_priv: Pointer to a device private struct.
2201 * @sw_context: The software context being used for this batch.
2202 * @header: Pointer to the command header in the command stream.
2203 */
2204static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2205				     struct vmw_sw_context *sw_context,
2206				     SVGA3dCmdHeader *header)
2207{
2208	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
2209		container_of(header, typeof(*cmd), header);
2210
2211	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2212		sizeof(SVGA3dShaderResourceViewId);
2213
2214	if ((u64) cmd->body.startView + (u64) num_sr_view >
2215	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
2216	    !vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
2217		VMW_DEBUG_USER("Invalid shader binding.\n");
2218		return -EINVAL;
2219	}
2220
2221	return vmw_view_bindings_add(sw_context, vmw_view_sr,
2222				     vmw_ctx_binding_sr,
2223				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2224				     (void *) &cmd[1], num_sr_view,
2225				     cmd->body.startView);
2226}
2227
2228/**
2229 * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
2230 *
2231 * @dev_priv: Pointer to a device private struct.
2232 * @sw_context: The software context being used for this batch.
2233 * @header: Pointer to the command header in the command stream.
2234 */
2235static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2236				 struct vmw_sw_context *sw_context,
2237				 SVGA3dCmdHeader *header)
2238{
2239	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
2240	struct vmw_resource *res = NULL;
2241	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2242	struct vmw_ctx_bindinfo_shader binding;
2243	int ret = 0;
2244
2245	if (!ctx_node)
2246		return -EINVAL;
2247
2248	cmd = container_of(header, typeof(*cmd), header);
2249
2250	if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
2251		VMW_DEBUG_USER("Illegal shader type %u.\n",
2252			       (unsigned int) cmd->body.type);
2253		return -EINVAL;
2254	}
2255
2256	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2257		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2258		if (IS_ERR(res)) {
2259			VMW_DEBUG_USER("Could not find shader for binding.\n");
2260			return PTR_ERR(res);
2261		}
2262
2263		ret = vmw_execbuf_res_val_add(sw_context, res,
2264					      VMW_RES_DIRTY_NONE,
2265					      vmw_val_add_flag_noctx);
2266		if (ret)
2267			return ret;
2268	}
2269
2270	binding.bi.ctx = ctx_node->ctx;
2271	binding.bi.res = res;
2272	binding.bi.bt = vmw_ctx_binding_dx_shader;
2273	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2274
2275	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);
2276
2277	return 0;
2278}
2279
2280/**
2281 * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
2282 * command
2283 *
2284 * @dev_priv: Pointer to a device private struct.
2285 * @sw_context: The software context being used for this batch.
2286 * @header: Pointer to the command header in the command stream.
2287 */
2288static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2289					 struct vmw_sw_context *sw_context,
2290					 SVGA3dCmdHeader *header)
2291{
2292	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2293	struct vmw_ctx_bindinfo_vb binding;
2294	struct vmw_resource *res;
2295	struct {
2296		SVGA3dCmdHeader header;
2297		SVGA3dCmdDXSetVertexBuffers body;
2298		SVGA3dVertexBuffer buf[];
2299	} *cmd;
2300	int i, ret, num;
2301
2302	if (!ctx_node)
2303		return -EINVAL;
2304
2305	cmd = container_of(header, typeof(*cmd), header);
2306	num = (cmd->header.size - sizeof(cmd->body)) /
2307		sizeof(SVGA3dVertexBuffer);
2308	if ((u64)num + (u64)cmd->body.startBuffer >
2309	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2310		VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
2311		return -EINVAL;
2312	}
2313
2314	for (i = 0; i < num; i++) {
2315		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2316					VMW_RES_DIRTY_NONE,
2317					user_surface_converter,
2318					&cmd->buf[i].sid, &res);
2319		if (unlikely(ret != 0))
2320			return ret;
2321
2322		binding.bi.ctx = ctx_node->ctx;
2323		binding.bi.bt = vmw_ctx_binding_vb;
2324		binding.bi.res = res;
2325		binding.offset = cmd->buf[i].offset;
2326		binding.stride = cmd->buf[i].stride;
2327		binding.slot = i + cmd->body.startBuffer;
2328
2329		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2330	}
2331
2332	return 0;
2333}
2334
2335/**
2336 * vmw_cmd_dx_set_index_buffer - Validate
2337 * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
2338 *
2339 * @dev_priv: Pointer to a device private struct.
2340 * @sw_context: The software context being used for this batch.
2341 * @header: Pointer to the command header in the command stream.
2342 */
2343static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2344				       struct vmw_sw_context *sw_context,
2345				       SVGA3dCmdHeader *header)
2346{
2347	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2348	struct vmw_ctx_bindinfo_ib binding;
2349	struct vmw_resource *res;
2350	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
2351	int ret;
2352
2353	if (!ctx_node)
2354		return -EINVAL;
2355
2356	cmd = container_of(header, typeof(*cmd), header);
2357	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2358				VMW_RES_DIRTY_NONE, user_surface_converter,
2359				&cmd->body.sid, &res);
2360	if (unlikely(ret != 0))
2361		return ret;
2362
2363	binding.bi.ctx = ctx_node->ctx;
2364	binding.bi.res = res;
2365	binding.bi.bt = vmw_ctx_binding_ib;
2366	binding.offset = cmd->body.offset;
2367	binding.format = cmd->body.format;
2368
2369	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);
2370
2371	return 0;
2372}
2373
2374/**
2375 * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
2376 * command
2377 *
2378 * @dev_priv: Pointer to a device private struct.
2379 * @sw_context: The software context being used for this batch.
2380 * @header: Pointer to the command header in the command stream.
2381 */
2382static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2383					struct vmw_sw_context *sw_context,
2384					SVGA3dCmdHeader *header)
2385{
2386	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
2387		container_of(header, typeof(*cmd), header);
2388	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2389		sizeof(SVGA3dRenderTargetViewId);
2390	int ret;
2391
2392	if (num_rt_view > SVGA3D_DX_MAX_RENDER_TARGETS) {
2393		VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
2394		return -EINVAL;
2395	}
2396
2397	ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
2398				    0, &cmd->body.depthStencilViewId, 1, 0);
2399	if (ret)
2400		return ret;
2401
2402	return vmw_view_bindings_add(sw_context, vmw_view_rt,
2403				     vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
2404				     num_rt_view, 0);
2405}
2406
2407/**
2408 * vmw_cmd_dx_clear_rendertarget_view - Validate
2409 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2410 *
2411 * @dev_priv: Pointer to a device private struct.
2412 * @sw_context: The software context being used for this batch.
2413 * @header: Pointer to the command header in the command stream.
2414 */
2415static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2416					      struct vmw_sw_context *sw_context,
2417					      SVGA3dCmdHeader *header)
2418{
2419	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
2420		container_of(header, typeof(*cmd), header);
2421	struct vmw_resource *ret;
2422
2423	ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
2424				  cmd->body.renderTargetViewId);
2425
2426	return PTR_ERR_OR_ZERO(ret);
2427}
2428
2429/**
2430 * vmw_cmd_dx_clear_depthstencil_view - Validate
2431 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2432 *
2433 * @dev_priv: Pointer to a device private struct.
2434 * @sw_context: The software context being used for this batch.
2435 * @header: Pointer to the command header in the command stream.
2436 */
2437static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2438					      struct vmw_sw_context *sw_context,
2439					      SVGA3dCmdHeader *header)
2440{
2441	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
2442		container_of(header, typeof(*cmd), header);
2443	struct vmw_resource *ret;
2444
2445	ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
2446				  cmd->body.depthStencilViewId);
2447
2448	return PTR_ERR_OR_ZERO(ret);
2449}
2450
2451static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2452				  struct vmw_sw_context *sw_context,
2453				  SVGA3dCmdHeader *header)
2454{
2455	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2456	struct vmw_resource *srf;
2457	struct vmw_resource *res;
2458	enum vmw_view_type view_type;
2459	int ret;
2460	/*
2461	 * This is based on the fact that all affected define commands have the
2462	 * same initial command body layout.
2463	 */
2464	struct {
2465		SVGA3dCmdHeader header;
2466		uint32 defined_id;
2467		uint32 sid;
2468	} *cmd;
2469
2470	if (!ctx_node)
2471		return -EINVAL;
2472
2473	view_type = vmw_view_cmd_to_type(header->id);
2474	if (view_type == vmw_view_max)
2475		return -EINVAL;
2476
2477	cmd = container_of(header, typeof(*cmd), header);
2478	if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
2479		VMW_DEBUG_USER("Invalid surface id.\n");
2480		return -EINVAL;
2481	}
2482	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2483				VMW_RES_DIRTY_NONE, user_surface_converter,
2484				&cmd->sid, &srf);
2485	if (unlikely(ret != 0))
2486		return ret;
2487
2488	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
2489	if (IS_ERR_OR_NULL(res))
2490		return res ? PTR_ERR(res) : -EINVAL;
2491	ret = vmw_cotable_notify(res, cmd->defined_id);
2492	if (unlikely(ret != 0))
2493		return ret;
2494
2495	return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
2496			    cmd->defined_id, header,
2497			    header->size + sizeof(*header),
2498			    &sw_context->staged_cmd_res);
2499}
2500
2501/**
2502 * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
2503 *
2504 * @dev_priv: Pointer to a device private struct.
2505 * @sw_context: The software context being used for this batch.
2506 * @header: Pointer to the command header in the command stream.
2507 */
2508static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2509				     struct vmw_sw_context *sw_context,
2510				     SVGA3dCmdHeader *header)
2511{
2512	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2513	struct vmw_ctx_bindinfo_so_target binding;
2514	struct vmw_resource *res;
2515	struct {
2516		SVGA3dCmdHeader header;
2517		SVGA3dCmdDXSetSOTargets body;
2518		SVGA3dSoTarget targets[];
2519	} *cmd;
2520	int i, ret, num;
2521
2522	if (!ctx_node)
2523		return -EINVAL;
2524
2525	cmd = container_of(header, typeof(*cmd), header);
2526	num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);
2527
2528	if (num > SVGA3D_DX_MAX_SOTARGETS) {
2529		VMW_DEBUG_USER("Invalid DX SO binding.\n");
2530		return -EINVAL;
2531	}
2532
2533	for (i = 0; i < num; i++) {
2534		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2535					VMW_RES_DIRTY_SET,
2536					user_surface_converter,
2537					&cmd->targets[i].sid, &res);
2538		if (unlikely(ret != 0))
2539			return ret;
2540
2541		binding.bi.ctx = ctx_node->ctx;
2542		binding.bi.res = res;
2543		binding.bi.bt = vmw_ctx_binding_so_target;
2544		binding.offset = cmd->targets[i].offset;
2545		binding.size = cmd->targets[i].sizeInBytes;
2546		binding.slot = i;
2547
2548		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2549	}
2550
2551	return 0;
2552}
2553
2554static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2555				struct vmw_sw_context *sw_context,
2556				SVGA3dCmdHeader *header)
2557{
2558	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2559	struct vmw_resource *res;
2560	/*
2561	 * This is based on the fact that all affected define commands have
2562	 * the same initial command body layout.
2563	 */
2564	struct {
2565		SVGA3dCmdHeader header;
2566		uint32 defined_id;
2567	} *cmd;
2568	enum vmw_so_type so_type;
2569	int ret;
2570
2571	if (!ctx_node)
2572		return -EINVAL;
2573
2574	so_type = vmw_so_cmd_to_type(header->id);
2575	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
2576	if (IS_ERR_OR_NULL(res))
2577		return res ? PTR_ERR(res) : -EINVAL;
2578	cmd = container_of(header, typeof(*cmd), header);
2579	ret = vmw_cotable_notify(res, cmd->defined_id);
2580
2581	return ret;
2582}
2583
2584/**
2585 * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
2586 * command
2587 *
2588 * @dev_priv: Pointer to a device private struct.
2589 * @sw_context: The software context being used for this batch.
2590 * @header: Pointer to the command header in the command stream.
2591 */
2592static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2593					struct vmw_sw_context *sw_context,
2594					SVGA3dCmdHeader *header)
2595{
2596	struct {
2597		SVGA3dCmdHeader header;
2598		union {
2599			SVGA3dCmdDXReadbackSubResource r_body;
2600			SVGA3dCmdDXInvalidateSubResource i_body;
2601			SVGA3dCmdDXUpdateSubResource u_body;
2602			SVGA3dSurfaceId sid;
2603		};
2604	} *cmd;
2605
2606	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2607		     offsetof(typeof(*cmd), sid));
2608	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2609		     offsetof(typeof(*cmd), sid));
2610	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2611		     offsetof(typeof(*cmd), sid));
2612
2613	cmd = container_of(header, typeof(*cmd), header);
2614	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2615				 VMW_RES_DIRTY_NONE, user_surface_converter,
2616				 &cmd->sid, NULL);
2617}
2618
2619static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2620				struct vmw_sw_context *sw_context,
2621				SVGA3dCmdHeader *header)
2622{
2623	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2624
2625	if (!ctx_node)
2626		return -EINVAL;
2627
2628	return 0;
2629}
2630
2631/**
2632 * vmw_cmd_dx_view_remove - validate a view remove command and schedule the view
2633 * resource for removal.
2634 *
2635 * @dev_priv: Pointer to a device private struct.
2636 * @sw_context: The software context being used for this batch.
2637 * @header: Pointer to the command header in the command stream.
2638 *
2639 * Check that the view exists, and if it was not created using this command
2640 * batch, conditionally make this command a NOP.
2641 */
2642static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2643				  struct vmw_sw_context *sw_context,
2644				  SVGA3dCmdHeader *header)
2645{
2646	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2647	struct {
2648		SVGA3dCmdHeader header;
2649		union vmw_view_destroy body;
2650	} *cmd = container_of(header, typeof(*cmd), header);
2651	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2652	struct vmw_resource *view;
2653	int ret;
2654
2655	if (!ctx_node)
2656		return -EINVAL;
2657
2658	ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
2659			      &sw_context->staged_cmd_res, &view);
2660	if (ret || !view)
2661		return ret;
2662
2663	/*
2664	 * If the view wasn't created during this command batch, it might
2665	 * have been removed due to a context swapout, so add a
2666	 * relocation to conditionally make this command a NOP to avoid
2667	 * device errors.
2668	 */
2669	return vmw_resource_relocation_add(sw_context, view,
2670					   vmw_ptr_diff(sw_context->buf_start,
2671							&cmd->header.id),
2672					   vmw_res_rel_cond_nop);
2673}
2674
2675/**
2676 * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
2677 *
2678 * @dev_priv: Pointer to a device private struct.
2679 * @sw_context: The software context being used for this batch.
2680 * @header: Pointer to the command header in the command stream.
2681 */
2682static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2683				    struct vmw_sw_context *sw_context,
2684				    SVGA3dCmdHeader *header)
2685{
2686	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2687	struct vmw_resource *res;
2688	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
2689		container_of(header, typeof(*cmd), header);
2690	int ret;
2691
2692	if (!ctx_node)
2693		return -EINVAL;
2694
2695	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
2696	if (IS_ERR_OR_NULL(res))
2697		return res ? PTR_ERR(res) : -EINVAL;
2698	ret = vmw_cotable_notify(res, cmd->body.shaderId);
2699	if (ret)
2700		return ret;
2701
2702	return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
2703				 cmd->body.shaderId, cmd->body.type,
2704				 &sw_context->staged_cmd_res);
2705}
2706
2707/**
2708 * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
2709 *
2710 * @dev_priv: Pointer to a device private struct.
2711 * @sw_context: The software context being used for this batch.
2712 * @header: Pointer to the command header in the command stream.
2713 */
2714static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2715				     struct vmw_sw_context *sw_context,
2716				     SVGA3dCmdHeader *header)
2717{
2718	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2719	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
2720		container_of(header, typeof(*cmd), header);
2721	int ret;
2722
2723	if (!ctx_node)
2724		return -EINVAL;
2725
2726	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2727				&sw_context->staged_cmd_res);
2728
2729	return ret;
2730}
2731
2732/**
2733 * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
2734 *
2735 * @dev_priv: Pointer to a device private struct.
2736 * @sw_context: The software context being used for this batch.
2737 * @header: Pointer to the command header in the command stream.
2738 */
2739static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2740				  struct vmw_sw_context *sw_context,
2741				  SVGA3dCmdHeader *header)
2742{
2743	struct vmw_resource *ctx;
2744	struct vmw_resource *res;
2745	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
2746		container_of(header, typeof(*cmd), header);
2747	int ret;
2748
2749	if (cmd->body.cid != SVGA3D_INVALID_ID) {
2750		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2751					VMW_RES_DIRTY_SET,
2752					user_context_converter, &cmd->body.cid,
2753					&ctx);
2754		if (ret)
2755			return ret;
2756	} else {
2757		struct vmw_ctx_validation_info *ctx_node =
2758			VMW_GET_CTX_NODE(sw_context);
2759
2760		if (!ctx_node)
2761			return -EINVAL;
2762
2763		ctx = ctx_node->ctx;
2764	}
2765
2766	res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
2767	if (IS_ERR(res)) {
2768		VMW_DEBUG_USER("Could not find shader to bind.\n");
2769		return PTR_ERR(res);
2770	}
2771
2772	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
2773				      vmw_val_add_flag_noctx);
2774	if (ret) {
2775		VMW_DEBUG_USER("Error creating resource validation node.\n");
2776		return ret;
2777	}
2778
2779	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
2780					 &cmd->body.mobid,
2781					 cmd->body.offsetInBytes);
2782}
2783
2784/**
2785 * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
2786 *
2787 * @dev_priv: Pointer to a device private struct.
2788 * @sw_context: The software context being used for this batch.
2789 * @header: Pointer to the command header in the command stream.
2790 */
2791static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
2792			      struct vmw_sw_context *sw_context,
2793			      SVGA3dCmdHeader *header)
2794{
2795	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
2796		container_of(header, typeof(*cmd), header);
2797	struct vmw_resource *view;
2798	struct vmw_res_cache_entry *rcache;
2799
2800	view = vmw_view_id_val_add(sw_context, vmw_view_sr,
2801				   cmd->body.shaderResourceViewId);
2802	if (IS_ERR(view))
2803		return PTR_ERR(view);
2804
2805	/*
2806	 * Normally the shader-resource view is not gpu-dirtying, but for
2807	 * this particular command it is...
2808	 * So mark the last looked-up surface, which is the surface
2809	 * the view points to, gpu-dirty.
2810	 */
2811	rcache = &sw_context->res_cache[vmw_res_surface];
2812	vmw_validation_res_set_dirty(sw_context->ctx, rcache->private,
2813				     VMW_RES_DIRTY_SET);
2814	return 0;
2815}
2816
2817/**
2818 * vmw_cmd_dx_transfer_from_buffer - Validate
2819 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
2820 *
2821 * @dev_priv: Pointer to a device private struct.
2822 * @sw_context: The software context being used for this batch.
2823 * @header: Pointer to the command header in the command stream.
2824 */
2825static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
2826					   struct vmw_sw_context *sw_context,
2827					   SVGA3dCmdHeader *header)
2828{
2829	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
2830		container_of(header, typeof(*cmd), header);
2831	int ret;
2832
2833	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2834				VMW_RES_DIRTY_NONE, user_surface_converter,
2835				&cmd->body.srcSid, NULL);
2836	if (ret != 0)
2837		return ret;
2838
2839	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2840				 VMW_RES_DIRTY_SET, user_surface_converter,
2841				 &cmd->body.destSid, NULL);
2842}
2843
2844/**
2845 * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
2846 *
2847 * @dev_priv: Pointer to a device private struct.
2848 * @sw_context: The software context being used for this batch.
2849 * @header: Pointer to the command header in the command stream.
2850 */
2851static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
2852					   struct vmw_sw_context *sw_context,
2853					   SVGA3dCmdHeader *header)
2854{
2855	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
2856		container_of(header, typeof(*cmd), header);
2857
2858	if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
2859		return -EINVAL;
2860
2861	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2862				 VMW_RES_DIRTY_SET, user_surface_converter,
2863				 &cmd->body.surface.sid, NULL);
2864}
2865
2866static int vmw_cmd_sm5(struct vmw_private *dev_priv,
2867		       struct vmw_sw_context *sw_context,
2868		       SVGA3dCmdHeader *header)
2869{
2870	if (!has_sm5_context(dev_priv))
2871		return -EINVAL;
2872
2873	return 0;
2874}
2875
2876static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv,
2877				   struct vmw_sw_context *sw_context,
2878				   SVGA3dCmdHeader *header)
2879{
2880	if (!has_sm5_context(dev_priv))
2881		return -EINVAL;
2882
2883	return vmw_cmd_dx_view_define(dev_priv, sw_context, header);
2884}
2885
2886static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv,
2887				   struct vmw_sw_context *sw_context,
2888				   SVGA3dCmdHeader *header)
2889{
2890	if (!has_sm5_context(dev_priv))
2891		return -EINVAL;
2892
2893	return vmw_cmd_dx_view_remove(dev_priv, sw_context, header);
2894}
2895
2896static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv,
2897				  struct vmw_sw_context *sw_context,
2898				  SVGA3dCmdHeader *header)
2899{
2900	struct {
2901		SVGA3dCmdHeader header;
2902		SVGA3dCmdDXClearUAViewUint body;
2903	} *cmd = container_of(header, typeof(*cmd), header);
2904	struct vmw_resource *ret;
2905
2906	if (!has_sm5_context(dev_priv))
2907		return -EINVAL;
2908
2909	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2910				  cmd->body.uaViewId);
2911
2912	return PTR_ERR_OR_ZERO(ret);
2913}
2914
2915static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv,
2916				   struct vmw_sw_context *sw_context,
2917				   SVGA3dCmdHeader *header)
2918{
2919	struct {
2920		SVGA3dCmdHeader header;
2921		SVGA3dCmdDXClearUAViewFloat body;
2922	} *cmd = container_of(header, typeof(*cmd), header);
2923	struct vmw_resource *ret;
2924
2925	if (!has_sm5_context(dev_priv))
2926		return -EINVAL;
2927
2928	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2929				  cmd->body.uaViewId);
2930
2931	return PTR_ERR_OR_ZERO(ret);
2932}
2933
2934static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
2935			   struct vmw_sw_context *sw_context,
2936			   SVGA3dCmdHeader *header)
2937{
2938	struct {
2939		SVGA3dCmdHeader header;
2940		SVGA3dCmdDXSetUAViews body;
2941	} *cmd = container_of(header, typeof(*cmd), header);
2942	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2943		sizeof(SVGA3dUAViewId);
2944	int ret;
2945
2946	if (!has_sm5_context(dev_priv))
2947		return -EINVAL;
2948
2949	if (num_uav > vmw_max_num_uavs(dev_priv)) {
2950		VMW_DEBUG_USER("Invalid UAV binding.\n");
2951		return -EINVAL;
2952	}
2953
2954	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2955				    vmw_ctx_binding_uav, 0, (void *)&cmd[1],
2956				    num_uav, 0);
2957	if (ret)
2958		return ret;
2959
2960	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 0,
2961					 cmd->body.uavSpliceIndex);
2962
2963	return ret;
2964}
2965
2966static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
2967			      struct vmw_sw_context *sw_context,
2968			      SVGA3dCmdHeader *header)
2969{
2970	struct {
2971		SVGA3dCmdHeader header;
2972		SVGA3dCmdDXSetCSUAViews body;
2973	} *cmd = container_of(header, typeof(*cmd), header);
2974	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2975		sizeof(SVGA3dUAViewId);
2976	int ret;
2977
2978	if (!has_sm5_context(dev_priv))
2979		return -EINVAL;
2980
2981	if (num_uav > vmw_max_num_uavs(dev_priv)) {
2982		VMW_DEBUG_USER("Invalid UAV binding.\n");
2983		return -EINVAL;
2984	}
2985
2986	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2987				    vmw_ctx_binding_cs_uav, 0, (void *)&cmd[1],
2988				    num_uav, 0);
2989	if (ret)
2990		return ret;
2991
2992	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 1,
2993				  cmd->body.startIndex);
2994
2995	return ret;
2996}
2997
2998static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
2999					  struct vmw_sw_context *sw_context,
3000					  SVGA3dCmdHeader *header)
3001{
3002	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3003	struct vmw_resource *res;
3004	struct {
3005		SVGA3dCmdHeader header;
3006		SVGA3dCmdDXDefineStreamOutputWithMob body;
3007	} *cmd = container_of(header, typeof(*cmd), header);
3008	int ret;
3009
3010	if (!has_sm5_context(dev_priv))
3011		return -EINVAL;
3012
3013	if (!ctx_node) {
3014		DRM_ERROR("DX Context not set.\n");
3015		return -EINVAL;
3016	}
3017
3018	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
3019	if (IS_ERR_OR_NULL(res))
3020		return res ? PTR_ERR(res) : -EINVAL;
3021	ret = vmw_cotable_notify(res, cmd->body.soid);
3022	if (ret)
3023		return ret;
3024
3025	return vmw_dx_streamoutput_add(sw_context->man, ctx_node->ctx,
3026				       cmd->body.soid,
3027				       &sw_context->staged_cmd_res);
3028}
3029
3030static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv,
3031					   struct vmw_sw_context *sw_context,
3032					   SVGA3dCmdHeader *header)
3033{
3034	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3035	struct vmw_resource *res;
3036	struct {
3037		SVGA3dCmdHeader header;
3038		SVGA3dCmdDXDestroyStreamOutput body;
3039	} *cmd = container_of(header, typeof(*cmd), header);
3040
3041	if (!ctx_node) {
3042		DRM_ERROR("DX Context not set.\n");
3043		return -EINVAL;
3044	}
3045
3046	/*
3047	 * When device does not support SM5 then streamoutput with mob command is
3048	 * not available to user-space. Simply return in this case.
3049	 */
3050	if (!has_sm5_context(dev_priv))
3051		return 0;
3052
3053	/*
3054	 * With SM5 capable device if lookup fails then user-space probably used
3055	 * old streamoutput define command. Return without an error.
3056	 */
3057	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3058					 cmd->body.soid);
3059	if (IS_ERR(res))
3060		return 0;
3061
3062	return vmw_dx_streamoutput_remove(sw_context->man, cmd->body.soid,
3063					  &sw_context->staged_cmd_res);
3064}
3065
3066static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
3067					struct vmw_sw_context *sw_context,
3068					SVGA3dCmdHeader *header)
3069{
3070	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3071	struct vmw_resource *res;
3072	struct {
3073		SVGA3dCmdHeader header;
3074		SVGA3dCmdDXBindStreamOutput body;
3075	} *cmd = container_of(header, typeof(*cmd), header);
3076	int ret;
3077
3078	if (!has_sm5_context(dev_priv))
3079		return -EINVAL;
3080
3081	if (!ctx_node) {
3082		DRM_ERROR("DX Context not set.\n");
3083		return -EINVAL;
3084	}
3085
3086	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3087					 cmd->body.soid);
3088	if (IS_ERR(res)) {
3089		DRM_ERROR("Could not find streamoutput to bind.\n");
3090		return PTR_ERR(res);
3091	}
3092
3093	vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);
3094
3095	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
3096				      vmw_val_add_flag_noctx);
3097	if (ret) {
3098		DRM_ERROR("Error creating resource validation node.\n");
3099		return ret;
3100	}
3101
3102	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
3103					 &cmd->body.mobid,
3104					 cmd->body.offsetInBytes);
3105}
3106
3107static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
3108				       struct vmw_sw_context *sw_context,
3109				       SVGA3dCmdHeader *header)
3110{
3111	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3112	struct vmw_resource *res;
3113	struct vmw_ctx_bindinfo_so binding;
3114	struct {
3115		SVGA3dCmdHeader header;
3116		SVGA3dCmdDXSetStreamOutput body;
3117	} *cmd = container_of(header, typeof(*cmd), header);
3118	int ret;
3119
3120	if (!ctx_node) {
3121		DRM_ERROR("DX Context not set.\n");
3122		return -EINVAL;
3123	}
3124
3125	if (cmd->body.soid == SVGA3D_INVALID_ID)
3126		return 0;
3127
3128	/*
3129	 * When device does not support SM5 then streamoutput with mob command is
3130	 * not available to user-space. Simply return in this case.
3131	 */
3132	if (!has_sm5_context(dev_priv))
3133		return 0;
3134
3135	/*
3136	 * With SM5 capable device if lookup fails then user-space probably used
3137	 * old streamoutput define command. Return without an error.
3138	 */
3139	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3140					 cmd->body.soid);
3141	if (IS_ERR(res)) {
3142		return 0;
3143	}
3144
3145	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
3146				      vmw_val_add_flag_noctx);
3147	if (ret) {
3148		DRM_ERROR("Error creating resource validation node.\n");
3149		return ret;
3150	}
3151
3152	binding.bi.ctx = ctx_node->ctx;
3153	binding.bi.res = res;
3154	binding.bi.bt = vmw_ctx_binding_so;
3155	binding.slot = 0; /* Only one SO set to context at a time. */
3156
3157	vmw_binding_add(sw_context->dx_ctx_node->staged, &binding.bi, 0,
3158			binding.slot);
3159
3160	return ret;
3161}
3162
3163static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv,
3164					      struct vmw_sw_context *sw_context,
3165					      SVGA3dCmdHeader *header)
3166{
3167	struct vmw_draw_indexed_instanced_indirect_cmd {
3168		SVGA3dCmdHeader header;
3169		SVGA3dCmdDXDrawIndexedInstancedIndirect body;
3170	} *cmd = container_of(header, typeof(*cmd), header);
3171
3172	if (!has_sm5_context(dev_priv))
3173		return -EINVAL;
3174
3175	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3176				 VMW_RES_DIRTY_NONE, user_surface_converter,
3177				 &cmd->body.argsBufferSid, NULL);
3178}
3179
3180static int vmw_cmd_instanced_indirect(struct vmw_private *dev_priv,
3181				      struct vmw_sw_context *sw_context,
3182				      SVGA3dCmdHeader *header)
3183{
3184	struct vmw_draw_instanced_indirect_cmd {
3185		SVGA3dCmdHeader header;
3186		SVGA3dCmdDXDrawInstancedIndirect body;
3187	} *cmd = container_of(header, typeof(*cmd), header);
3188
3189	if (!has_sm5_context(dev_priv))
3190		return -EINVAL;
3191
3192	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3193				 VMW_RES_DIRTY_NONE, user_surface_converter,
3194				 &cmd->body.argsBufferSid, NULL);
3195}
3196
3197static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv,
3198				     struct vmw_sw_context *sw_context,
3199				     SVGA3dCmdHeader *header)
3200{
3201	struct vmw_dispatch_indirect_cmd {
3202		SVGA3dCmdHeader header;
3203		SVGA3dCmdDXDispatchIndirect body;
3204	} *cmd = container_of(header, typeof(*cmd), header);
3205
3206	if (!has_sm5_context(dev_priv))
3207		return -EINVAL;
3208
3209	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3210				 VMW_RES_DIRTY_NONE, user_surface_converter,
3211				 &cmd->body.argsBufferSid, NULL);
3212}
3213
3214static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3215				struct vmw_sw_context *sw_context,
3216				void *buf, uint32_t *size)
3217{
3218	uint32_t size_remaining = *size;
3219	uint32_t cmd_id;
3220
3221	cmd_id = ((uint32_t *)buf)[0];
3222	switch (cmd_id) {
3223	case SVGA_CMD_UPDATE:
3224		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3225		break;
3226	case SVGA_CMD_DEFINE_GMRFB:
3227		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3228		break;
3229	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3230		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3231		break;
3232	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3233		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3234		break;
3235	default:
3236		VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
3237		return -EINVAL;
3238	}
3239
3240	if (*size > size_remaining) {
3241		VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
3242			       cmd_id);
3243		return -EINVAL;
3244	}
3245
3246	if (unlikely(!sw_context->kernel)) {
3247		VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
3248		return -EPERM;
3249	}
3250
3251	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3252		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3253
3254	return 0;
3255}
3256
3257static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3258	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3259		    false, false, false),
3260	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3261		    false, false, false),
3262	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3263		    true, false, false),
3264	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3265		    true, false, false),
3266	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3267		    true, false, false),
3268	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3269		    false, false, false),
3270	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3271		    false, false, false),
3272	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3273		    true, false, false),
3274	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3275		    true, false, false),
3276	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3277		    true, false, false),
3278	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3279		    &vmw_cmd_set_render_target_check, true, false, false),
3280	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3281		    true, false, false),
3282	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3283		    true, false, false),
3284	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3285		    true, false, false),
3286	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3287		    true, false, false),
3288	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3289		    true, false, false),
3290	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3291		    true, false, false),
3292	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3293		    true, false, false),
3294	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3295		    false, false, false),
3296	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3297		    true, false, false),
3298	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3299		    true, false, false),
3300	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3301		    true, false, false),
3302	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3303		    true, false, false),
3304	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3305		    true, false, false),
3306	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3307		    true, false, false),
3308	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3309		    true, false, false),
3310	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3311		    true, false, false),
3312	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3313		    true, false, false),
3314	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3315		    true, false, false),
3316	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3317		    &vmw_cmd_blt_surf_screen_check, false, false, false),
3318	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3319		    false, false, false),
3320	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3321		    false, false, false),
3322	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3323		    false, false, false),
3324	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3325		    false, false, false),
3326	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3327		    false, false, false),
3328	VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
3329		    false, false, false),
3330	VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
3331		    false, false, false),
3332	VMW_CMD_DEF(SVGA_3D_CMD_DEAD12, &vmw_cmd_invalid, false, false, false),
3333	VMW_CMD_DEF(SVGA_3D_CMD_DEAD13, &vmw_cmd_invalid, false, false, false),
3334	VMW_CMD_DEF(SVGA_3D_CMD_DEAD14, &vmw_cmd_invalid, false, false, false),
3335	VMW_CMD_DEF(SVGA_3D_CMD_DEAD15, &vmw_cmd_invalid, false, false, false),
3336	VMW_CMD_DEF(SVGA_3D_CMD_DEAD16, &vmw_cmd_invalid, false, false, false),
3337	VMW_CMD_DEF(SVGA_3D_CMD_DEAD17, &vmw_cmd_invalid, false, false, false),
3338	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
3339		    false, false, true),
3340	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
3341		    false, false, true),
3342	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
3343		    false, false, true),
3344	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
3345		    false, false, true),
3346	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
3347		    false, false, true),
3348	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
3349		    false, false, true),
3350	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
3351		    false, false, true),
3352	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
3353		    false, false, true),
3354	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
3355		    true, false, true),
3356	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
3357		    false, false, true),
3358	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
3359		    true, false, true),
3360	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
3361		    &vmw_cmd_update_gb_surface, true, false, true),
3362	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
3363		    &vmw_cmd_readback_gb_image, true, false, true),
3364	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
3365		    &vmw_cmd_readback_gb_surface, true, false, true),
3366	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
3367		    &vmw_cmd_invalidate_gb_image, true, false, true),
3368	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
3369		    &vmw_cmd_invalidate_gb_surface, true, false, true),
3370	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3371		    false, false, true),
3372	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3373		    false, false, true),
3374	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3375		    false, false, true),
3376	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3377		    false, false, true),
3378	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3379		    false, false, true),
3380	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3381		    false, false, true),
3382	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3383		    true, false, true),
3384	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3385		    false, false, true),
3386	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
3387		    false, false, false),
3388	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3389		    true, false, true),
3390	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3391		    true, false, true),
3392	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3393		    true, false, true),
3394	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3395		    true, false, true),
3396	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
3397		    true, false, true),
3398	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3399		    false, false, true),
3400	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3401		    false, false, true),
3402	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3403		    false, false, true),
3404	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3405		    false, false, true),
3406	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3407		    false, false, true),
3408	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3409		    false, false, true),
3410	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3411		    false, false, true),
3412	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3413		    false, false, true),
3414	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3415		    false, false, true),
3416	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3417		    false, false, true),
3418	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3419		    true, false, true),
3420	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3421		    false, false, true),
3422	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3423		    false, false, true),
3424	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3425		    false, false, true),
3426	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3427		    false, false, true),
3428
3429	/* SM commands */
3430	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3431		    false, false, true),
3432	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3433		    false, false, true),
3434	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3435		    false, false, true),
3436	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3437		    false, false, true),
3438	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3439		    false, false, true),
3440	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3441		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3442	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3443		    &vmw_cmd_dx_set_shader_res, true, false, true),
3444	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3445		    true, false, true),
3446	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3447		    true, false, true),
3448	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3449		    true, false, true),
3450	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3451		    true, false, true),
3452	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3453		    true, false, true),
3454	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3455		    &vmw_cmd_dx_cid_check, true, false, true),
3456	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3457		    true, false, true),
3458	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3459		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3460	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3461		    &vmw_cmd_dx_set_index_buffer, true, false, true),
3462	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3463		    &vmw_cmd_dx_set_rendertargets, true, false, true),
3464	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3465		    true, false, true),
3466	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3467		    &vmw_cmd_dx_cid_check, true, false, true),
3468	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3469		    &vmw_cmd_dx_cid_check, true, false, true),
3470	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3471		    true, false, true),
3472	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3473		    true, false, true),
3474	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3475		    true, false, true),
3476	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3477		    &vmw_cmd_dx_cid_check, true, false, true),
3478	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3479		    true, false, true),
3480	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3481		    true, false, true),
3482	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3483		    true, false, true),
3484	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3485		    true, false, true),
3486	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3487		    true, false, true),
3488	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3489		    true, false, true),
3490	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3491		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3492	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3493		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3494	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3495		    true, false, true),
3496	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3497		    true, false, true),
3498	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3499		    &vmw_cmd_dx_check_subresource, true, false, true),
3500	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3501		    &vmw_cmd_dx_check_subresource, true, false, true),
3502	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3503		    &vmw_cmd_dx_check_subresource, true, false, true),
3504	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3505		    &vmw_cmd_dx_view_define, true, false, true),
3506	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3507		    &vmw_cmd_dx_view_remove, true, false, true),
3508	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3509		    &vmw_cmd_dx_view_define, true, false, true),
3510	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3511		    &vmw_cmd_dx_view_remove, true, false, true),
3512	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3513		    &vmw_cmd_dx_view_define, true, false, true),
3514	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3515		    &vmw_cmd_dx_view_remove, true, false, true),
3516	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3517		    &vmw_cmd_dx_so_define, true, false, true),
3518	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3519		    &vmw_cmd_dx_cid_check, true, false, true),
3520	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3521		    &vmw_cmd_dx_so_define, true, false, true),
3522	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3523		    &vmw_cmd_dx_cid_check, true, false, true),
3524	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3525		    &vmw_cmd_dx_so_define, true, false, true),
3526	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3527		    &vmw_cmd_dx_cid_check, true, false, true),
3528	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3529		    &vmw_cmd_dx_so_define, true, false, true),
3530	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3531		    &vmw_cmd_dx_cid_check, true, false, true),
3532	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3533		    &vmw_cmd_dx_so_define, true, false, true),
3534	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3535		    &vmw_cmd_dx_cid_check, true, false, true),
3536	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3537		    &vmw_cmd_dx_define_shader, true, false, true),
3538	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3539		    &vmw_cmd_dx_destroy_shader, true, false, true),
3540	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3541		    &vmw_cmd_dx_bind_shader, true, false, true),
3542	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3543		    &vmw_cmd_dx_so_define, true, false, true),
3544	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3545		    &vmw_cmd_dx_destroy_streamoutput, true, false, true),
3546	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT,
3547		    &vmw_cmd_dx_set_streamoutput, true, false, true),
3548	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3549		    &vmw_cmd_dx_set_so_targets, true, false, true),
3550	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3551		    &vmw_cmd_dx_cid_check, true, false, true),
3552	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3553		    &vmw_cmd_dx_cid_check, true, false, true),
3554	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3555		    &vmw_cmd_buffer_copy_check, true, false, true),
3556	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3557		    &vmw_cmd_pred_copy_check, true, false, true),
3558	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3559		    &vmw_cmd_dx_transfer_from_buffer,
3560		    true, false, true),
3561	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET,
3562		    &vmw_cmd_dx_set_constant_buffer_offset,
3563		    true, false, true),
3564	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET,
3565		    &vmw_cmd_dx_set_constant_buffer_offset,
3566		    true, false, true),
3567	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET,
3568		    &vmw_cmd_dx_set_constant_buffer_offset,
3569		    true, false, true),
3570	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET,
3571		    &vmw_cmd_dx_set_constant_buffer_offset,
3572		    true, false, true),
3573	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET,
3574		    &vmw_cmd_dx_set_constant_buffer_offset,
3575		    true, false, true),
3576	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET,
3577		    &vmw_cmd_dx_set_constant_buffer_offset,
3578		    true, false, true),
3579	VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
3580		    true, false, true),
3581
3582	/*
3583	 * SM5 commands
3584	 */
3585	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW, &vmw_cmd_sm5_view_define,
3586		    true, false, true),
3587	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW, &vmw_cmd_sm5_view_remove,
3588		    true, false, true),
3589	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT, &vmw_cmd_clear_uav_uint,
3590		    true, false, true),
3591	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT,
3592		    &vmw_cmd_clear_uav_float, true, false, true),
3593	VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT, &vmw_cmd_invalid, true,
3594		    false, true),
3595	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS, &vmw_cmd_set_uav, true, false,
3596		    true),
3597	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT,
3598		    &vmw_cmd_indexed_instanced_indirect, true, false, true),
3599	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT,
3600		    &vmw_cmd_instanced_indirect, true, false, true),
3601	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH, &vmw_cmd_sm5, true, false, true),
3602	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT,
3603		    &vmw_cmd_dispatch_indirect, true, false, true),
3604	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav, true,
3605		    false, true),
3606	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2,
3607		    &vmw_cmd_sm5_view_define, true, false, true),
3608	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB,
3609		    &vmw_cmd_dx_define_streamoutput, true, false, true),
3610	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
3611		    &vmw_cmd_dx_bind_streamoutput, true, false, true),
3612	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2,
3613		    &vmw_cmd_dx_so_define, true, false, true),
3614};
3615
3616bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
3617{
3618	u32 cmd_id = ((u32 *) buf)[0];
3619
3620	if (cmd_id >= SVGA_CMD_MAX) {
3621		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3622		const struct vmw_cmd_entry *entry;
3623
3624		*size = header->size + sizeof(SVGA3dCmdHeader);
3625		cmd_id = header->id;
3626		if (cmd_id >= SVGA_3D_CMD_MAX)
3627			return false;
3628
3629		cmd_id -= SVGA_3D_CMD_BASE;
3630		entry = &vmw_cmd_entries[cmd_id];
3631		*cmd = entry->cmd_name;
3632		return true;
3633	}
3634
3635	switch (cmd_id) {
3636	case SVGA_CMD_UPDATE:
3637		*cmd = "SVGA_CMD_UPDATE";
3638		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
3639		break;
3640	case SVGA_CMD_DEFINE_GMRFB:
3641		*cmd = "SVGA_CMD_DEFINE_GMRFB";
3642		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
3643		break;
3644	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3645		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
3646		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3647		break;
3648	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3649		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
3650		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3651		break;
3652	default:
3653		*cmd = "UNKNOWN";
3654		*size = 0;
3655		return false;
3656	}
3657
3658	return true;
3659}
3660
3661static int vmw_cmd_check(struct vmw_private *dev_priv,
3662			 struct vmw_sw_context *sw_context, void *buf,
3663			 uint32_t *size)
3664{
3665	uint32_t cmd_id;
3666	uint32_t size_remaining = *size;
3667	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3668	int ret;
3669	const struct vmw_cmd_entry *entry;
3670	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3671
3672	cmd_id = ((uint32_t *)buf)[0];
3673	/* Handle any none 3D commands */
3674	if (unlikely(cmd_id < SVGA_CMD_MAX))
3675		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3676
3677
3678	cmd_id = header->id;
3679	*size = header->size + sizeof(SVGA3dCmdHeader);
3680
3681	cmd_id -= SVGA_3D_CMD_BASE;
3682	if (unlikely(*size > size_remaining))
3683		goto out_invalid;
3684
3685	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3686		goto out_invalid;
3687
3688	entry = &vmw_cmd_entries[cmd_id];
3689	if (unlikely(!entry->func))
3690		goto out_invalid;
3691
3692	if (unlikely(!entry->user_allow && !sw_context->kernel))
3693		goto out_privileged;
3694
3695	if (unlikely(entry->gb_disable && gb))
3696		goto out_old;
3697
3698	if (unlikely(entry->gb_enable && !gb))
3699		goto out_new;
3700
3701	ret = entry->func(dev_priv, sw_context, header);
3702	if (unlikely(ret != 0)) {
3703		VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
3704			       cmd_id + SVGA_3D_CMD_BASE, ret);
3705		return ret;
3706	}
3707
3708	return 0;
3709out_invalid:
3710	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
3711		       cmd_id + SVGA_3D_CMD_BASE);
3712	return -EINVAL;
3713out_privileged:
3714	VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
3715		       cmd_id + SVGA_3D_CMD_BASE);
3716	return -EPERM;
3717out_old:
3718	VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
3719		       cmd_id + SVGA_3D_CMD_BASE);
3720	return -EINVAL;
3721out_new:
3722	VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
3723		       cmd_id + SVGA_3D_CMD_BASE);
3724	return -EINVAL;
3725}
3726
3727static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3728			     struct vmw_sw_context *sw_context, void *buf,
3729			     uint32_t size)
3730{
3731	int32_t cur_size = size;
3732	int ret;
3733
3734	sw_context->buf_start = buf;
3735
3736	while (cur_size > 0) {
3737		size = cur_size;
3738		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3739		if (unlikely(ret != 0))
3740			return ret;
3741		buf = (void *)((unsigned long) buf + size);
3742		cur_size -= size;
3743	}
3744
3745	if (unlikely(cur_size != 0)) {
3746		VMW_DEBUG_USER("Command verifier out of sync.\n");
3747		return -EINVAL;
3748	}
3749
3750	return 0;
3751}
3752
3753static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3754{
3755	/* Memory is validation context memory, so no need to free it */
3756	INIT_LIST_HEAD(&sw_context->bo_relocations);
3757}
3758
3759static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3760{
3761	struct vmw_relocation *reloc;
3762	struct ttm_buffer_object *bo;
3763
3764	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
3765		bo = &reloc->vbo->tbo;
3766		switch (bo->resource->mem_type) {
3767		case TTM_PL_VRAM:
3768			reloc->location->offset += bo->resource->start << PAGE_SHIFT;
3769			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3770			break;
3771		case VMW_PL_GMR:
3772			reloc->location->gmrId = bo->resource->start;
3773			break;
3774		case VMW_PL_MOB:
3775			*reloc->mob_loc = bo->resource->start;
3776			break;
3777		default:
3778			BUG();
3779		}
3780	}
3781	vmw_free_relocations(sw_context);
3782}
3783
3784static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3785				 uint32_t size)
3786{
3787	if (likely(sw_context->cmd_bounce_size >= size))
3788		return 0;
3789
3790	if (sw_context->cmd_bounce_size == 0)
3791		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3792
3793	while (sw_context->cmd_bounce_size < size) {
3794		sw_context->cmd_bounce_size =
3795			PAGE_ALIGN(sw_context->cmd_bounce_size +
3796				   (sw_context->cmd_bounce_size >> 1));
3797	}
3798
3799	vfree(sw_context->cmd_bounce);
3800	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3801
3802	if (sw_context->cmd_bounce == NULL) {
3803		VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
3804		sw_context->cmd_bounce_size = 0;
3805		return -ENOMEM;
3806	}
3807
3808	return 0;
3809}
3810
3811/*
3812 * vmw_execbuf_fence_commands - create and submit a command stream fence
3813 *
3814 * Creates a fence object and submits a command stream marker.
3815 * If this fails for some reason, We sync the fifo and return NULL.
3816 * It is then safe to fence buffers with a NULL pointer.
3817 *
3818 * If @p_handle is not NULL @file_priv must also not be NULL. Creates a
3819 * userspace handle if @p_handle is not NULL, otherwise not.
3820 */
3821
3822int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3823			       struct vmw_private *dev_priv,
3824			       struct vmw_fence_obj **p_fence,
3825			       uint32_t *p_handle)
3826{
3827	uint32_t sequence;
3828	int ret;
3829	bool synced = false;
3830
3831	/* p_handle implies file_priv. */
3832	BUG_ON(p_handle != NULL && file_priv == NULL);
3833
3834	ret = vmw_cmd_send_fence(dev_priv, &sequence);
3835	if (unlikely(ret != 0)) {
3836		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
3837		synced = true;
3838	}
3839
3840	if (p_handle != NULL)
3841		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3842					    sequence, p_fence, p_handle);
3843	else
3844		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3845
3846	if (unlikely(ret != 0 && !synced)) {
3847		(void) vmw_fallback_wait(dev_priv, false, false, sequence,
3848					 false, VMW_FENCE_WAIT_TIMEOUT);
3849		*p_fence = NULL;
3850	}
3851
3852	return ret;
3853}
3854
3855/**
3856 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
3857 *
3858 * @dev_priv: Pointer to a vmw_private struct.
3859 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3860 * @ret: Return value from fence object creation.
3861 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
3862 * the information should be copied.
3863 * @fence: Pointer to the fenc object.
3864 * @fence_handle: User-space fence handle.
3865 * @out_fence_fd: exported file descriptor for the fence.  -1 if not used
3866 *
3867 * This function copies fence information to user-space. If copying fails, the
3868 * user-space struct drm_vmw_fence_rep::error member is hopefully left
3869 * untouched, and if it's preloaded with an -EFAULT by user-space, the error
3870 * will hopefully be detected.
3871 *
3872 * Also if copying fails, user-space will be unable to signal the fence object
3873 * so we wait for it immediately, and then unreference the user-space reference.
3874 */
3875int
3876vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3877			    struct vmw_fpriv *vmw_fp, int ret,
3878			    struct drm_vmw_fence_rep __user *user_fence_rep,
3879			    struct vmw_fence_obj *fence, uint32_t fence_handle,
3880			    int32_t out_fence_fd)
3881{
3882	struct drm_vmw_fence_rep fence_rep;
3883
3884	if (user_fence_rep == NULL)
3885		return 0;
3886
3887	memset(&fence_rep, 0, sizeof(fence_rep));
3888
3889	fence_rep.error = ret;
3890	fence_rep.fd = out_fence_fd;
3891	if (ret == 0) {
3892		BUG_ON(fence == NULL);
3893
3894		fence_rep.handle = fence_handle;
3895		fence_rep.seqno = fence->base.seqno;
3896		vmw_update_seqno(dev_priv);
3897		fence_rep.passed_seqno = dev_priv->last_read_seqno;
3898	}
3899
3900	/*
3901	 * copy_to_user errors will be detected by user space not seeing
3902	 * fence_rep::error filled in. Typically user-space would have pre-set
3903	 * that member to -EFAULT.
3904	 */
3905	ret = copy_to_user(user_fence_rep, &fence_rep,
3906			   sizeof(fence_rep));
3907
3908	/*
3909	 * User-space lost the fence object. We need to sync and unreference the
3910	 * handle.
3911	 */
3912	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3913		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle);
3914		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
3915		(void) vmw_fence_obj_wait(fence, false, false,
3916					  VMW_FENCE_WAIT_TIMEOUT);
3917	}
3918
3919	return ret ? -EFAULT : 0;
3920}
3921
3922/**
3923 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
3924 *
3925 * @dev_priv: Pointer to a device private structure.
3926 * @kernel_commands: Pointer to the unpatched command batch.
3927 * @command_size: Size of the unpatched command batch.
3928 * @sw_context: Structure holding the relocation lists.
3929 *
3930 * Side effects: If this function returns 0, then the command batch pointed to
3931 * by @kernel_commands will have been modified.
3932 */
3933static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3934				   void *kernel_commands, u32 command_size,
3935				   struct vmw_sw_context *sw_context)
3936{
3937	void *cmd;
3938
3939	if (sw_context->dx_ctx_node)
3940		cmd = VMW_CMD_CTX_RESERVE(dev_priv, command_size,
3941					  sw_context->dx_ctx_node->ctx->id);
3942	else
3943		cmd = VMW_CMD_RESERVE(dev_priv, command_size);
3944
3945	if (!cmd)
3946		return -ENOMEM;
3947
3948	vmw_apply_relocations(sw_context);
3949	memcpy(cmd, kernel_commands, command_size);
3950	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3951	vmw_resource_relocations_free(&sw_context->res_relocations);
3952	vmw_cmd_commit(dev_priv, command_size);
3953
3954	return 0;
3955}
3956
3957/**
3958 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
3959 * command buffer manager.
3960 *
3961 * @dev_priv: Pointer to a device private structure.
3962 * @header: Opaque handle to the command buffer allocation.
3963 * @command_size: Size of the unpatched command batch.
3964 * @sw_context: Structure holding the relocation lists.
3965 *
3966 * Side effects: If this function returns 0, then the command buffer represented
3967 * by @header will have been modified.
3968 */
3969static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3970				     struct vmw_cmdbuf_header *header,
3971				     u32 command_size,
3972				     struct vmw_sw_context *sw_context)
3973{
3974	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
3975		  SVGA3D_INVALID_ID);
3976	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
3977				       header);
3978
3979	vmw_apply_relocations(sw_context);
3980	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3981	vmw_resource_relocations_free(&sw_context->res_relocations);
3982	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3983
3984	return 0;
3985}
3986
3987/**
3988 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3989 * submission using a command buffer.
3990 *
3991 * @dev_priv: Pointer to a device private structure.
3992 * @user_commands: User-space pointer to the commands to be submitted.
3993 * @command_size: Size of the unpatched command batch.
3994 * @header: Out parameter returning the opaque pointer to the command buffer.
3995 *
3996 * This function checks whether we can use the command buffer manager for
3997 * submission and if so, creates a command buffer of suitable size and copies
3998 * the user data into that buffer.
3999 *
4000 * On successful return, the function returns a pointer to the data in the
4001 * command buffer and *@header is set to non-NULL.
4002 *
4003 * @kernel_commands: If command buffers could not be used, the function will
4004 * return the value of @kernel_commands on function call. That value may be
4005 * NULL. In that case, the value of *@header will be set to NULL.
4006 *
4007 * If an error is encountered, the function will return a pointer error value.
4008 * If the function is interrupted by a signal while sleeping, it will return
4009 * -ERESTARTSYS casted to a pointer error value.
4010 */
4011static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
4012				void __user *user_commands,
4013				void *kernel_commands, u32 command_size,
4014				struct vmw_cmdbuf_header **header)
4015{
4016	size_t cmdbuf_size;
4017	int ret;
4018
4019	*header = NULL;
4020	if (command_size > SVGA_CB_MAX_SIZE) {
4021		VMW_DEBUG_USER("Command buffer is too large.\n");
4022		return ERR_PTR(-EINVAL);
4023	}
4024
4025	if (!dev_priv->cman || kernel_commands)
4026		return kernel_commands;
4027
4028	/* If possible, add a little space for fencing. */
4029	cmdbuf_size = command_size + 512;
4030	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
4031	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
4032					   header);
4033	if (IS_ERR(kernel_commands))
4034		return kernel_commands;
4035
4036	ret = copy_from_user(kernel_commands, user_commands, command_size);
4037	if (ret) {
4038		VMW_DEBUG_USER("Failed copying commands.\n");
4039		vmw_cmdbuf_header_free(*header);
4040		*header = NULL;
4041		return ERR_PTR(-EFAULT);
4042	}
4043
4044	return kernel_commands;
4045}
4046
4047static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
4048				   struct vmw_sw_context *sw_context,
4049				   uint32_t handle)
4050{
4051	struct vmw_resource *res;
4052	int ret;
4053	unsigned int size;
4054
4055	if (handle == SVGA3D_INVALID_ID)
4056		return 0;
4057
4058	size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
4059	ret = vmw_validation_preload_res(sw_context->ctx, size);
4060	if (ret)
4061		return ret;
4062
4063	ret = vmw_user_resource_lookup_handle
4064		(dev_priv, sw_context->fp->tfile, handle,
4065		 user_context_converter, &res);
4066	if (ret != 0) {
4067		VMW_DEBUG_USER("Could not find or user DX context 0x%08x.\n",
4068			       (unsigned int) handle);
4069		return ret;
4070	}
4071
4072	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_SET,
4073				      vmw_val_add_flag_none);
4074	if (unlikely(ret != 0)) {
4075		vmw_resource_unreference(&res);
4076		return ret;
4077	}
4078
4079	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
4080	sw_context->man = vmw_context_res_man(res);
4081
4082	vmw_resource_unreference(&res);
4083	return 0;
4084}
4085
4086int vmw_execbuf_process(struct drm_file *file_priv,
4087			struct vmw_private *dev_priv,
4088			void __user *user_commands, void *kernel_commands,
4089			uint32_t command_size, uint64_t throttle_us,
4090			uint32_t dx_context_handle,
4091			struct drm_vmw_fence_rep __user *user_fence_rep,
4092			struct vmw_fence_obj **out_fence, uint32_t flags)
4093{
4094	struct vmw_sw_context *sw_context = &dev_priv->ctx;
4095	struct vmw_fence_obj *fence = NULL;
4096	struct vmw_cmdbuf_header *header;
4097	uint32_t handle = 0;
4098	int ret;
4099	int32_t out_fence_fd = -1;
4100	struct sync_file *sync_file = NULL;
4101	DECLARE_VAL_CONTEXT(val_ctx, sw_context, 1);
4102
4103	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4104		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
4105		if (out_fence_fd < 0) {
4106			VMW_DEBUG_USER("Failed to get a fence fd.\n");
4107			return out_fence_fd;
4108		}
4109	}
4110
4111	if (throttle_us) {
4112		VMW_DEBUG_USER("Throttling is no longer supported.\n");
4113	}
4114
4115	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
4116					     kernel_commands, command_size,
4117					     &header);
4118	if (IS_ERR(kernel_commands)) {
4119		ret = PTR_ERR(kernel_commands);
4120		goto out_free_fence_fd;
4121	}
4122
4123	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
4124	if (ret) {
4125		ret = -ERESTARTSYS;
4126		goto out_free_header;
4127	}
4128
4129	sw_context->kernel = false;
4130	if (kernel_commands == NULL) {
4131		ret = vmw_resize_cmd_bounce(sw_context, command_size);
4132		if (unlikely(ret != 0))
4133			goto out_unlock;
4134
4135		ret = copy_from_user(sw_context->cmd_bounce, user_commands,
4136				     command_size);
4137		if (unlikely(ret != 0)) {
4138			ret = -EFAULT;
4139			VMW_DEBUG_USER("Failed copying commands.\n");
4140			goto out_unlock;
4141		}
4142
4143		kernel_commands = sw_context->cmd_bounce;
4144	} else if (!header) {
4145		sw_context->kernel = true;
4146	}
4147
4148	sw_context->filp = file_priv;
4149	sw_context->fp = vmw_fpriv(file_priv);
4150	INIT_LIST_HEAD(&sw_context->ctx_list);
4151	sw_context->cur_query_bo = dev_priv->pinned_bo;
4152	sw_context->last_query_ctx = NULL;
4153	sw_context->needs_post_query_barrier = false;
4154	sw_context->dx_ctx_node = NULL;
4155	sw_context->dx_query_mob = NULL;
4156	sw_context->dx_query_ctx = NULL;
4157	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
4158	INIT_LIST_HEAD(&sw_context->res_relocations);
4159	INIT_LIST_HEAD(&sw_context->bo_relocations);
4160
4161	if (sw_context->staged_bindings)
4162		vmw_binding_state_reset(sw_context->staged_bindings);
4163
4164	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
4165	sw_context->ctx = &val_ctx;
4166	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
4167	if (unlikely(ret != 0))
4168		goto out_err_nores;
4169
4170	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
4171				command_size);
4172	if (unlikely(ret != 0))
4173		goto out_err_nores;
4174
4175	ret = vmw_resources_reserve(sw_context);
4176	if (unlikely(ret != 0))
4177		goto out_err_nores;
4178
4179	ret = vmw_validation_bo_reserve(&val_ctx, true);
4180	if (unlikely(ret != 0))
4181		goto out_err_nores;
4182
4183	ret = vmw_validation_bo_validate(&val_ctx, true);
4184	if (unlikely(ret != 0))
4185		goto out_err;
4186
4187	ret = vmw_validation_res_validate(&val_ctx, true);
4188	if (unlikely(ret != 0))
4189		goto out_err;
4190
4191	vmw_validation_drop_ht(&val_ctx);
4192
4193	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
4194	if (unlikely(ret != 0)) {
4195		ret = -ERESTARTSYS;
4196		goto out_err;
4197	}
4198
4199	if (dev_priv->has_mob) {
4200		ret = vmw_rebind_contexts(sw_context);
4201		if (unlikely(ret != 0))
4202			goto out_unlock_binding;
4203	}
4204
4205	if (!header) {
4206		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
4207					      command_size, sw_context);
4208	} else {
4209		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
4210						sw_context);
4211		header = NULL;
4212	}
4213	mutex_unlock(&dev_priv->binding_mutex);
4214	if (ret)
4215		goto out_err;
4216
4217	vmw_query_bo_switch_commit(dev_priv, sw_context);
4218	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
4219					 (user_fence_rep) ? &handle : NULL);
4220	/*
4221	 * This error is harmless, because if fence submission fails,
4222	 * vmw_fifo_send_fence will sync. The error will be propagated to
4223	 * user-space in @fence_rep
4224	 */
4225	if (ret != 0)
4226		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
4227
4228	vmw_execbuf_bindings_commit(sw_context, false);
4229	vmw_bind_dx_query_mob(sw_context);
4230	vmw_validation_res_unreserve(&val_ctx, false);
4231
4232	vmw_validation_bo_fence(sw_context->ctx, fence);
4233
4234	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
4235		__vmw_execbuf_release_pinned_bo(dev_priv, fence);
4236
4237	/*
4238	 * If anything fails here, give up trying to export the fence and do a
4239	 * sync since the user mode will not be able to sync the fence itself.
4240	 * This ensures we are still functionally correct.
4241	 */
4242	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4243
4244		sync_file = sync_file_create(&fence->base);
4245		if (!sync_file) {
4246			VMW_DEBUG_USER("Sync file create failed for fence\n");
4247			put_unused_fd(out_fence_fd);
4248			out_fence_fd = -1;
4249
4250			(void) vmw_fence_obj_wait(fence, false, false,
4251						  VMW_FENCE_WAIT_TIMEOUT);
4252		}
4253	}
4254
4255	ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
4256				    user_fence_rep, fence, handle, out_fence_fd);
4257
4258	if (sync_file) {
4259		if (ret) {
4260			/* usercopy of fence failed, put the file object */
4261			fput(sync_file->file);
4262			put_unused_fd(out_fence_fd);
4263		} else {
4264			/* Link the fence with the FD created earlier */
4265			fd_install(out_fence_fd, sync_file->file);
4266		}
4267	}
4268
4269	/* Don't unreference when handing fence out */
4270	if (unlikely(out_fence != NULL)) {
4271		*out_fence = fence;
4272		fence = NULL;
4273	} else if (likely(fence != NULL)) {
4274		vmw_fence_obj_unreference(&fence);
4275	}
4276
4277	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
4278	mutex_unlock(&dev_priv->cmdbuf_mutex);
4279
4280	/*
4281	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
4282	 * in resource destruction paths.
4283	 */
4284	vmw_validation_unref_lists(&val_ctx);
4285
4286	return ret;
4287
4288out_unlock_binding:
4289	mutex_unlock(&dev_priv->binding_mutex);
4290out_err:
4291	vmw_validation_bo_backoff(&val_ctx);
4292out_err_nores:
4293	vmw_execbuf_bindings_commit(sw_context, true);
4294	vmw_validation_res_unreserve(&val_ctx, true);
4295	vmw_resource_relocations_free(&sw_context->res_relocations);
4296	vmw_free_relocations(sw_context);
4297	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
4298		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4299out_unlock:
4300	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
4301	vmw_validation_drop_ht(&val_ctx);
4302	WARN_ON(!list_empty(&sw_context->ctx_list));
4303	mutex_unlock(&dev_priv->cmdbuf_mutex);
4304
4305	/*
4306	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
4307	 * in resource destruction paths.
4308	 */
4309	vmw_validation_unref_lists(&val_ctx);
4310out_free_header:
4311	if (header)
4312		vmw_cmdbuf_header_free(header);
4313out_free_fence_fd:
4314	if (out_fence_fd >= 0)
4315		put_unused_fd(out_fence_fd);
4316
4317	return ret;
4318}
4319
4320/**
4321 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
4322 *
4323 * @dev_priv: The device private structure.
4324 *
4325 * This function is called to idle the fifo and unpin the query buffer if the
4326 * normal way to do this hits an error, which should typically be extremely
4327 * rare.
4328 */
4329static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
4330{
4331	VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");
4332
4333	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
4334	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4335	if (dev_priv->dummy_query_bo_pinned) {
4336		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4337		dev_priv->dummy_query_bo_pinned = false;
4338	}
4339}
4340
4341
4342/**
4343 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
4344 * bo.
4345 *
4346 * @dev_priv: The device private structure.
4347 * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
4348 * query barrier that flushes all queries touching the current buffer pointed to
4349 * by @dev_priv->pinned_bo
4350 *
4351 * This function should be used to unpin the pinned query bo, or as a query
4352 * barrier when we need to make sure that all queries have finished before the
4353 * next fifo command. (For example on hardware context destructions where the
4354 * hardware may otherwise leak unfinished queries).
4355 *
4356 * This function does not return any failure codes, but make attempts to do safe
4357 * unpinning in case of errors.
4358 *
4359 * The function will synchronize on the previous query barrier, and will thus
4360 * not finish until that barrier has executed.
4361 *
4362 * the @dev_priv->cmdbuf_mutex needs to be held by the current thread before
4363 * calling this function.
4364 */
4365void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
4366				     struct vmw_fence_obj *fence)
4367{
4368	int ret = 0;
4369	struct vmw_fence_obj *lfence = NULL;
4370	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
4371
4372	if (dev_priv->pinned_bo == NULL)
4373		goto out_unlock;
4374
4375	vmw_bo_placement_set(dev_priv->pinned_bo,
4376			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
4377			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
4378	ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo);
4379	if (ret)
4380		goto out_no_reserve;
4381
4382	vmw_bo_placement_set(dev_priv->dummy_query_bo,
4383			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
4384			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
4385	ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo);
4386	if (ret)
4387		goto out_no_reserve;
4388
4389	ret = vmw_validation_bo_reserve(&val_ctx, false);
4390	if (ret)
4391		goto out_no_reserve;
4392
4393	if (dev_priv->query_cid_valid) {
4394		BUG_ON(fence != NULL);
4395		ret = vmw_cmd_emit_dummy_query(dev_priv, dev_priv->query_cid);
4396		if (ret)
4397			goto out_no_emit;
4398		dev_priv->query_cid_valid = false;
4399	}
4400
4401	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4402	if (dev_priv->dummy_query_bo_pinned) {
4403		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4404		dev_priv->dummy_query_bo_pinned = false;
4405	}
4406	if (fence == NULL) {
4407		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
4408						  NULL);
4409		fence = lfence;
4410	}
4411	vmw_validation_bo_fence(&val_ctx, fence);
4412	if (lfence != NULL)
4413		vmw_fence_obj_unreference(&lfence);
4414
4415	vmw_validation_unref_lists(&val_ctx);
4416	vmw_bo_unreference(&dev_priv->pinned_bo);
4417
4418out_unlock:
4419	return;
4420out_no_emit:
4421	vmw_validation_bo_backoff(&val_ctx);
4422out_no_reserve:
4423	vmw_validation_unref_lists(&val_ctx);
4424	vmw_execbuf_unpin_panic(dev_priv);
4425	vmw_bo_unreference(&dev_priv->pinned_bo);
4426}
4427
4428/**
4429 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
4430 *
4431 * @dev_priv: The device private structure.
4432 *
4433 * This function should be used to unpin the pinned query bo, or as a query
4434 * barrier when we need to make sure that all queries have finished before the
4435 * next fifo command. (For example on hardware context destructions where the
4436 * hardware may otherwise leak unfinished queries).
4437 *
4438 * This function does not return any failure codes, but make attempts to do safe
4439 * unpinning in case of errors.
4440 *
4441 * The function will synchronize on the previous query barrier, and will thus
4442 * not finish until that barrier has executed.
4443 */
4444void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
4445{
4446	mutex_lock(&dev_priv->cmdbuf_mutex);
4447	if (dev_priv->query_cid_valid)
4448		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4449	mutex_unlock(&dev_priv->cmdbuf_mutex);
4450}
4451
4452int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
4453		      struct drm_file *file_priv)
4454{
4455	struct vmw_private *dev_priv = vmw_priv(dev);
4456	struct drm_vmw_execbuf_arg *arg = data;
4457	int ret;
4458	struct dma_fence *in_fence = NULL;
4459
4460	MKS_STAT_TIME_DECL(MKSSTAT_KERN_EXECBUF);
4461	MKS_STAT_TIME_PUSH(MKSSTAT_KERN_EXECBUF);
4462
4463	/*
4464	 * Extend the ioctl argument while maintaining backwards compatibility:
4465	 * We take different code paths depending on the value of arg->version.
4466	 *
4467	 * Note: The ioctl argument is extended and zeropadded by core DRM.
4468	 */
4469	if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
4470		     arg->version == 0)) {
4471		VMW_DEBUG_USER("Incorrect execbuf version.\n");
4472		ret = -EINVAL;
4473		goto mksstats_out;
4474	}
4475
4476	switch (arg->version) {
4477	case 1:
4478		/* For v1 core DRM have extended + zeropadded the data */
4479		arg->context_handle = (uint32_t) -1;
4480		break;
4481	case 2:
4482	default:
4483		/* For v2 and later core DRM would have correctly copied it */
4484		break;
4485	}
4486
4487	/* If imported a fence FD from elsewhere, then wait on it */
4488	if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
4489		in_fence = sync_file_get_fence(arg->imported_fence_fd);
4490
4491		if (!in_fence) {
4492			VMW_DEBUG_USER("Cannot get imported fence\n");
4493			ret = -EINVAL;
4494			goto mksstats_out;
4495		}
4496
4497		ret = dma_fence_wait(in_fence, true);
4498		if (ret)
4499			goto out;
4500	}
4501
4502	ret = vmw_execbuf_process(file_priv, dev_priv,
4503				  (void __user *)(unsigned long)arg->commands,
4504				  NULL, arg->command_size, arg->throttle_us,
4505				  arg->context_handle,
4506				  (void __user *)(unsigned long)arg->fence_rep,
4507				  NULL, arg->flags);
4508
4509	if (unlikely(ret != 0))
4510		goto out;
4511
4512	vmw_kms_cursor_post_execbuf(dev_priv);
4513
4514out:
4515	if (in_fence)
4516		dma_fence_put(in_fence);
4517
4518mksstats_out:
4519	MKS_STAT_TIME_POP(MKSSTAT_KERN_EXECBUF);
4520	return ret;
4521}
4522