// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_sysfs.h>
#include <drm/drm_vblank.h>

#include "vmwgfx_kms.h"

/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)

void vmw_du_cleanup(struct vmw_display_unit *du)
{
	drm_plane_cleanup(&du->primary);
	drm_plane_cleanup(&du->cursor);

	drm_connector_unregister(&du->connector);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}

/*
 * Display Unit Cursor functions
 */

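/**
 * vmw_cursor_update_image - Define the device cursor image
 *
 * @dev_priv: Pointer to the device private struct.
 * @image: Pointer to a 32-bit-per-pixel ARGB cursor image.
 * @width: Width of the image in pixels.
 * @height: Height of the image in pixels.
 * @hotspotX: Cursor hotspot x coordinate.
 * @hotspotY: Cursor hotspot y coordinate.
 *
 * Reserves FIFO space for an SVGA_CMD_DEFINE_ALPHA_CURSOR command followed
 * by the image data and commits it to the device.
 *
 * Returns 0 on success, negative error code on failure.
 */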
static int vmw_cursor_update_image(struct vmw_private *dev_priv,
				   u32 *image, u32 width, u32 height,
				   u32 hotspotX, u32 hotspotY)
{
	struct {
		u32 cmd;
		SVGAFifoCmdDefineAlphaCursor cursor;
	} *cmd;
	u32 image_size = width * height * 4;
	u32 cmd_size = sizeof(*cmd) + image_size;

	if (!image)
		return -EINVAL;

	cmd = VMW_FIFO_RESERVE(dev_priv, cmd_size);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	memset(cmd, 0, sizeof(*cmd));

	memcpy(&cmd[1], image, image_size);

	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
	cmd->cursor.id = 0;
	cmd->cursor.width = width;
	cmd->cursor.height = height;
	cmd->cursor.hotspotX = hotspotX;
	cmd->cursor.hotspotY = hotspotY;

	vmw_fifo_commit_flush(dev_priv, cmd_size);

	return 0;
}

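/**
 * vmw_cursor_update_bo - Update the cursor image from a buffer object
 *
 * @dev_priv: Pointer to the device private struct.
 * @bo: Buffer object holding the cursor image.
 * @width: Width of the image in pixels.
 * @height: Height of the image in pixels.
 * @hotspotX: Cursor hotspot x coordinate.
 * @hotspotY: Cursor hotspot y coordinate.
 *
 * Reserves and kmaps the buffer object, then hands the mapped image to
 * vmw_cursor_update_image().
 *
 * Returns 0 on success, negative error code on failure.
 */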
static int vmw_cursor_update_bo(struct vmw_private *dev_priv,
				struct vmw_buffer_object *bo,
				u32 width, u32 height,
				u32 hotspotX, u32 hotspotY)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	void *virtual;
	bool dummy;
	int ret;

	kmap_offset = 0;
	kmap_num = (width * height * 4 + PAGE_SIZE - 1) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(&bo->base, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return -EINVAL;
	}

	ret = ttm_bo_kmap(&bo->base, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
				      hotspotX, hotspotY);

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(&bo->base);

	return ret;
}

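/**
 * vmw_cursor_update_position - Move and show or hide the device cursor
 *
 * @dev_priv: Pointer to the device private struct.
 * @show: Whether the cursor should be visible.
 * @x: New cursor x position in screen coordinates.
 * @y: New cursor y position in screen coordinates.
 *
 * Writes the cursor state to the SVGA_FIFO_CURSOR_* registers and bumps
 * the cursor count so the device picks up the change.
 */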
static void vmw_cursor_update_position(struct vmw_private *dev_priv,
				       bool show, int x, int y)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t count;

	spin_lock(&dev_priv->cursor_lock);
	vmw_mmio_write(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
	vmw_mmio_write(x, fifo_mem + SVGA_FIFO_CURSOR_X);
	vmw_mmio_write(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
	count = vmw_mmio_read(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	spin_unlock(&dev_priv->cursor_lock);
}

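/**
 * vmw_kms_cursor_snoop - Snoop a cursor image from a surface DMA command
 *
 * @srf: The surface the DMA command targets.
 * @tfile: Identifying the caller.
 * @bo: The buffer object that is the source of the DMA.
 * @header: Header of the SVGA3D surface DMA command.
 *
 * Inspects a surface DMA command targeting a cursor surface and, if the
 * copy box layout is one we can handle, copies the cursor image into
 * @srf->snooper.image so that it can later be replayed to the device.
 */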
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int i, ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0    || box->y != 0    || box->z != 0    ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->d != 1    || box_count != 1 ||
	    box->w > 64 || box->h > 64) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle more dst & src != 0 */
		/* TODO handle more than one copy */
		DRM_ERROR("Can't snoop dma request for cursor!\n");
		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
			  box->srcx, box->srcy, box->srcz,
			  box->x, box->y, box->z,
			  box->w, box->h, box->d, box_count,
			  cmd->dma.guest.ptr.offset);
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64*64*4) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);

	if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
		memcpy(srf->snooper.image, virtual, 64*64*4);
	} else {
		/* Image is a u32 pointer, so i * 64 steps one 64-pixel row. */
		for (i = 0; i < box->h; i++)
			memcpy(srf->snooper.image + i * 64,
			       virtual + i * cmd->dma.guest.pitch,
			       box->w * 4);
	}

	srf->snooper.age++;

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}

/**
 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Clears all legacy hotspots.
 */
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	drm_modeset_lock_all(dev);
	drm_for_each_crtc(crtc, dev) {
		du = vmw_crtc_to_du(crtc);

		du->hotspot_x = 0;
		du->hotspot_y = 0;
	}
	drm_modeset_unlock_all(dev);
}

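/**
 * vmw_kms_cursor_post_execbuf - Replay snooped cursor updates
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Walks all display units and, for those whose snooped cursor image has
 * aged, re-sends the 64x64 snooper image to the device with the current
 * hotspot.
 */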
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv,
					du->cursor_surface->snooper.image,
					64, 64,
					du->hotspot_x + du->core_hotspot_x,
					du->hotspot_y + du->core_hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}

void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
{
	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);

	drm_plane_cleanup(plane);
}


void vmw_du_primary_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);

	/* Planes are static in our case so we don't free them */
}

/**
 * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
 *
 * @vps: plane state associated with the display surface
 * @unreference: true if we also want to unreference the surface.
 */
void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
			     bool unreference)
{
	if (vps->surf) {
		if (vps->pinned) {
			vmw_resource_unpin(&vps->surf->res);
			vps->pinned--;
		}

		if (unreference) {
			if (vps->pinned)
				DRM_ERROR("Surface still pinned\n");
			vmw_surface_unreference(&vps->surf);
		}
	}
}

/**
 * vmw_du_plane_cleanup_fb - Unpins the surface backing the plane
 *
 * @plane:  display plane
 * @old_state: Contains the FB to clean up
 *
 * Unpins the framebuffer surface.
 */
void
vmw_du_plane_cleanup_fb(struct drm_plane *plane,
			struct drm_plane_state *old_state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

	vmw_du_plane_unpin_surf(vps, false);
}


/**
 * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
 *
 * @plane:  display plane
 * @new_state: info on the new plane state, including the FB
 *
 * Returns 0 on success
 */
int
vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
			       struct drm_plane_state *new_state)
{
	struct drm_framebuffer *fb = new_state->fb;
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);

	if (vps->surf)
		vmw_surface_unreference(&vps->surf);

	if (vps->bo)
		vmw_bo_unreference(&vps->bo);

	if (fb) {
		if (vmw_framebuffer_to_vfb(fb)->bo) {
			vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
			vmw_bo_reference(vps->bo);
		} else {
			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
			vmw_surface_reference(vps->surf);
		}
	}

	return 0;
}

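/**
 * vmw_du_cursor_plane_atomic_update - Update the cursor image and position
 *
 * @plane: cursor plane
 * @old_state: the state of @plane before this update
 *
 * Pushes the new cursor image to the device, either from the snooped
 * surface image or from a backing buffer object, and then updates the
 * cursor position, taking the accumulated hotspot into account. Hides
 * the cursor if the plane has neither a surface nor a buffer object.
 */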
void
vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
				  struct drm_plane_state *old_state)
{
	struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(plane->state);
	s32 hotspot_x, hotspot_y;
	int ret = 0;

	hotspot_x = du->hotspot_x;
	hotspot_y = du->hotspot_y;

	if (plane->state->fb) {
		hotspot_x += plane->state->fb->hot_x;
		hotspot_y += plane->state->fb->hot_y;
	}

	du->cursor_surface = vps->surf;
	du->cursor_bo = vps->bo;

	if (vps->surf) {
		du->cursor_age = du->cursor_surface->snooper.age;

		ret = vmw_cursor_update_image(dev_priv,
					      vps->surf->snooper.image,
					      64, 64, hotspot_x,
					      hotspot_y);
	} else if (vps->bo) {
		ret = vmw_cursor_update_bo(dev_priv, vps->bo,
					   plane->state->crtc_w,
					   plane->state->crtc_h,
					   hotspot_x, hotspot_y);
	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return;
	}

	if (!ret) {
		du->cursor_x = plane->state->crtc_x + du->set_gui_x;
		du->cursor_y = plane->state->crtc_y + du->set_gui_y;

		vmw_cursor_update_position(dev_priv, true,
					   du->cursor_x + hotspot_x,
					   du->cursor_y + hotspot_y);

		du->core_hotspot_x = hotspot_x - du->hotspot_x;
		du->core_hotspot_y = hotspot_y - du->hotspot_y;
	} else {
		DRM_ERROR("Failed to update cursor image\n");
	}
}

/**
 * vmw_du_primary_plane_atomic_check - check if the new state is okay
 *
 * @plane: display plane
 * @state: info on the new plane state, including the FB
 *
 * Check if the new state is settable given the current state.  Other
 * than what the atomic helper checks, we care about crtc fitting
 * the FB and maintaining one active framebuffer.
 *
 * Returns 0 on success
 */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
				      struct drm_plane_state *state)
{
	struct drm_crtc_state *crtc_state = NULL;
	struct drm_framebuffer *new_fb = state->fb;
	int ret;

	if (state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);

	ret = drm_atomic_helper_check_plane_state(state, crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  false, true);

	if (!ret && new_fb) {
		struct drm_crtc *crtc = state->crtc;
		struct vmw_connector_state *vcs;
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

		vcs = vmw_connector_state_to_vcs(du->connector.state);
	}

	return ret;
}


/**
 * vmw_du_cursor_plane_atomic_check - check if the new state is okay
 *
 * @plane: cursor plane
 * @new_state: info on the new plane state
 *
 * This is a chance to fail if the new cursor state does not fit
 * our requirements.
 *
 * Returns 0 on success
 */
int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
				     struct drm_plane_state *new_state)
{
	int ret = 0;
	struct drm_crtc_state *crtc_state = NULL;
	struct vmw_surface *surface = NULL;
	struct drm_framebuffer *fb = new_state->fb;

	if (new_state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
							   new_state->crtc);

	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	/* Turning off */
	if (!fb)
		return 0;

	/* A lot of the code assumes this */
	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
			  new_state->crtc_w, new_state->crtc_h);
		ret = -EINVAL;
	}

	if (!vmw_framebuffer_to_vfb(fb)->bo)
		surface = vmw_framebuffer_to_vfbs(fb)->surface;

	if (surface && !surface->snooper.image) {
		DRM_ERROR("surface not suitable for cursor\n");
		ret = -EINVAL;
	}

	return ret;
}

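/**
 * vmw_du_crtc_atomic_check - check if the new crtc state is okay
 *
 * @crtc: DRM crtc
 * @new_state: the new crtc state
 *
 * Beyond the helper checks, we require that an enabled crtc always has an
 * active primary plane, that only the crtc's own connector is part of the
 * state, and we fake a dot clock since the virtual device has none.
 *
 * Returns 0 on success, -EINVAL otherwise.
 */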
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
			     struct drm_crtc_state *new_state)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
	int connector_mask = drm_connector_mask(&du->connector);
	bool has_primary = new_state->plane_mask &
			   drm_plane_mask(crtc->primary);

	/* We always want to have an active plane with an active CRTC */
	if (has_primary != new_state->enable)
		return -EINVAL;

	if (new_state->connector_mask != connector_mask &&
	    new_state->connector_mask != 0) {
		DRM_ERROR("Invalid connectors configuration\n");
		return -EINVAL;
	}

	/*
	 * Our virtual device does not have a dot clock, so use the logical
	 * clock value as the dot clock.
	 */
	if (new_state->mode.crtc_clock == 0)
		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;

	return 0;
}


void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
			      struct drm_crtc_state *old_crtc_state)
{
}

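/**
 * vmw_du_crtc_atomic_flush - complete a pending vblank event
 *
 * @crtc: DRM crtc
 * @old_crtc_state: the crtc state before this update
 *
 * The virtual device has no real vblank interrupt, so any pending event
 * is sent immediately once the atomic update has been flushed.
 */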
void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
			      struct drm_crtc_state *old_crtc_state)
{
	struct drm_pending_vblank_event *event = crtc->state->event;

	if (event) {
		crtc->state->event = NULL;

		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, event);
		spin_unlock_irq(&crtc->dev->event_lock);
	}
}


/**
 * vmw_du_crtc_duplicate_state - duplicate crtc state
 * @crtc: DRM crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * vmw-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state;
	struct vmw_crtc_state *vcs;

	if (WARN_ON(!crtc->state))
		return NULL;

	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);

	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_crtc_duplicate_state(crtc, state);

	return state;
}


/**
 * vmw_du_crtc_reset - creates a blank vmw crtc state
 * @crtc: DRM crtc
 *
 * Resets the atomic state for @crtc by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_crtc_reset(struct drm_crtc *crtc)
{
	struct vmw_crtc_state *vcs;

	if (crtc->state) {
		__drm_atomic_helper_crtc_destroy_state(crtc->state);

		kfree(vmw_crtc_state_to_vcs(crtc->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);

	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
		return;
	}

	__drm_atomic_helper_crtc_reset(crtc, &vcs->base);
}

/**
 * vmw_du_crtc_destroy_state - destroy crtc state
 * @crtc: DRM crtc
 * @state: state object to destroy
 *
 * Destroys the crtc state (both common and vmw-specific) for the
 * specified crtc.
 */
void
vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
			  struct drm_crtc_state *state)
{
	drm_atomic_helper_crtc_destroy_state(crtc, state);
}


/**
 * vmw_du_plane_duplicate_state - duplicate plane state
 * @plane: drm plane
 *
 * Allocates and returns a copy of the plane state (both common and
 * vmw-specific) for the specified plane.
 *
 * Returns: The newly allocated plane state, or NULL on failure.
 */
struct drm_plane_state *
vmw_du_plane_duplicate_state(struct drm_plane *plane)
{
	struct drm_plane_state *state;
	struct vmw_plane_state *vps;

	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);

	if (!vps)
		return NULL;

	vps->pinned = 0;
	vps->cpp = 0;

	/* Each ref counted resource needs to be acquired again */
	if (vps->surf)
		(void) vmw_surface_reference(vps->surf);

	if (vps->bo)
		(void) vmw_bo_reference(vps->bo);

	state = &vps->base;

	__drm_atomic_helper_plane_duplicate_state(plane, state);

	return state;
}


/**
 * vmw_du_plane_reset - creates a blank vmw plane state
 * @plane: drm plane
 *
 * Resets the atomic state for @plane by freeing the state pointer (which might
 * be NULL, e.g. at driver load time) and allocating a new empty state object.
 */
void vmw_du_plane_reset(struct drm_plane *plane)
{
	struct vmw_plane_state *vps;

	if (plane->state)
		vmw_du_plane_destroy_state(plane, plane->state);

	vps = kzalloc(sizeof(*vps), GFP_KERNEL);

	if (!vps) {
		DRM_ERROR("Cannot allocate vmw_plane_state\n");
		return;
	}

	__drm_atomic_helper_plane_reset(plane, &vps->base);
}


/**
 * vmw_du_plane_destroy_state - destroy plane state
 * @plane: DRM plane
 * @state: state object to destroy
 *
 * Destroys the plane state (both common and vmw-specific) for the
 * specified plane.
 */
void
vmw_du_plane_destroy_state(struct drm_plane *plane,
			   struct drm_plane_state *state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);

	/* Should have been freed by cleanup_fb */
	if (vps->surf)
		vmw_surface_unreference(&vps->surf);

	if (vps->bo)
		vmw_bo_unreference(&vps->bo);

	drm_atomic_helper_plane_destroy_state(plane, state);
}


/**
 * vmw_du_connector_duplicate_state - duplicate connector state
 * @connector: DRM connector
 *
 * Allocates and returns a copy of the connector state (both common and
 * vmw-specific) for the specified connector.
 *
 * Returns: The newly allocated connector state, or NULL on failure.
 */
struct drm_connector_state *
vmw_du_connector_duplicate_state(struct drm_connector *connector)
{
	struct drm_connector_state *state;
	struct vmw_connector_state *vcs;

	if (WARN_ON(!connector->state))
		return NULL;

	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);

	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_connector_duplicate_state(connector, state);

	return state;
}


/**
 * vmw_du_connector_reset - creates a blank vmw connector state
 * @connector: DRM connector
 *
 * Resets the atomic state for @connector by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_connector_reset(struct drm_connector *connector)
{
	struct vmw_connector_state *vcs;

	if (connector->state) {
		__drm_atomic_helper_connector_destroy_state(connector->state);

		kfree(vmw_connector_state_to_vcs(connector->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);

	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_connector_state\n");
		return;
	}

	__drm_atomic_helper_connector_reset(connector, &vcs->base);
}

/**
 * vmw_du_connector_destroy_state - destroy connector state
 * @connector: DRM connector
 * @state: state object to destroy
 *
 * Destroys the connector state (both common and vmw-specific) for the
 * specified connector.
 */
void
vmw_du_connector_destroy_state(struct drm_connector *connector,
			       struct drm_connector_state *state)
{
	drm_atomic_helper_connector_destroy_state(connector, state);
}

/*
 * Generic framebuffer code
 */

/*
 * Surface framebuffer code
 */

static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfbs->surface);
	if (vfbs->base.user_obj)
		ttm_base_object_unref(&vfbs->base.user_obj);

	kfree(vfbs);
}

/**
 * vmw_kms_readback - Perform a readback from the screen system to
 * a buffer-object backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the buffer-object backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *vclips,
		     uint32_t num_clips)
{
	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_object:
		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
					    user_fence_rep, vclips, num_clips,
					    NULL);
	case vmw_du_screen_target:
		return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
					user_fence_rep, NULL, vclips, num_clips,
					1, false, true, NULL);
	default:
		WARN_ONCE(true,
			  "Readback called with invalid display system.\n");
	}

	return -ENOSYS;
}


static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = drm_atomic_helper_dirtyfb,
};

static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct vmw_surface *surface,
					   struct vmw_framebuffer **out,
					   const struct drm_mode_fb_cmd2
					   *mode_cmd,
					   bool is_bo_proxy)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_surface *vfbs;
	enum SVGA3dSurfaceFormat format;
	int ret;
	struct drm_format_name_buf format_name;

	/* 3D is only supported on HWv8 and newer hosts */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -ENOSYS;

	/*
	 * Sanity checks.
	 */

	/* Surface must be marked as a scanout. */
	if (unlikely(!surface->metadata.scanout))
		return -EINVAL;

	if (unlikely(surface->metadata.mip_levels[0] != 1 ||
		     surface->metadata.num_sizes != 1 ||
		     surface->metadata.base_size.width < mode_cmd->width ||
		     surface->metadata.base_size.height < mode_cmd->height ||
		     surface->metadata.base_size.depth != 1)) {
		DRM_ERROR("Incompatible surface dimensions "
			  "for requested mode.\n");
		return -EINVAL;
	}

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
		format = SVGA3D_A8R8G8B8;
		break;
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		break;
	case DRM_FORMAT_RGB565:
		format = SVGA3D_R5G6B5;
		break;
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_A1R5G5B5;
		break;
	default:
		DRM_ERROR("Invalid pixel format: %s\n",
			  drm_get_format_name(mode_cmd->pixel_format, &format_name));
		return -EINVAL;
	}

	/*
	 * For DX, surface format validation is done when surface->scanout
	 * is set.
	 */
	if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
		DRM_ERROR("Invalid surface format for requested mode.\n");
		return -EINVAL;
	}

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
	vfbs->surface = vmw_surface_reference(surface);
	vfbs->base.user_handle = mode_cmd->handles[0];
	vfbs->is_bo_proxy = is_bo_proxy;

	*out = &vfbs->base;

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_surface_unreference(&surface);
	kfree(vfbs);
out_err1:
	return ret;
}

/*
 * Buffer-object framebuffer code
 */

static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_bo_unreference(&vfbd->buffer);
	if (vfbd->base.user_obj)
		ttm_base_object_unref(&vfbd->base.user_obj);

	kfree(vfbd);
}

static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
				    struct drm_file *file_priv,
				    unsigned int flags, unsigned int color,
				    struct drm_clip_rect *clips,
				    unsigned int num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);
	struct drm_clip_rect norect;
	int ret, increment = 1;

	drm_modeset_lock_all(dev_priv->dev);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0)) {
		drm_modeset_unlock_all(dev_priv->dev);
		return ret;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		increment = 2;
	}

	switch (dev_priv->active_display_unit) {
	case vmw_du_legacy:
		ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
					      clips, num_clips, increment);
		break;
	default:
		ret = -EINVAL;
		WARN_ONCE(true, "Dirty called with invalid display system.\n");
		break;
	}

	vmw_fifo_flush(dev_priv, false);
	ttm_read_unlock(&dev_priv->reservation_sem);

	drm_modeset_unlock_all(dev_priv->dev);

	return ret;
}

static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer,
					struct drm_file *file_priv,
					unsigned int flags, unsigned int color,
					struct drm_clip_rect *clips,
					unsigned int num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);

	if (dev_priv->active_display_unit == vmw_du_legacy)
		return vmw_framebuffer_bo_dirty(framebuffer, file_priv, flags,
						color, clips, num_clips);

	return drm_atomic_helper_dirtyfb(framebuffer, file_priv, flags, color,
					 clips, num_clips);
}

static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
	.destroy = vmw_framebuffer_bo_destroy,
	.dirty = vmw_framebuffer_bo_dirty_ext,
};

/**
 * vmw_framebuffer_pin - Pin the buffer backing the framebuffer in a
 * location suitable for access by the display system.
 *
 * @vfb: The framebuffer to pin.
 */
static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_buffer_object *buf;
	struct ttm_placement *placement;
	int ret;

	buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (!buf)
		return 0;

	switch (dev_priv->active_display_unit) {
	case vmw_du_legacy:
		vmw_overlay_pause_all(dev_priv);
		ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
		vmw_overlay_resume_all(dev_priv);
		break;
	case vmw_du_screen_object:
	case vmw_du_screen_target:
		if (vfb->bo) {
			if (dev_priv->capabilities & SVGA_CAP_3D) {
				/*
				 * Use surface DMA to get content to
				 * screen target surface.
				 */
				placement = &vmw_vram_gmr_placement;
			} else {
				/* Use CPU blit. */
				placement = &vmw_sys_placement;
			}
		} else {
			/* Use surface / image update */
			placement = &vmw_mob_placement;
		}

		return vmw_bo_pin_in_placement(dev_priv, buf, placement, false);
	default:
		return -EINVAL;
	}

	return ret;
}

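/**
 * vmw_framebuffer_unpin - Unpin the buffer backing the framebuffer
 *
 * @vfb: The framebuffer whose backing buffer should be unpinned.
 */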
static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_buffer_object *buf;

	buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (WARN_ON(!buf))
		return 0;

	return vmw_bo_unpin(dev_priv, buf, false);
}

/**
 * vmw_create_bo_proxy - create a proxy surface for the buffer object
 *
 * @dev: DRM device
 * @mode_cmd: parameters for the new surface
 * @bo_mob: MOB backing the buffer object
 * @srf_out: newly created surface
 *
 * When the content FB is a buffer object, we create a surface as a proxy to
 * the same buffer.  This way we can do a surface copy rather than a surface
 * DMA, which is a more efficient approach.
 *
 * RETURNS:
 * 0 on success, error code otherwise
 */
static int vmw_create_bo_proxy(struct drm_device *dev,
			       const struct drm_mode_fb_cmd2 *mode_cmd,
			       struct vmw_buffer_object *bo_mob,
			       struct vmw_surface **srf_out)
{
	struct vmw_surface_metadata metadata = {0};
	uint32_t format;
	struct vmw_resource *res;
	unsigned int bytes_pp;
	struct drm_format_name_buf format_name;
	int ret;

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		bytes_pp = 4;
		break;

	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_R5G6B5;
		bytes_pp = 2;
		break;

	case DRM_FORMAT_C8:
		format = SVGA3D_P8;
		bytes_pp = 1;
		break;

	default:
		DRM_ERROR("Invalid framebuffer format %s\n",
			  drm_get_format_name(mode_cmd->pixel_format, &format_name));
		return -EINVAL;
	}

	metadata.format = format;
	metadata.mip_levels[0] = 1;
	metadata.num_sizes = 1;
	metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
	metadata.base_size.height = mode_cmd->height;
	metadata.base_size.depth = 1;
	metadata.scanout = true;

	ret = vmw_gb_surface_define(vmw_priv(dev), 0, &metadata, srf_out);
	if (ret) {
		DRM_ERROR("Failed to allocate proxy content buffer\n");
		return ret;
	}

	res = &(*srf_out)->res;

	/* Reserve and switch the backing mob. */
	mutex_lock(&res->dev_priv->cmdbuf_mutex);
	(void) vmw_resource_reserve(res, false, true);
	vmw_bo_unreference(&res->backup);
	res->backup = vmw_bo_reference(bo_mob);
	res->backup_offset = 0;
	vmw_resource_unreserve(res, false, false, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);

	return 0;
}

static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
				      struct vmw_buffer_object *bo,
				      struct vmw_framebuffer **out,
				      const struct drm_mode_fb_cmd2
				      *mode_cmd)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_bo *vfbd;
	unsigned int requested_size;
	struct drm_format_name_buf format_name;
	int ret;

	requested_size = mode_cmd->height * mode_cmd->pitches[0];
	if (unlikely(requested_size > bo->base.num_pages * PAGE_SIZE)) {
		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
		return -EINVAL;
	}

	/* Limited framebuffer color depth support for screen objects */
	if (dev_priv->active_display_unit == vmw_du_screen_object) {
		switch (mode_cmd->pixel_format) {
		case DRM_FORMAT_XRGB8888:
		case DRM_FORMAT_ARGB8888:
			break;
		case DRM_FORMAT_XRGB1555:
		case DRM_FORMAT_RGB565:
			break;
		default:
			DRM_ERROR("Invalid pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format, &format_name));
			return -EINVAL;
		}
	}

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
	vfbd->base.bo = true;
	vfbd->buffer = vmw_bo_reference(bo);
	vfbd->base.user_handle = mode_cmd->handles[0];
	*out = &vfbd->base;

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_bo_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_bo_unreference(&bo);
	kfree(vfbd);
out_err1:
	return ret;
}


/**
 * vmw_kms_srf_ok - check if a surface can be created
 *
 * @dev_priv: Pointer to device private struct.
 * @width: requested width
 * @height: requested height
 *
 * Surfaces need to be no larger than the maximum texture size.
 */
static bool
vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
{
	if (width > dev_priv->texture_max_width ||
	    height > dev_priv->texture_max_height)
		return false;

	return true;
}

/**
 * vmw_kms_new_framebuffer - Create a new framebuffer.
 *
 * @dev_priv: Pointer to device private struct.
 * @bo: Pointer to buffer object to wrap the kms framebuffer around.
 * Either @bo or @surface must be NULL.
 * @surface: Pointer to a surface to wrap the kms framebuffer around.
 * Either @bo or @surface must be NULL.
 * @only_2d: No presents will occur to this buffer object based framebuffer.
 * This helps the code to do some important optimizations.
 * @mode_cmd: Frame-buffer metadata.
 */
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
			struct vmw_buffer_object *bo,
			struct vmw_surface *surface,
			bool only_2d,
			const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_framebuffer *vfb = NULL;
	bool is_bo_proxy = false;
	int ret;

	/*
	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
	 * therefore, wrap the buffer object in a surface so we can use the
	 * SurfaceCopy command.
	 */
	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
	    bo && only_2d &&
	    mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
	    dev_priv->active_display_unit == vmw_du_screen_target) {
		ret = vmw_create_bo_proxy(dev_priv->dev, mode_cmd,
					  bo, &surface);
		if (ret)
			return ERR_PTR(ret);

		is_bo_proxy = true;
	}

	/* Create the new framebuffer depending on what we have */
	if (surface) {
		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
						      mode_cmd,
						      is_bo_proxy);

		/*
		 * vmw_create_bo_proxy() adds a reference that is no longer
		 * needed
		 */
		if (is_bo_proxy)
			vmw_surface_unreference(&surface);
	} else if (bo) {
		ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
						 mode_cmd);
	} else {
		BUG();
	}

	if (ret)
		return ERR_PTR(ret);

	vfb->pin = vmw_framebuffer_pin;
	vfb->unpin = vmw_framebuffer_unpin;

	return vfb;
}

/*
 * Generic Kernel modesetting functions
 */

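/**
 * vmw_kms_fb_create - Implementation of the fb_create mode-config callback
 *
 * @dev: DRM device
 * @file_priv: DRM file identifying the caller
 * @mode_cmd: Frame-buffer metadata from user-space
 *
 * Looks up the buffer object or surface backing the user-space handle and
 * wraps a new vmw framebuffer around it.
 *
 * Returns: Pointer to the new framebuffer on success, or an ERR_PTR on
 * failure.
 */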
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_buffer_object *bo = NULL;
	struct ttm_base_object *user_obj;
	int ret;

	/*
	 * Take a reference on the user object of the resource
	 * backing the kms fb. This ensures that user-space handle
	 * lookups on that resource will always work as long as
	 * it's registered with a kms framebuffer. This is important,
	 * since vmw_execbuf_process identifies resources in the
	 * command stream using user-space handles.
	 */

	user_obj = ttm_base_object_lookup(tfile, mode_cmd->handles[0]);
	if (unlikely(user_obj == NULL)) {
		DRM_ERROR("Could not locate requested kms frame buffer.\n");
		return ERR_PTR(-ENOENT);
	}
	/* returns either a bo or surface */
	ret = vmw_user_lookup_handle(dev_priv, tfile,
				     mode_cmd->handles[0],
				     &surface, &bo);
	if (ret)
		goto err_out;

	if (!bo &&
	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
		DRM_ERROR("Surface size cannot exceed %dx%d\n",
			  dev_priv->texture_max_width,
			  dev_priv->texture_max_height);
		/* Without this, err_out would dereference a NULL vfb. */
		ret = -EINVAL;
		goto err_out;
	}

	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
				      !(dev_priv->capabilities & SVGA_CAP_3D),
				      mode_cmd);
	if (IS_ERR(vfb)) {
		ret = PTR_ERR(vfb);
		goto err_out;
	}

err_out:
	/* vmw_user_lookup_handle takes one ref so does new_fb */
	if (bo)
		vmw_bo_unreference(&bo);
	if (surface)
		vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		ttm_base_object_unref(&user_obj);
		return ERR_PTR(ret);
	} else
		vfb->user_obj = user_obj;

	return &vfb->base;
}

/**
 * vmw_kms_check_display_memory - Validates display memory required for a
 * topology
 * @dev: DRM device
 * @num_rects: number of drm_rect in rects
 * @rects: array of drm_rect representing the topology to validate indexed by
 * crtc index.
 *
 * Returns:
 * 0 on success otherwise negative error code
 */
static int vmw_kms_check_display_memory(struct drm_device *dev,
					uint32_t num_rects,
					struct drm_rect *rects)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_rect bounding_box = {0};
	u64 total_pixels = 0, pixel_mem, bb_mem;
	int i;

	for (i = 0; i < num_rects; i++) {
		/*
		 * For STDU only individual screen (screen target) is limited by
		 * SCREENTARGET_MAX_WIDTH/HEIGHT registers.
		 */
		if (dev_priv->active_display_unit == vmw_du_screen_target &&
		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
			VMW_DEBUG_KMS("Screen size not supported.\n");
			return -EINVAL;
		}

		/* Bounding box upper left is at (0,0). */
		if (rects[i].x2 > bounding_box.x2)
			bounding_box.x2 = rects[i].x2;

		if (rects[i].y2 > bounding_box.y2)
			bounding_box.y2 = rects[i].y2;

		total_pixels += (u64) drm_rect_width(&rects[i]) *
			(u64) drm_rect_height(&rects[i]);
	}

	/* Virtual svga device primary limits are always in 32-bpp. */
	pixel_mem = total_pixels * 4;

	/*
	 * For HV10 and below prim_bb_mem is vram size. When
	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, vram size is
	 * the limit on the primary bounding box.
	 */
	if (pixel_mem > dev_priv->prim_bb_mem) {
		VMW_DEBUG_KMS("Combined output size too large.\n");
		return -EINVAL;
	}

	/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
	if (dev_priv->active_display_unit != vmw_du_screen_target ||
	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;

		if (bb_mem > dev_priv->prim_bb_mem) {
			VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * vmw_crtc_state_and_lock - Return new or current crtc state with locked
 * crtc mutex
 * @state: The atomic state pointer containing the new atomic state
 * @crtc: The crtc
 *
 * This function returns the new crtc state if it's part of the state update.
 * Otherwise returns the current crtc state. It also makes sure that the
 * crtc mutex is locked.
 *
 * Returns: A valid crtc state pointer or NULL. It may also return a
 * pointer error, in particular -EDEADLK if locking needs to be rerun.
 */
static struct drm_crtc_state *
vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;

	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	if (crtc_state) {
		lockdep_assert_held(&crtc->mutex.mutex.base);
	} else {
		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);

		if (ret != 0 && ret != -EALREADY)
			return ERR_PTR(ret);

		crtc_state = crtc->state;
	}

	return crtc_state;
}

/**
 * vmw_kms_check_implicit - Verify that all implicit display units scan out
 * from the same fb after the new state is committed.
 * @dev: The drm_device.
 * @state: The new state to be checked.
 *
 * Returns:
 *   Zero on success,
 *   -EINVAL on invalid state,
 *   -EDEADLK if modeset locking needs to be rerun.
 */
static int vmw_kms_check_implicit(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct drm_framebuffer *implicit_fb = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	drm_for_each_crtc(crtc, dev) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

		if (!du->is_implicit)
			continue;

		crtc_state = vmw_crtc_state_and_lock(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state || !crtc_state->enable)
			continue;

		/*
		 * Can't move primary planes across crtcs, so this is OK.
		 * It also means we don't need to take the plane mutex.
		 */
		plane_state = du->primary.state;
		if (plane_state->crtc != crtc)
			continue;

		if (!implicit_fb)
			implicit_fb = plane_state->fb;
		else if (implicit_fb != plane_state->fb)
			return -EINVAL;
	}

	return 0;
}

/**
 * vmw_kms_check_topology - Validates topology in drm_atomic_state
 * @dev: DRM device
 * @state: the driver state object
 *
 * Returns:
 * 0 on success otherwise negative error code
 */
static int vmw_kms_check_topology(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_rect *rects;
	struct drm_crtc *crtc;
	uint32_t i;
	int ret = 0;

	rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
			GFP_KERNEL);
	if (!rects)
		return -ENOMEM;

	drm_for_each_crtc(crtc, dev) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
		struct drm_crtc_state *crtc_state;

		i = drm_crtc_index(crtc);

		crtc_state = vmw_crtc_state_and_lock(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto clean;
		}

		if (!crtc_state)
			continue;

		if (crtc_state->enable) {
			rects[i].x1 = du->gui_x;
			rects[i].y1 = du->gui_y;
			rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
			rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
		} else {
			rects[i].x1 = 0;
			rects[i].y1 = 0;
			rects[i].x2 = 0;
			rects[i].y2 = 0;
		}
	}

	/* Determine change to topology due to new atomic state */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
		struct drm_connector *connector;
		struct drm_connector_state *conn_state;
		struct vmw_connector_state *vmw_conn_state;

		if (!du->pref_active && new_crtc_state->enable) {
			VMW_DEBUG_KMS("Enabling a disabled display unit\n");
			ret = -EINVAL;
			goto clean;
		}

		/*
		 * For vmwgfx each crtc has only one connector attached and it
		 * is not changed, so we don't really need to check
		 * crtc->connector_mask and iterate over it.
		 */
		connector = &du->connector;
		conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			goto clean;
		}

		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
		vmw_conn_state->gui_x = du->gui_x;
		vmw_conn_state->gui_y = du->gui_y;
	}

	ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
					   rects);

clean:
	kfree(rects);
	return ret;
}

/**
 * vmw_kms_atomic_check_modeset - validate state object for modeset changes
 *
 * @dev: DRM device
 * @state: the driver state object
 *
 * This is a simple wrapper around drm_atomic_helper_check_modeset() for
 * us to assign a value to mode->crtc_clock so that
 * drm_calc_timestamping_constants() won't throw an error message
 *
 * Returns:
 * Zero for success or -errno
 */
static int
vmw_kms_atomic_check_modeset(struct drm_device *dev,
			     struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	bool need_modeset = false;
	int i, ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	ret = vmw_kms_check_implicit(dev, state);
	if (ret) {
		VMW_DEBUG_KMS("Invalid implicit state\n");
		return ret;
	}

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (drm_atomic_crtc_needs_modeset(crtc_state))
			need_modeset = true;
	}

	if (need_modeset)
		return vmw_kms_check_topology(dev, state);

	return ret;
}

static const struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
	.atomic_check = vmw_kms_atomic_check_modeset,
	.atomic_commit = drm_atomic_helper_commit,
};

static int vmw_kms_generic_present(struct vmw_private *dev_priv,
				   struct drm_file *file_priv,
				   struct vmw_framebuffer *vfb,
				   struct vmw_surface *surface,
				   uint32_t sid,
				   int32_t destX, int32_t destY,
				   struct drm_vmw_rect *clips,
				   uint32_t num_clips)
{
	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
					    &surface->res, destX, destY,
					    num_clips, 1, NULL, NULL);
}

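/**
 * vmw_kms_present - Execute a present (copy) to the screen system
 *
 * @dev_priv: Pointer to the device private struct.
 * @file_priv: DRM file identifying the caller.
 * @vfb: Framebuffer to present from.
 * @surface: Surface to present.
 * @sid: Surface id.
 * @destX: Destination x coordinate.
 * @destY: Destination y coordinate.
 * @clips: Array of clip rects.
 * @num_clips: Number of clip rects in @clips.
 *
 * Dispatches the present to the active display unit implementation and
 * flushes the FIFO on success.
 */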
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid,
		    int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips)
{
	int ret;

	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_target:
		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
						 &surface->res, destX, destY,
						 num_clips, 1, NULL, NULL);
		break;
	case vmw_du_screen_object:
		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
					      sid, destX, destY, clips,
					      num_clips);
		break;
	default:
		WARN_ONCE(true,
			  "Present called with invalid display system.\n");
		ret = -ENOSYS;
		break;
	}
	if (ret)
		return ret;

	vmw_fifo_flush(dev_priv, false);

	return 0;
}

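/**
 * vmw_kms_create_hotplug_mode_update_property - Create the hotplug mode
 * update property if it does not already exist
 *
 * @dev_priv: Pointer to the device private struct.
 */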
static void
vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
{
	if (dev_priv->hotplug_mode_update_property)
		return;

	dev_priv->hotplug_mode_update_property =
		drm_property_create_range(dev_priv->dev,
					  DRM_MODE_PROP_IMMUTABLE,
					  "hotplug_mode_update", 0, 1);
}

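/**
 * vmw_kms_init - Initialize kernel modesetting
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Sets up mode-config limits and properties, then tries to initialize a
 * display unit: screen target first, then screen object, with legacy as
 * the final fallback.
 */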
int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 1;
	dev->mode_config.min_height = 1;
	dev->mode_config.max_width = dev_priv->texture_max_width;
	dev->mode_config.max_height = dev_priv->texture_max_height;

	drm_mode_create_suggested_offset_properties(dev);
	vmw_kms_create_hotplug_mode_update_property(dev_priv);

	ret = vmw_kms_stdu_init_display(dev_priv);
	if (ret) {
		ret = vmw_kms_sou_init_display(dev_priv);
		if (ret) /* Fallback */
			ret = vmw_kms_ldu_init_display(dev_priv);
	}

	return ret;
}

int vmw_kms_close(struct vmw_private *dev_priv)
{
	int ret = 0;

	/*
	 * The docs say we should take the lock before calling this function
	 * but since it destroys encoders and our destructor calls
	 * drm_encoder_cleanup which takes the lock we deadlock.
	 */
	drm_mode_config_cleanup(dev_priv->dev);
	if (dev_priv->active_display_unit == vmw_du_legacy)
		ret = vmw_kms_ldu_close_display(dev_priv);

	return ret;
}

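/**
 * vmw_kms_cursor_bypass_ioctl - Set cursor hotspots from user-space
 *
 * @dev: DRM device
 * @data: Pointer to the drm_vmw_cursor_bypass_arg ioctl argument.
 * @file_priv: DRM file identifying the caller.
 *
 * Updates the legacy cursor hotspot for one crtc, or for all crtcs when
 * DRM_VMW_CURSOR_BYPASS_ALL is set.
 */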
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;
	int ret = 0;

	mutex_lock(&dev->mode_config.mutex);
	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			du->hotspot_x = arg->xhot;
			du->hotspot_y = arg->yhot;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}

	crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
	if (!crtc) {
		ret = -ENOENT;
		goto out;
	}

	du = vmw_crtc_to_du(crtc);

	du->hotspot_x = arg->xhot;
	du->hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}

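/**
 * vmw_kms_write_svga - Program the device scanout parameters
 *
 * @vmw_priv: Pointer to the device private struct.
 * @width: Scanout width.
 * @height: Scanout height.
 * @pitch: Scanout pitch in bytes.
 * @bpp: Bits per pixel.
 * @depth: Color depth the host is expected to report back.
 *
 * Writes pitch, width, height and bpp to the device registers and
 * verifies that the host accepted the requested depth.
 */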
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth)
{
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_mmio_write(pitch, vmw_priv->mmio_virt +
			       SVGA_FIFO_PITCHLOCK);
	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);

	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
		return -EINVAL;
	}

	return 0;
}

bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height)
{
	return ((u64) pitch * (u64) height) < (u64)
		((dev_priv->active_display_unit == vmw_du_screen_target) ?
		 dev_priv->prim_bb_mem : dev_priv->vram_size);
}

/**
 * vmw_get_vblank_counter - Called by DRM code with vbl_lock held.
 */
u32 vmw_get_vblank_counter(struct drm_crtc *crtc)
{
	return 0;
}

/**
 * vmw_enable_vblank - Called by DRM code with vbl_lock held.
 */
int vmw_enable_vblank(struct drm_crtc *crtc)
{
	return -EINVAL;
}

/**
 * vmw_disable_vblank - Called by DRM code with vbl_lock held.
 */
void vmw_disable_vblank(struct drm_crtc *crtc)
{
}

/**
 * vmw_du_update_layout - Update the display unit with topology from resolution
 * plugin and generate DRM uevent
 * @dev_priv: device private
 * @num_rects: number of drm_rect in rects
 * @rects: topology to update
 */
static int vmw_du_update_layout(struct vmw_private *dev_priv,
				unsigned int num_rects, struct drm_rect *rects)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_connector *con;
	struct drm_connector_list_iter conn_iter;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_crtc *crtc;
	int ret;

	/* Currently gui_x/y is protected with the crtc mutex */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(&ctx, 0);
retry:
	drm_for_each_crtc(crtc, dev) {
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret < 0) {
			if (ret == -EDEADLK) {
				drm_modeset_backoff(&ctx);
				goto retry;
			}
			goto out_fini;
		}
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(con, &conn_iter) {
		du = vmw_connector_to_du(con);
		if (num_rects > du->unit) {
			du->pref_width = drm_rect_width(&rects[du->unit]);
			du->pref_height = drm_rect_height(&rects[du->unit]);
			du->pref_active = true;
			du->gui_x = rects[du->unit].x1;
			du->gui_y = rects[du->unit].y1;
		} else {
			du->pref_width = 800;
			du->pref_height = 600;
			du->pref_active = false;
			du->gui_x = 0;
			du->gui_y = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
		du = vmw_connector_to_du(con);
		if (num_rects > du->unit) {
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_x_property,
			   du->gui_x);
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_y_property,
			   du->gui_y);
		} else {
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_x_property,
			   0);
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_y_property,
			   0);
		}
		con->status = vmw_du_connector_detect(con, true);
	}

	drm_sysfs_hotplug_event(dev);
out_fini:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}

2007int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
2008			  u16 *r, u16 *g, u16 *b,
2009			  uint32_t size,
2010			  struct drm_modeset_acquire_ctx *ctx)
2011{
2012	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
2013	int i;
2014
2015	for (i = 0; i < size; i++) {
2016		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
2017			  r[i], g[i], b[i]);
2018		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
2019		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
2020		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
2021	}
2022
2023	return 0;
2024}
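
/*
 * Illustration of the palette layout used above (hypothetical values):
 * each gamma entry occupies three consecutive registers, and each 16-bit
 * component is truncated to its high byte. Entry 5 with r[5] == 0xffff is
 * therefore written as
 *
 *	vmw_write(dev_priv, SVGA_PALETTE_BASE + 5 * 3 + 0, 0xffff >> 8);
 *
 * i.e. register SVGA_PALETTE_BASE + 15 receives 0xff.
 */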

int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
{
	return 0;
}

enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force)
{
	uint32_t num_displays;
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_display_unit *du = vmw_connector_to_du(connector);

	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);

	return ((vmw_connector_to_du(connector)->unit < num_displays &&
		 du->pref_active) ?
		connector_status_connected : connector_status_disconnected);
}

static struct drm_display_mode vmw_kms_connector_builtin[] = {
	/* 640x480@60Hz */
	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
		   752, 800, 0, 480, 489, 492, 525, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 800x600@60Hz */
	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
		   968, 1056, 0, 600, 601, 605, 628, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1024x768@60Hz */
	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
		   1184, 1344, 0, 768, 771, 777, 806, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1152x864@75Hz */
	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
		   1344, 1600, 0, 864, 865, 868, 900, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x768@60Hz */
	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
		   1472, 1664, 0, 768, 771, 778, 798, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x800@60Hz */
	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
		   1480, 1680, 0, 800, 803, 809, 831, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1280x960@60Hz */
	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
		   1488, 1800, 0, 960, 961, 964, 1000, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x1024@60Hz */
	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1360x768@60Hz */
	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
		   1536, 1792, 0, 768, 771, 777, 795, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1400x1050@60Hz */
	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1440x900@60Hz */
	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
		   1672, 1904, 0, 900, 903, 909, 934, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1600x1200@60Hz */
	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1680x1050@60Hz */
	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1792x1344@60Hz */
	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1856x1392@60Hz */
	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1200@60Hz */
	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1440@60Hz */
	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 2560x1600@60Hz */
	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* Terminate */
	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
};

/**
 * vmw_guess_mode_timing - Provide fake timings for a
 * 60Hz vrefresh mode.
 *
 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
 * members filled in.
 */
void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
	mode->hsync_start = mode->hdisplay + 50;
	mode->hsync_end = mode->hsync_start + 50;
	mode->htotal = mode->hsync_end + 50;

	mode->vsync_start = mode->vdisplay + 50;
	mode->vsync_end = mode->vsync_start + 50;
	mode->vtotal = mode->vsync_end + 50;

	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
}
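
/*
 * Worked example for vmw_guess_mode_timing() (hypothetical numbers): for a
 * 1920x1080 mode the padding above yields htotal = 1920 + 150 = 2070 and
 * vtotal = 1080 + 150 = 1230, so
 *
 *	clock = 2070 * 1230 / 100 * 6 = 152766 (kHz)
 *
 * which corresponds to 152766000 / (2070 * 1230) ~= 60 Hz vrefresh.
 */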

int vmw_du_connector_fill_modes(struct drm_connector *connector,
				uint32_t max_width, uint32_t max_height)
{
	struct vmw_display_unit *du = vmw_connector_to_du(connector);
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *bmode;
	struct drm_display_mode prefmode = { DRM_MODE("preferred",
		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	int i;
	u32 assumed_bpp = 4;

	if (dev_priv->assume_16bpp)
		assumed_bpp = 2;

	max_width  = min(max_width,  dev_priv->texture_max_width);
	max_height = min(max_height, dev_priv->texture_max_height);

	/*
	 * For STDU, the mode is additionally limited by the
	 * SVGA_REG_SCREENTARGET_MAX_WIDTH/HEIGHT registers.
	 */
	if (dev_priv->active_display_unit == vmw_du_screen_target) {
		max_width  = min(max_width,  dev_priv->stdu_max_width);
		max_height = min(max_height, dev_priv->stdu_max_height);
	}

	/* Add preferred mode */
	mode = drm_mode_duplicate(dev, &prefmode);
	if (!mode)
		return 0;
	mode->hdisplay = du->pref_width;
	mode->vdisplay = du->pref_height;
	vmw_guess_mode_timing(mode);

	if (vmw_kms_validate_mode_vram(dev_priv,
					mode->hdisplay * assumed_bpp,
					mode->vdisplay)) {
		drm_mode_probed_add(connector, mode);
	} else {
		drm_mode_destroy(dev, mode);
		mode = NULL;
	}

	if (du->pref_mode) {
		list_del_init(&du->pref_mode->head);
		drm_mode_destroy(dev, du->pref_mode);
	}

	/* mode might be NULL here, this is intended */
	du->pref_mode = mode;

	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
		bmode = &vmw_kms_connector_builtin[i];
		if (bmode->hdisplay > max_width ||
		    bmode->vdisplay > max_height)
			continue;

		if (!vmw_kms_validate_mode_vram(dev_priv,
						bmode->hdisplay * assumed_bpp,
						bmode->vdisplay))
			continue;

		mode = drm_mode_duplicate(dev, bmode);
		if (!mode)
			return 0;

		drm_mode_probed_add(connector, mode);
	}

	drm_connector_list_update(connector);
	/* Move the preferred mode first to help apps pick the right mode. */
	drm_mode_sort(&connector->modes);

	return 1;
}
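
/*
 * Note on the VRAM validation above (illustrative, hypothetical numbers):
 * with assume_16bpp set, the 2560x1600 builtin mode needs
 * 2560 * 2 * 1600 = 8192000 bytes rather than 16384000 at the default
 * 4 bytes per pixel, i.e. half the memory of the 32bpp case, so more
 * builtin modes may pass the check in the same VRAM budget.
 */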

/**
 * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
 * @dev: drm device for the ioctl
 * @data: data pointer for the ioctl
 * @file_priv: drm file for the ioctl call
 *
 * Update the preferred topology of the display units as per the ioctl
 * request. The topology is expressed as an array of drm_vmw_rect, e.g.
 *
 * [0 0 640 480] [640 0 800 600] [0 480 640 480]
 *
 * NOTE:
 * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
 * Besides the device limit on topology, x + w and y + h (lower right)
 * cannot be greater than INT_MAX, so a topology beyond these limits will
 * fail with an error.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_vmw_update_layout_arg *arg =
		(struct drm_vmw_update_layout_arg *)data;
	void __user *user_rects;
	struct drm_vmw_rect *rects;
	struct drm_rect *drm_rects;
	unsigned rects_size;
	int ret, i;

	if (!arg->num_outputs) {
		struct drm_rect def_rect = {0, 0, 800, 600};
		VMW_DEBUG_KMS("Default layout x1 = %d y1 = %d x2 = %d y2 = %d\n",
			      def_rect.x1, def_rect.y1,
			      def_rect.x2, def_rect.y2);
		vmw_du_update_layout(dev_priv, 1, &def_rect);
		return 0;
	}

	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
			GFP_KERNEL);
	if (unlikely(!rects))
		return -ENOMEM;

	user_rects = (void __user *)(unsigned long)arg->rects;
	ret = copy_from_user(rects, user_rects, rects_size);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to get rects.\n");
		ret = -EFAULT;
		goto out_free;
	}

	drm_rects = (struct drm_rect *)rects;

	VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
	for (i = 0; i < arg->num_outputs; i++) {
		struct drm_vmw_rect curr_rect;

		/*
		 * Verify the user-space rects for overflow, as the kernel
		 * uses drm_rect internally.
		 */
		if ((rects[i].x + rects[i].w > INT_MAX) ||
		    (rects[i].y + rects[i].h > INT_MAX)) {
			ret = -ERANGE;
			goto out_free;
		}

		curr_rect = rects[i];
		drm_rects[i].x1 = curr_rect.x;
		drm_rects[i].y1 = curr_rect.y;
		drm_rects[i].x2 = curr_rect.x + curr_rect.w;
		drm_rects[i].y2 = curr_rect.y + curr_rect.h;

		VMW_DEBUG_KMS("  x1 = %d y1 = %d x2 = %d y2 = %d\n",
			      drm_rects[i].x1, drm_rects[i].y1,
			      drm_rects[i].x2, drm_rects[i].y2);

		/*
		 * Currently this check limits the topology to
		 * mode_config->max_width/height (which is actually the
		 * maximum texture size supported by the virtual device).
		 * The limit exists to accommodate window managers that
		 * create one big framebuffer for the whole topology.
		 */
		if (drm_rects[i].x1 < 0 || drm_rects[i].y1 < 0 ||
		    drm_rects[i].x2 > mode_config->max_width ||
		    drm_rects[i].y2 > mode_config->max_height) {
			VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
				      drm_rects[i].x1, drm_rects[i].y1,
				      drm_rects[i].x2, drm_rects[i].y2);
			ret = -EINVAL;
			goto out_free;
		}
	}

	ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);

	if (ret == 0)
		vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);

out_free:
	kfree(rects);
	return ret;
}
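
/*
 * Example of the drm_vmw_rect to drm_rect conversion performed by the
 * ioctl above (hypothetical values): a user rect of x = 640, y = 0,
 * w = 800, h = 600 becomes
 *
 *	drm_rects[i] = { .x1 = 640, .y1 = 0, .x2 = 1440, .y2 = 600 };
 *
 * matching the second output in the [0 0 640 480] [640 0 800 600] layout
 * documented above.
 */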

/**
 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
 * on a set of cliprects and a set of display units.
 *
 * @dev_priv: Pointer to a device private structure.
 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
 * Cliprects are given in framebuffer coordinates.
 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
 * be NULL. Cliprects are given in source coordinates.
 * @dest_x: X coordinate offset for the crtc / destination clip rects.
 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
 * @num_clips: Number of cliprects in the @clips or @vclips array.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
 */
int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
			 struct vmw_framebuffer *framebuffer,
			 const struct drm_clip_rect *clips,
			 const struct drm_vmw_rect *vclips,
			 s32 dest_x, s32 dest_y,
			 int num_clips,
			 int increment,
			 struct vmw_kms_dirty *dirty)
{
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_crtc *crtc;
	u32 num_units = 0;
	u32 i, k;

	dirty->dev_priv = dev_priv;

	/* If crtc is passed, no need to iterate over other display units */
	if (dirty->crtc) {
		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
	} else {
		list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
				    head) {
			struct drm_plane *plane = crtc->primary;

			if (plane->state->fb == &framebuffer->base)
				units[num_units++] = vmw_crtc_to_du(crtc);
		}
	}

	for (k = 0; k < num_units; k++) {
		struct vmw_display_unit *unit = units[k];
		s32 crtc_x = unit->crtc.x;
		s32 crtc_y = unit->crtc.y;
		s32 crtc_width = unit->crtc.mode.hdisplay;
		s32 crtc_height = unit->crtc.mode.vdisplay;
		const struct drm_clip_rect *clips_ptr = clips;
		const struct drm_vmw_rect *vclips_ptr = vclips;

		dirty->unit = unit;
		if (dirty->fifo_reserve_size > 0) {
			dirty->cmd = VMW_FIFO_RESERVE(dev_priv,
						      dirty->fifo_reserve_size);
			if (!dirty->cmd)
				return -ENOMEM;

			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
		}
		dirty->num_hits = 0;
		for (i = 0; i < num_clips; i++, clips_ptr += increment,
		       vclips_ptr += increment) {
			s32 clip_left;
			s32 clip_top;

			/*
			 * Select clip array type. Note that integer type
			 * in @clips is unsigned short, whereas in @vclips
			 * it's 32-bit.
			 */
			if (clips) {
				dirty->fb_x = (s32) clips_ptr->x1;
				dirty->fb_y = (s32) clips_ptr->y1;
				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
					crtc_x;
				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
					crtc_y;
			} else {
				dirty->fb_x = vclips_ptr->x;
				dirty->fb_y = vclips_ptr->y;
				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
					dest_x - crtc_x;
				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
					dest_y - crtc_y;
			}

			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;

			/* Skip this clip if it's outside the crtc region */
			if (dirty->unit_x1 >= crtc_width ||
			    dirty->unit_y1 >= crtc_height ||
			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
				continue;

			/* Clip right and bottom to crtc limits */
			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
					       crtc_width);
			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
					       crtc_height);

			/* Clip left and top to crtc limits */
			clip_left = min_t(s32, dirty->unit_x1, 0);
			clip_top = min_t(s32, dirty->unit_y1, 0);
			dirty->unit_x1 -= clip_left;
			dirty->unit_y1 -= clip_top;
			dirty->fb_x -= clip_left;
			dirty->fb_y -= clip_top;

			dirty->clip(dirty);
		}

		dirty->fifo_commit(dirty);
	}

	return 0;
}
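
/*
 * Illustration of the clip translation in vmw_kms_helper_dirty()
 * (hypothetical numbers): with a crtc at crtc_x = 640, dest_x = 0 and a
 * framebuffer clip starting at fb_x = 700, the unit-space coordinate is
 *
 *	dirty->unit_x1 = 700 + 0 - 640;	(= 60)
 *
 * A clip starting left of the crtc yields a negative unit_x1; the
 * clip_left adjustment raises it to 0 and advances fb_x by the same
 * amount, so only the visible part of the clip is emitted.
 */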

/**
 * vmw_kms_helper_validation_finish - Helper for post KMS command submission
 * cleanup and fencing
 * @dev_priv: Pointer to the device-private struct
 * @file_priv: Pointer identifying the client when user-space fencing is used
 * @ctx: Pointer to the validation context
 * @out_fence: If non-NULL, returned refcounted fence-pointer
 * @user_fence_rep: If non-NULL, pointer to user-space address area
 * in which to copy user-space fence info
 */
void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
				      struct drm_file *file_priv,
				      struct vmw_validation_context *ctx,
				      struct vmw_fence_obj **out_fence,
				      struct drm_vmw_fence_rep __user *
				      user_fence_rep)
{
	struct vmw_fence_obj *fence = NULL;
	uint32_t handle = 0;
	int ret = 0;

	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
	    out_fence)
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
						 file_priv ? &handle : NULL);
	vmw_validation_done(ctx, fence);
	if (file_priv)
		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
					    ret, user_fence_rep, fence,
					    handle, -1);
	if (out_fence)
		*out_fence = fence;
	else
		vmw_fence_obj_unreference(&fence);
}

/**
 * vmw_kms_update_proxy - Helper function to update a proxy surface from
 * its backing MOB.
 *
 * @res: Pointer to the surface resource
 * @clips: Clip rects in framebuffer (surface) space.
 * @num_clips: Number of clips in @clips.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 *
 * This function makes sure the proxy surface is updated from its backing MOB
 * using the region given by @clips. The surface resource @res and its backing
 * MOB need to be reserved and validated when this function is called.
 */
int vmw_kms_update_proxy(struct vmw_resource *res,
			 const struct drm_clip_rect *clips,
			 unsigned num_clips,
			 int increment)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;
	SVGA3dBox *box;
	size_t copy_size = 0;
	int i;

	if (!clips)
		return 0;

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
	if (!cmd)
		return -ENOMEM;

	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
		box = &cmd->body.box;

		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.image.sid = res->id;
		cmd->body.image.face = 0;
		cmd->body.image.mipmap = 0;

		if (clips->x1 > size->width || clips->x2 > size->width ||
		    clips->y1 > size->height || clips->y2 > size->height) {
			DRM_ERROR("Invalid clips outside of framebuffer.\n");
			/* Drop the unused FIFO reservation before bailing. */
			vmw_fifo_commit(dev_priv, 0);
			return -EINVAL;
		}

		box->x = clips->x1;
		box->y = clips->y1;
		box->z = 0;
		box->w = clips->x2 - clips->x1;
		box->h = clips->y2 - clips->y1;
		box->d = 1;

		copy_size += sizeof(*cmd);
	}

	vmw_fifo_commit(dev_priv, copy_size);

	return 0;
}
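
/*
 * Sketch of a single UPDATE_GB_IMAGE box built by the loop above
 * (hypothetical clip): clips = { .x1 = 16, .y1 = 32, .x2 = 48, .y2 = 64 }
 * yields
 *
 *	box->x = 16; box->y = 32; box->z = 0;
 *	box->w = 32; box->h = 32; box->d = 1;
 *
 * i.e. a one-slice 2D region in face 0, mipmap 0 of the surface.
 */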

int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
			    unsigned unit,
			    u32 max_width,
			    u32 max_height,
			    struct drm_connector **p_con,
			    struct drm_crtc **p_crtc,
			    struct drm_display_mode **p_mode)
{
	struct drm_connector *con;
	struct vmw_display_unit *du;
	struct drm_display_mode *mode;
	int i = 0;
	int ret = 0;

	mutex_lock(&dev_priv->dev->mode_config.mutex);
	list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
			    head) {
		if (i == unit)
			break;

		++i;
	}

	if (&con->head == &dev_priv->dev->mode_config.connector_list) {
		DRM_ERROR("Could not find initial display unit.\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	if (list_empty(&con->modes))
		(void) vmw_du_connector_fill_modes(con, max_width, max_height);

	if (list_empty(&con->modes)) {
		DRM_ERROR("Could not find initial display mode.\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	du = vmw_connector_to_du(con);
	*p_con = con;
	*p_crtc = &du->crtc;

	list_for_each_entry(mode, &con->modes, head) {
		if (mode->type & DRM_MODE_TYPE_PREFERRED)
			break;
	}

	if (&mode->head == &con->modes) {
		WARN_ONCE(true, "Could not find initial preferred mode.\n");
		*p_mode = list_first_entry(&con->modes,
					   struct drm_display_mode,
					   head);
	} else {
		*p_mode = mode;
	}

 out_unlock:
	mutex_unlock(&dev_priv->dev->mode_config.mutex);

	return ret;
}

/**
 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
 * property.
 *
 * @dev_priv: Pointer to a device private struct.
 *
 * Sets up the implicit placement property unless it's already set up.
 */
void
vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
{
	if (dev_priv->implicit_placement_property)
		return;

	dev_priv->implicit_placement_property =
		drm_property_create_range(dev_priv->dev,
					  DRM_MODE_PROP_IMMUTABLE,
					  "implicit_placement", 0, 1);
}

/**
 * vmw_kms_suspend - Save modesetting state and turn modesetting off.
 *
 * @dev: Pointer to the drm device
 * Return: 0 on success. Negative error code on failure.
 */
int vmw_kms_suspend(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
	if (IS_ERR(dev_priv->suspend_state)) {
		int ret = PTR_ERR(dev_priv->suspend_state);

		DRM_ERROR("Failed kms suspend: %d\n", ret);
		dev_priv->suspend_state = NULL;

		return ret;
	}

	return 0;
}

/**
 * vmw_kms_resume - Re-enable modesetting and restore state
 *
 * @dev: Pointer to the drm device
 * Return: 0 on success. Negative error code on failure.
 *
 * State is resumed from a previous vmw_kms_suspend(). It's illegal
 * to call this function without a previous vmw_kms_suspend().
 */
int vmw_kms_resume(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	if (WARN_ON(!dev_priv->suspend_state))
		return 0;

	ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
	dev_priv->suspend_state = NULL;

	return ret;
}

/**
 * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
 *
 * @dev: Pointer to the drm device
 */
void vmw_kms_lost_device(struct drm_device *dev)
{
	drm_atomic_helper_shutdown(dev);
}

/**
 * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
 * @update: The closure structure.
 *
 * Call this helper after setting the callbacks in &struct vmw_du_update_plane
 * to perform a plane update on the display unit.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
{
	struct drm_plane_state *state = update->plane->state;
	struct drm_plane_state *old_state = update->old_state;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;
	struct drm_rect bb;
	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
	uint32_t reserved_size = 0;
	uint32_t submit_size = 0;
	uint32_t curr_size = 0;
	uint32_t num_hits = 0;
	void *cmd_start;
	char *cmd_next;
	int ret;

	/*
	 * Iterate in advance to check whether a plane update is really
	 * needed, and to count the clips that actually fall within the
	 * plane src, for the FIFO allocation.
	 */
	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		num_hits++;

	if (num_hits == 0)
		return 0;

	if (update->vfb->bo) {
		struct vmw_framebuffer_bo *vfbbo =
			container_of(update->vfb, typeof(*vfbbo), base);

		ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer, false,
					    update->cpu_blit);
	} else {
		struct vmw_framebuffer_surface *vfbs =
			container_of(update->vfb, typeof(*vfbs), base);

		ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
						  0, VMW_RES_DIRTY_NONE, NULL,
						  NULL);
	}

	if (ret)
		return ret;

	ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
	if (ret)
		goto out_unref;

	reserved_size = update->calc_fifo_size(update, num_hits);
	cmd_start = VMW_FIFO_RESERVE(update->dev_priv, reserved_size);
	if (!cmd_start) {
		ret = -ENOMEM;
		goto out_revert;
	}

	cmd_next = cmd_start;

	if (update->post_prepare) {
		curr_size = update->post_prepare(update, cmd_next);
		cmd_next += curr_size;
		submit_size += curr_size;
	}

	if (update->pre_clip) {
		curr_size = update->pre_clip(update, cmd_next, num_hits);
		cmd_next += curr_size;
		submit_size += curr_size;
	}

	bb.x1 = INT_MAX;
	bb.y1 = INT_MAX;
	bb.x2 = INT_MIN;
	bb.y2 = INT_MIN;

	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
	drm_atomic_for_each_plane_damage(&iter, &clip) {
		uint32_t fb_x = clip.x1;
		uint32_t fb_y = clip.y1;

		vmw_du_translate_to_crtc(state, &clip);
		if (update->clip) {
			curr_size = update->clip(update, cmd_next, &clip, fb_x,
						 fb_y);
			cmd_next += curr_size;
			submit_size += curr_size;
		}
		bb.x1 = min_t(int, bb.x1, clip.x1);
		bb.y1 = min_t(int, bb.y1, clip.y1);
		bb.x2 = max_t(int, bb.x2, clip.x2);
		bb.y2 = max_t(int, bb.y2, clip.y2);
	}

	curr_size = update->post_clip(update, cmd_next, &bb);
	submit_size += curr_size;

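	/*
	 * If the callbacks wrote more than was reserved, don't submit the
	 * overrun; committing zero bytes effectively drops the reservation.
	 */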
	if (reserved_size < submit_size)
		submit_size = 0;

	vmw_fifo_commit(update->dev_priv, submit_size);

	vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
					 update->out_fence, NULL);
	return ret;

out_revert:
	vmw_validation_revert(&val_ctx);

out_unref:
	vmw_validation_unref_lists(&val_ctx);
	return ret;
}
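
/*
 * Example of the damage bounding-box accumulation in
 * vmw_du_helper_plane_update() (hypothetical clips): two damage rects
 * (0,0)-(100,100) and (200,50)-(300,150) accumulate into
 *
 *	bb = { .x1 = 0, .y1 = 0, .x2 = 300, .y2 = 150 };
 *
 * which is then handed to update->post_clip() for per-display-unit post
 * processing covering all damage at once.
 */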