// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_kms.h"

#include "vmwgfx_bo.h"
#include "vmw_surface_cache.h"

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
#include <drm/drm_sysfs.h>

void vmw_du_cleanup(struct vmw_display_unit *du)
{
	struct vmw_private *dev_priv = vmw_priv(du->primary.dev);
	drm_plane_cleanup(&du->primary);
	if (vmw_cmd_supported(dev_priv))
		drm_plane_cleanup(&du->cursor.base);

	drm_connector_unregister(&du->connector);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}

/*
 * Display Unit Cursor functions
 */

static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
				  struct vmw_plane_state *vps,
				  u32 *image, u32 width, u32 height,
				  u32 hotspotX, u32 hotspotY);

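/*
 * Wire format of the define-alpha-cursor FIFO command as built below: a
 * 32-bit command id, the SVGAFifoCmdDefineAlphaCursor header, and the
 * 32bpp cursor image copied inline directly after the struct (see the
 * memcpy(&cmd[1], ...) in vmw_send_define_cursor_cmd()).
 */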
struct vmw_svga_fifo_cmd_define_cursor {
	u32 cmd;
	SVGAFifoCmdDefineAlphaCursor cursor;
};

/**
 * vmw_send_define_cursor_cmd - queue a define cursor command
 * @dev_priv: the private driver struct
 * @image: buffer which holds the cursor image
 * @width: width of the mouse cursor image
 * @height: height of the mouse cursor image
 * @hotspotX: the horizontal position of mouse hotspot
 * @hotspotY: the vertical position of mouse hotspot
 */
static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
				       u32 *image, u32 width, u32 height,
				       u32 hotspotX, u32 hotspotY)
{
	struct vmw_svga_fifo_cmd_define_cursor *cmd;
	const u32 image_size = width * height * sizeof(*image);
	const u32 cmd_size = sizeof(*cmd) + image_size;

	/* Try to reserve fifocmd space and swallow any failures;
	 * such reservations cannot be left unconsumed for long,
	 * at the risk of clogging other fifocmd users, so we
	 * treat reservations separately from the way we treat
	 * other fallible KMS-atomic resources at prepare_fb.
	 */
	cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);

	if (unlikely(!cmd))
		return;

	memset(cmd, 0, sizeof(*cmd));

	memcpy(&cmd[1], image, image_size);

	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
	cmd->cursor.id = 0;
	cmd->cursor.width = width;
	cmd->cursor.height = height;
	cmd->cursor.hotspotX = hotspotX;
	cmd->cursor.hotspotY = hotspotY;

	vmw_cmd_commit_flush(dev_priv, cmd_size);
}

/**
 * vmw_cursor_update_image - update the cursor image on the provided plane
 * @dev_priv: the private driver struct
 * @vps: the plane state of the cursor plane
 * @image: buffer which holds the cursor image
 * @width: width of the mouse cursor image
 * @height: height of the mouse cursor image
 * @hotspotX: the horizontal position of mouse hotspot
 * @hotspotY: the vertical position of mouse hotspot
 */
static void vmw_cursor_update_image(struct vmw_private *dev_priv,
				    struct vmw_plane_state *vps,
				    u32 *image, u32 width, u32 height,
				    u32 hotspotX, u32 hotspotY)
{
	if (vps->cursor.bo)
		vmw_cursor_update_mob(dev_priv, vps, image,
				      vps->base.crtc_w, vps->base.crtc_h,
				      hotspotX, hotspotY);

	else
		vmw_send_define_cursor_cmd(dev_priv, image, width, height,
					   hotspotX, hotspotY);
}


/**
 * vmw_cursor_update_mob - Update cursor via the CursorMob mechanism
 *
 * Called from inside vmw_du_cursor_plane_atomic_update to actually
 * make the cursor-image live.
 *
 * @dev_priv: device to work with
 * @vps: the plane state of the cursor plane
 * @image: cursor source data to fill the MOB with
 * @width: source data width
 * @height: source data height
 * @hotspotX: cursor hotspot x
 * @hotspotY: cursor hotspot y
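 *
 * The MOB written here consists of an SVGAGBCursorHeader followed
 * immediately by the width * height 32bpp image payload; the MOB id is
 * then handed to the device through SVGA_REG_CURSOR_MOBID.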
 */
static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
				  struct vmw_plane_state *vps,
				  u32 *image, u32 width, u32 height,
				  u32 hotspotX, u32 hotspotY)
{
	SVGAGBCursorHeader *header;
	SVGAGBAlphaCursorHeader *alpha_header;
	const u32 image_size = width * height * sizeof(*image);

	header = vmw_bo_map_and_cache(vps->cursor.bo);
	alpha_header = &header->header.alphaHeader;

	memset(header, 0, sizeof(*header));

	header->type = SVGA_ALPHA_CURSOR;
	header->sizeInBytes = image_size;

	alpha_header->hotspotX = hotspotX;
	alpha_header->hotspotY = hotspotY;
	alpha_header->width = width;
	alpha_header->height = height;

	memcpy(header + 1, image, image_size);
	vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
		  vps->cursor.bo->tbo.resource->start);
}


static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
{
	return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
}

/**
 * vmw_du_cursor_plane_acquire_image -- Acquire the image data
 * @vps: cursor plane state
 */
static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
{
	if (vps->surf) {
		if (vps->surf_mapped)
			return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
		return vps->surf->snooper.image;
	} else if (vps->bo)
		return vmw_bo_map_and_cache(vps->bo);
	return NULL;
}

static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
					    struct vmw_plane_state *new_vps)
{
	void *old_image;
	void *new_image;
	u32 size;
	bool changed;

	if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
	    old_vps->base.crtc_h != new_vps->base.crtc_h)
		return true;

	if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
	    old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
		return true;

	size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);

	old_image = vmw_du_cursor_plane_acquire_image(old_vps);
	new_image = vmw_du_cursor_plane_acquire_image(new_vps);

	changed = false;
	if (old_image && new_image)
		changed = memcmp(old_image, new_image, size) != 0;

	return changed;
}

static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
{
	if (!(*vbo))
		return;

	ttm_bo_unpin(&(*vbo)->tbo);
	vmw_bo_unreference(vbo);
}

static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
				  struct vmw_plane_state *vps)
{
	u32 i;

	if (!vps->cursor.bo)
		return;

	vmw_du_cursor_plane_unmap_cm(vps);

	/* Look for a free slot to return this mob to the cache. */
	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
		if (!vcp->cursor_mobs[i]) {
			vcp->cursor_mobs[i] = vps->cursor.bo;
			vps->cursor.bo = NULL;
			return;
		}
	}

	/* Cache is full: See if this mob is bigger than an existing mob. */
	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
		if (vcp->cursor_mobs[i]->tbo.base.size <
		    vps->cursor.bo->tbo.base.size) {
			vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
			vcp->cursor_mobs[i] = vps->cursor.bo;
			vps->cursor.bo = NULL;
			return;
		}
	}

	/* Destroy it if it's not worth caching. */
	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
}

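/*
 * vmw_du_get_cursor_mob - Acquire a cursor mob for the given plane state.
 *
 * Reuses a big-enough mob from the plane's small cache when possible;
 * otherwise allocates a new one and fences its creation so the mob is
 * guaranteed to exist before it is handed to the device.
 */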
static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
				 struct vmw_plane_state *vps)
{
	struct vmw_private *dev_priv = vcp->base.dev->dev_private;
	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
	u32 i;
	u32 cursor_max_dim, mob_max_size;
	struct vmw_fence_obj *fence = NULL;
	int ret;

	if (!dev_priv->has_mob ||
	    (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
		return -EINVAL;

	mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
	cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);

	if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
	    vps->base.crtc_h > cursor_max_dim)
		return -EINVAL;

	if (vps->cursor.bo) {
		if (vps->cursor.bo->tbo.base.size >= size)
			return 0;
		vmw_du_put_cursor_mob(vcp, vps);
	}

	/* Look for an unused mob in the cache. */
	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
		if (vcp->cursor_mobs[i] &&
		    vcp->cursor_mobs[i]->tbo.base.size >= size) {
			vps->cursor.bo = vcp->cursor_mobs[i];
			vcp->cursor_mobs[i] = NULL;
			return 0;
		}
	}
	/* Create a new mob if we can't find an existing one. */
	ret = vmw_bo_create_and_populate(dev_priv, size,
					 VMW_BO_DOMAIN_MOB,
					 &vps->cursor.bo);

	if (ret != 0)
		return ret;

	/* Fence the mob creation so we are guaranteed to have the mob */
	ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
	if (ret != 0)
		goto teardown;

	ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	if (ret != 0) {
		ttm_bo_unreserve(&vps->cursor.bo->tbo);
		goto teardown;
	}

	dma_fence_wait(&fence->base, false);
	dma_fence_put(&fence->base);

	ttm_bo_unreserve(&vps->cursor.bo->tbo);
	return 0;

teardown:
	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
	return ret;
}


static void vmw_cursor_update_position(struct vmw_private *dev_priv,
				       bool show, int x, int y)
{
	const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
					     : SVGA_CURSOR_ON_HIDE;
	uint32_t count;

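	/*
	 * Three programming paths, from newest to oldest: the CURSOR4
	 * extra registers (submitted as one atomic update), the cursor
	 * bypass 3 FIFO registers, and the legacy SVGA cursor registers.
	 */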
	spin_lock(&dev_priv->cursor_lock);
	if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
		vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
	} else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
		count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
	} else {
		vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
		vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
		vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
	}
	spin_unlock(&dev_priv->cursor_lock);
}

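/*
 * vmw_kms_cursor_snoop - Snoop a cursor SurfaceDMA into the surface's
 * snooper image. Legacy guests update the cursor surface with a DMA
 * command rather than through the cursor plane; this copies the DMA'd
 * data into a CPU-side image so the host cursor can later be redefined
 * from it (see vmw_kms_cursor_post_execbuf()).
 */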
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool is_iomem;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int i, ret;
	const struct SVGA3dSurfaceDesc *desc =
		vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
	const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed, nothing to copy */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
		sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0 || box->y != 0 || box->z != 0 ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->d != 1 || box_count != 1 ||
	    box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle dst & src offsets != 0 */
		/* TODO handle more than one copy box */
411 DRM_ERROR("Can't snoop dma request for cursor!\n");
412 DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
413 box->srcx, box->srcy, box->srcz,
414 box->x, box->y, box->z,
415 box->w, box->h, box->d, box_count,
416 cmd->dma.guest.ptr.offset);
417 return;
418 }
419
420 kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
421 kmap_num = (VMW_CURSOR_SNOOP_HEIGHT*image_pitch) >> PAGE_SHIFT;
422
423 ret = ttm_bo_reserve(bo, true, false, NULL);
424 if (unlikely(ret != 0)) {
425 DRM_ERROR("reserve failed\n");
426 return;
427 }
428
429 ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
430 if (unlikely(ret != 0))
431 goto err_unreserve;
432
433 virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
434
435 if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
436 memcpy(srf->snooper.image, virtual,
437 VMW_CURSOR_SNOOP_HEIGHT*image_pitch);
438 } else {
		/* Image is an unsigned pointer; copy it line by line. */
		for (i = 0; i < box->h; i++)
			memcpy(srf->snooper.image + i * image_pitch,
			       virtual + i * cmd->dma.guest.pitch,
			       box->w * desc->pitchBytesPerBlock);
	}

	srf->snooper.age++;

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}

/**
 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Clears all legacy hotspots.
 */
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	drm_modeset_lock_all(dev);
	drm_for_each_crtc(crtc, dev) {
		du = vmw_crtc_to_du(crtc);

		du->hotspot_x = 0;
		du->hotspot_y = 0;
	}
	drm_modeset_unlock_all(dev);
}

void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age ||
		    !du->cursor_surface->snooper.image)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_send_define_cursor_cmd(dev_priv,
					   du->cursor_surface->snooper.image,
					   VMW_CURSOR_SNOOP_WIDTH,
					   VMW_CURSOR_SNOOP_HEIGHT,
					   du->hotspot_x + du->core_hotspot_x,
					   du->hotspot_y + du->core_hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}


void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
{
	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
	u32 i;

	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);

	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
		vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);

	drm_plane_cleanup(plane);
}


void vmw_du_primary_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);

	/* Planes are static in our case so there is nothing to free. */
}


/**
 * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
 *
 * @vps: plane state associated with the display surface
 * @unreference: true if we also want to unreference the display surface.
 */
void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
			     bool unreference)
{
	if (vps->surf) {
		if (vps->pinned) {
			vmw_resource_unpin(&vps->surf->res);
			vps->pinned--;
		}

		if (unreference) {
			if (vps->pinned)
				DRM_ERROR("Surface still pinned\n");
			vmw_surface_unreference(&vps->surf);
		}
	}
}


/**
 * vmw_du_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane: display plane
 * @old_state: Contains the FB to clean up
 *
 * Unpins the framebuffer surface
 */
void
vmw_du_plane_cleanup_fb(struct drm_plane *plane,
			struct drm_plane_state *old_state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

	vmw_du_plane_unpin_surf(vps, false);
}


/**
 * vmw_du_cursor_plane_map_cm - Maps the cursor mob.
 *
 * @vps: plane_state
 *
 * Returns 0 on success
 */

static int
vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
{
	int ret;
	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
	struct ttm_buffer_object *bo;

	if (!vps->cursor.bo)
		return -EINVAL;

	bo = &vps->cursor.bo->tbo;

	if (bo->base.size < size)
		return -EINVAL;

	if (vps->cursor.bo->map.virtual)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (unlikely(ret != 0))
		return -ENOMEM;

	vmw_bo_map_and_cache(vps->cursor.bo);

	ttm_bo_unreserve(bo);

	return 0;
}


/**
 * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mob.
 *
 * @vps: state of the cursor plane
 *
 * Returns 0 on success
 */

static int
vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
{
	int ret = 0;
	struct vmw_bo *vbo = vps->cursor.bo;

	if (!vbo || !vbo->map.virtual)
		return 0;

	ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
	if (likely(ret == 0)) {
		vmw_bo_unmap(vbo);
		ttm_bo_unreserve(&vbo->tbo);
	}

	return ret;
}


/**
 * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane: cursor plane
 * @old_state: contains the state to clean up
 *
 * Unmaps all cursor bo mappings and unpins the cursor surface
 */
void
vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
			       struct drm_plane_state *old_state)
{
	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

	if (vps->surf_mapped) {
		vmw_bo_unmap(vps->surf->res.guest_memory_bo);
		vps->surf_mapped = false;
	}

	vmw_du_cursor_plane_unmap_cm(vps);
	vmw_du_put_cursor_mob(vcp, vps);

	vmw_du_plane_unpin_surf(vps, false);

	if (vps->surf) {
		vmw_surface_unreference(&vps->surf);
		vps->surf = NULL;
	}

	if (vps->bo) {
		vmw_bo_unreference(&vps->bo);
		vps->bo = NULL;
	}
}


/**
 * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
 *
 * @plane: display plane
 * @new_state: info on the new plane state, including the FB
 *
 * Returns 0 on success
 */
int
vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
			       struct drm_plane_state *new_state)
{
	struct drm_framebuffer *fb = new_state->fb;
	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	int ret = 0;

	if (vps->surf) {
		if (vps->surf_mapped) {
			vmw_bo_unmap(vps->surf->res.guest_memory_bo);
			vps->surf_mapped = false;
		}
		vmw_surface_unreference(&vps->surf);
		vps->surf = NULL;
	}

	if (vps->bo) {
		vmw_bo_unreference(&vps->bo);
		vps->bo = NULL;
	}

	if (fb) {
		if (vmw_framebuffer_to_vfb(fb)->bo) {
			vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
			vmw_bo_reference(vps->bo);
		} else {
			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
			vmw_surface_reference(vps->surf);
		}
	}

	if (!vps->surf && vps->bo) {
		const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);

		/*
		 * Not using vmw_bo_map_and_cache() helper here as we need to
		 * reserve the ttm_buffer_object first which
		 * vmw_bo_map_and_cache() omits.
		 */
		ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);

		if (unlikely(ret != 0))
			return -ENOMEM;

		ret = ttm_bo_kmap(&vps->bo->tbo, 0, PFN_UP(size), &vps->bo->map);

		ttm_bo_unreserve(&vps->bo->tbo);

		if (unlikely(ret != 0))
			return -ENOMEM;
	} else if (vps->surf && !vps->bo && vps->surf->res.guest_memory_bo) {

		WARN_ON(vps->surf->snooper.image);
		ret = ttm_bo_reserve(&vps->surf->res.guest_memory_bo->tbo, true, false,
				     NULL);
		if (unlikely(ret != 0))
			return -ENOMEM;
		vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
		ttm_bo_unreserve(&vps->surf->res.guest_memory_bo->tbo);
		vps->surf_mapped = true;
	}

	if (vps->surf || vps->bo) {
		vmw_du_get_cursor_mob(vcp, vps);
		vmw_du_cursor_plane_map_cm(vps);
	}

	return 0;
}


void
vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
				  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
	s32 hotspot_x, hotspot_y;

	hotspot_x = du->hotspot_x;
	hotspot_y = du->hotspot_y;

	if (new_state->fb) {
		hotspot_x += new_state->fb->hot_x;
		hotspot_y += new_state->fb->hot_y;
	}

	du->cursor_surface = vps->surf;
	du->cursor_bo = vps->bo;

	if (!vps->surf && !vps->bo) {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return;
	}

	vps->cursor.hotspot_x = hotspot_x;
	vps->cursor.hotspot_y = hotspot_y;

	if (vps->surf) {
		du->cursor_age = du->cursor_surface->snooper.age;
	}

	if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
		/*
		 * If it hasn't changed, avoid making the device do extra
		 * work by keeping the old cursor active.
		 */
		struct vmw_cursor_plane_state tmp = old_vps->cursor;
		old_vps->cursor = vps->cursor;
		vps->cursor = tmp;
	} else {
		void *image = vmw_du_cursor_plane_acquire_image(vps);
		if (image)
			vmw_cursor_update_image(dev_priv, vps, image,
						new_state->crtc_w,
						new_state->crtc_h,
						hotspot_x, hotspot_y);
	}

	du->cursor_x = new_state->crtc_x + du->set_gui_x;
	du->cursor_y = new_state->crtc_y + du->set_gui_y;

	vmw_cursor_update_position(dev_priv, true,
				   du->cursor_x + hotspot_x,
				   du->cursor_y + hotspot_y);

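	/*
	 * Remember the framebuffer-supplied part of the hotspot separately
	 * from the legacy per-du hotspot, so that the two can be recombined
	 * later (e.g. in vmw_kms_cursor_post_execbuf()).
	 */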
	du->core_hotspot_x = hotspot_x - du->hotspot_x;
	du->core_hotspot_y = hotspot_y - du->hotspot_y;
}


/**
 * vmw_du_primary_plane_atomic_check - check if the new state is okay
 *
 * @plane: display plane
 * @state: info on the new plane state, including the FB
 *
 * Check if the new state is settable given the current state. Other
 * than what the atomic helper checks, we care about the CRTC fitting
 * the FB and maintaining one active framebuffer.
 *
 * Returns 0 on success
 */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
				      struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_crtc_state *crtc_state = NULL;
	struct drm_framebuffer *new_fb = new_state->fb;
	int ret;

	if (new_state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(state,
							   new_state->crtc);

	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING,
						  false, true);

	if (!ret && new_fb) {
		struct drm_crtc *crtc = new_state->crtc;
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

		vmw_connector_state_to_vcs(du->connector.state);
	}


	return ret;
}


/**
 * vmw_du_cursor_plane_atomic_check - check if the new state is okay
 *
 * @plane: cursor plane
 * @state: info on the new plane state
 *
 * This is a chance to fail if the new cursor state does not fit
 * our requirements.
 *
 * Returns 0 on success
 */
int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
				     struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	int ret = 0;
	struct drm_crtc_state *crtc_state = NULL;
	struct vmw_surface *surface = NULL;
	struct drm_framebuffer *fb = new_state->fb;

	if (new_state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
							   new_state->crtc);

	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	/* Turning off */
	if (!fb)
		return 0;

	/* A lot of the code assumes this */
	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
			  new_state->crtc_w, new_state->crtc_h);
		return -EINVAL;
	}

	if (!vmw_framebuffer_to_vfb(fb)->bo) {
		surface = vmw_framebuffer_to_vfbs(fb)->surface;

		WARN_ON(!surface);

		if (!surface ||
		    (!surface->snooper.image && !surface->res.guest_memory_bo)) {
			DRM_ERROR("surface not suitable for cursor\n");
			return -EINVAL;
		}
	}

	return 0;
}


int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
			     struct drm_atomic_state *state)
{
	struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
									 crtc);
	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
	int connector_mask = drm_connector_mask(&du->connector);
	bool has_primary = new_state->plane_mask &
			   drm_plane_mask(crtc->primary);

	/* We always want to have an active plane with an active CRTC */
	if (has_primary != new_state->enable)
		return -EINVAL;


	if (new_state->connector_mask != connector_mask &&
	    new_state->connector_mask != 0) {
		DRM_ERROR("Invalid connectors configuration\n");
		return -EINVAL;
	}

	/*
	 * Our virtual device does not have a dot clock, so use the logical
	 * clock value as the dot clock.
	 */
	if (new_state->mode.crtc_clock == 0)
		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;

	return 0;
}

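/*
 * The CRTC atomic begin/flush hooks below are intentionally empty:
 * vmwgfx does all of its per-update work in the plane hooks, but the
 * helper callbacks are still provided as explicit no-ops.
 */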

void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
			      struct drm_atomic_state *state)
{
}


void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
			      struct drm_atomic_state *state)
{
}


/**
 * vmw_du_crtc_duplicate_state - duplicate crtc state
 * @crtc: DRM crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * vmw-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state;
	struct vmw_crtc_state *vcs;

	if (WARN_ON(!crtc->state))
		return NULL;

	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);

	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_crtc_duplicate_state(crtc, state);

	return state;
}


/**
 * vmw_du_crtc_reset - creates a blank vmw crtc state
 * @crtc: DRM crtc
 *
 * Resets the atomic state for @crtc by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_crtc_reset(struct drm_crtc *crtc)
{
	struct vmw_crtc_state *vcs;


	if (crtc->state) {
		__drm_atomic_helper_crtc_destroy_state(crtc->state);

		kfree(vmw_crtc_state_to_vcs(crtc->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);

	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
		return;
	}

	__drm_atomic_helper_crtc_reset(crtc, &vcs->base);
}


/**
 * vmw_du_crtc_destroy_state - destroy crtc state
 * @crtc: DRM crtc
 * @state: state object to destroy
 *
 * Destroys the crtc state (both common and vmw-specific) for the
 * specified crtc.
 */
void
vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
			  struct drm_crtc_state *state)
{
	drm_atomic_helper_crtc_destroy_state(crtc, state);
}


/**
 * vmw_du_plane_duplicate_state - duplicate plane state
 * @plane: drm plane
 *
 * Allocates and returns a copy of the plane state (both common and
 * vmw-specific) for the specified plane.
 *
 * Returns: The newly allocated plane state, or NULL on failure.
 */
struct drm_plane_state *
vmw_du_plane_duplicate_state(struct drm_plane *plane)
{
	struct drm_plane_state *state;
	struct vmw_plane_state *vps;

	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);

	if (!vps)
		return NULL;

	vps->pinned = 0;
	vps->cpp = 0;

	memset(&vps->cursor, 0, sizeof(vps->cursor));

	/* Each ref counted resource needs to be acquired again */
	if (vps->surf)
		(void) vmw_surface_reference(vps->surf);

	if (vps->bo)
		(void) vmw_bo_reference(vps->bo);

	state = &vps->base;

	__drm_atomic_helper_plane_duplicate_state(plane, state);

	return state;
}


/**
 * vmw_du_plane_reset - creates a blank vmw plane state
 * @plane: drm plane
 *
 * Resets the atomic state for @plane by freeing the state pointer (which might
 * be NULL, e.g. at driver load time) and allocating a new empty state object.
 */
void vmw_du_plane_reset(struct drm_plane *plane)
{
	struct vmw_plane_state *vps;

	if (plane->state)
		vmw_du_plane_destroy_state(plane, plane->state);

	vps = kzalloc(sizeof(*vps), GFP_KERNEL);

	if (!vps) {
		DRM_ERROR("Cannot allocate vmw_plane_state\n");
		return;
	}

	__drm_atomic_helper_plane_reset(plane, &vps->base);
}


/**
 * vmw_du_plane_destroy_state - destroy plane state
 * @plane: DRM plane
 * @state: state object to destroy
 *
 * Destroys the plane state (both common and vmw-specific) for the
 * specified plane.
 */
void
vmw_du_plane_destroy_state(struct drm_plane *plane,
			   struct drm_plane_state *state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);

	/* Should have been freed by cleanup_fb */
	if (vps->surf)
		vmw_surface_unreference(&vps->surf);

	if (vps->bo)
		vmw_bo_unreference(&vps->bo);

	drm_atomic_helper_plane_destroy_state(plane, state);
}


/**
 * vmw_du_connector_duplicate_state - duplicate connector state
 * @connector: DRM connector
 *
 * Allocates and returns a copy of the connector state (both common and
 * vmw-specific) for the specified connector.
 *
 * Returns: The newly allocated connector state, or NULL on failure.
 */
struct drm_connector_state *
vmw_du_connector_duplicate_state(struct drm_connector *connector)
{
	struct drm_connector_state *state;
	struct vmw_connector_state *vcs;

	if (WARN_ON(!connector->state))
		return NULL;

	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);

	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_connector_duplicate_state(connector, state);

	return state;
}


/**
 * vmw_du_connector_reset - creates a blank vmw connector state
 * @connector: DRM connector
 *
 * Resets the atomic state for @connector by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_connector_reset(struct drm_connector *connector)
{
	struct vmw_connector_state *vcs;


	if (connector->state) {
		__drm_atomic_helper_connector_destroy_state(connector->state);

		kfree(vmw_connector_state_to_vcs(connector->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);

	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_connector_state\n");
		return;
	}

	__drm_atomic_helper_connector_reset(connector, &vcs->base);
}


/**
 * vmw_du_connector_destroy_state - destroy connector state
 * @connector: DRM connector
 * @state: state object to destroy
 *
 * Destroys the connector state (both common and vmw-specific) for the
 * specified connector.
 */
void
vmw_du_connector_destroy_state(struct drm_connector *connector,
			       struct drm_connector_state *state)
{
	drm_atomic_helper_connector_destroy_state(connector, state);
}
/*
 * Generic framebuffer code
 */

/*
 * Surface framebuffer code
 */

static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfbs->surface);

	kfree(vfbs);
}

/**
 * vmw_kms_readback - Perform a readback from the screen system to
 * a buffer-object backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the buffer-object backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *vclips,
		     uint32_t num_clips)
{
	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_object:
		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
					    user_fence_rep, vclips, num_clips,
					    NULL);
	case vmw_du_screen_target:
		return vmw_kms_stdu_readback(dev_priv, file_priv, vfb,
					     user_fence_rep, NULL, vclips, num_clips,
					     1, NULL);
	default:
		WARN_ONCE(true,
			  "Readback called with invalid display system.\n");
	}

	return -ENOSYS;
}


static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = drm_atomic_helper_dirtyfb,
};

static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct vmw_surface *surface,
					   struct vmw_framebuffer **out,
					   const struct drm_mode_fb_cmd2
					   *mode_cmd,
					   bool is_bo_proxy)

{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_framebuffer_surface *vfbs;
	enum SVGA3dSurfaceFormat format;
	int ret;

	/* 3D is only supported on HWv8 and newer hosts */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -ENOSYS;

	/*
	 * Sanity checks.
	 */

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg(&dev_priv->drm,
			"unsupported pixel format %p4cc / modifier 0x%llx\n",
			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
		return -EINVAL;
	}

	/* Surface must be marked as a scanout. */
	if (unlikely(!surface->metadata.scanout))
		return -EINVAL;

	if (unlikely(surface->metadata.mip_levels[0] != 1 ||
		     surface->metadata.num_sizes != 1 ||
		     surface->metadata.base_size.width < mode_cmd->width ||
		     surface->metadata.base_size.height < mode_cmd->height ||
		     surface->metadata.base_size.depth != 1)) {
		DRM_ERROR("Incompatible surface dimensions "
			  "for requested mode.\n");
		return -EINVAL;
	}

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
		format = SVGA3D_A8R8G8B8;
		break;
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		break;
	case DRM_FORMAT_RGB565:
		format = SVGA3D_R5G6B5;
		break;
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_A1R5G5B5;
		break;
	default:
		DRM_ERROR("Invalid pixel format: %p4cc\n",
			  &mode_cmd->pixel_format);
		return -EINVAL;
	}

	/*
	 * For DX, surface format validation is done when surface->scanout
	 * is set.
	 */
	if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
		DRM_ERROR("Invalid surface format for requested mode.\n");
		return -EINVAL;
	}

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
	vfbs->surface = vmw_surface_reference(surface);
	vfbs->base.user_handle = mode_cmd->handles[0];
	vfbs->is_bo_proxy = is_bo_proxy;

	*out = &vfbs->base;

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_surface_unreference(&surface);
	kfree(vfbs);
out_err1:
	return ret;
}

/*
 * Buffer-object framebuffer code
 */

static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
					    struct drm_file *file_priv,
					    unsigned int *handle)
{
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(fb);

	return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
}

static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_bo_unreference(&vfbd->buffer);

	kfree(vfbd);
}

static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
	.create_handle = vmw_framebuffer_bo_create_handle,
	.destroy = vmw_framebuffer_bo_destroy,
	.dirty = drm_atomic_helper_dirtyfb,
};

/**
 * vmw_create_bo_proxy - create a proxy surface for the buffer object
 *
 * @dev: DRM device
 * @mode_cmd: parameters for the new surface
 * @bo_mob: MOB backing the buffer object
 * @srf_out: newly created surface
 *
 * When the content FB is a buffer object, we create a surface as a proxy to
 * the same buffer. This way we can do a surface copy rather than a surface
 * DMA, which is a more efficient approach.
 *
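 * The proxy shares the buffer object as its guest-memory backing, so no
 * pixel data is copied; only the surface resource metadata is created.
 *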
 * RETURNS:
 * 0 on success, error code otherwise
 */
static int vmw_create_bo_proxy(struct drm_device *dev,
			       const struct drm_mode_fb_cmd2 *mode_cmd,
			       struct vmw_bo *bo_mob,
			       struct vmw_surface **srf_out)
{
	struct vmw_surface_metadata metadata = {0};
	uint32_t format;
	struct vmw_resource *res;
	unsigned int bytes_pp;
	int ret;

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		bytes_pp = 4;
		break;

	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_R5G6B5;
		bytes_pp = 2;
		break;

	case DRM_FORMAT_C8: /* 8-bit palettized */
		format = SVGA3D_P8;
		bytes_pp = 1;
		break;

	default:
		DRM_ERROR("Invalid framebuffer format %p4cc\n",
			  &mode_cmd->pixel_format);
		return -EINVAL;
	}

	metadata.format = format;
	metadata.mip_levels[0] = 1;
	metadata.num_sizes = 1;
	metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
	metadata.base_size.height = mode_cmd->height;
	metadata.base_size.depth = 1;
	metadata.scanout = true;

	ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
	if (ret) {
		DRM_ERROR("Failed to allocate proxy content buffer\n");
		return ret;
	}

	res = &(*srf_out)->res;

	/* Reserve and switch the backing mob. */
	mutex_lock(&res->dev_priv->cmdbuf_mutex);
	(void) vmw_resource_reserve(res, false, true);
	vmw_user_bo_unref(&res->guest_memory_bo);
	res->guest_memory_bo = vmw_user_bo_ref(bo_mob);
	res->guest_memory_offset = 0;
	vmw_resource_unreserve(res, false, false, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);

	return 0;
}


static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
				      struct vmw_bo *bo,
				      struct vmw_framebuffer **out,
				      const struct drm_mode_fb_cmd2
				      *mode_cmd)

{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_framebuffer_bo *vfbd;
	unsigned int requested_size;
	int ret;

	requested_size = mode_cmd->height * mode_cmd->pitches[0];
	if (unlikely(requested_size > bo->tbo.base.size)) {
		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
		return -EINVAL;
	}

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg(&dev_priv->drm,
			"unsupported pixel format %p4cc / modifier 0x%llx\n",
			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
		return -EINVAL;
	}

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	vfbd->base.base.obj[0] = &bo->tbo.base;
	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
	vfbd->base.bo = true;
	vfbd->buffer = vmw_bo_reference(bo);
	vfbd->base.user_handle = mode_cmd->handles[0];
	*out = &vfbd->base;

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_bo_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_bo_unreference(&bo);
	kfree(vfbd);
out_err1:
	return ret;
}


/**
 * vmw_kms_srf_ok - check if a surface can be created
 *
 * @dev_priv: Pointer to device private struct.
 * @width: requested width
 * @height: requested height
 *
 * Surface dimensions must not exceed the maximum texture size.
 */
static bool
vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
{
	if (width > dev_priv->texture_max_width ||
	    height > dev_priv->texture_max_height)
		return false;

	return true;
}

/**
 * vmw_kms_new_framebuffer - Create a new framebuffer.
 *
 * @dev_priv: Pointer to device private struct.
 * @bo: Pointer to buffer object to wrap the kms framebuffer around.
 * Either @bo or @surface must be NULL.
 * @surface: Pointer to a surface to wrap the kms framebuffer around.
 * Either @bo or @surface must be NULL.
 * @only_2d: No presents will occur to this buffer object based framebuffer.
 * This helps the code to do some important optimizations.
 * @mode_cmd: Frame-buffer metadata.
 */
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
			struct vmw_bo *bo,
			struct vmw_surface *surface,
			bool only_2d,
			const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_framebuffer *vfb = NULL;
	bool is_bo_proxy = false;
	int ret;

	/*
	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
	 * therefore, wrap the buffer object in a surface so we can use the
	 * SurfaceCopy command.
	 */
	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
	    bo && only_2d &&
	    mode_cmd->width > 64 && /* Don't create a proxy for cursor */
	    dev_priv->active_display_unit == vmw_du_screen_target) {
		ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
					  bo, &surface);
		if (ret)
			return ERR_PTR(ret);

		is_bo_proxy = true;
	}

	/* Create the new framebuffer depending on what we have */
	if (surface) {
		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
						      mode_cmd,
						      is_bo_proxy);
		/*
		 * vmw_create_bo_proxy() adds a reference that is no longer
		 * needed
		 */
		if (is_bo_proxy)
			vmw_surface_unreference(&surface);
	} else if (bo) {
		ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
						 mode_cmd);
	} else {
		BUG();
	}

	if (ret)
		return ERR_PTR(ret);

	return vfb;
}

/*
 * Generic Kernel modesetting functions
 */

static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_bo *bo = NULL;
	int ret;

	/* returns either a bo or surface */
	ret = vmw_user_lookup_handle(dev_priv, file_priv,
				     mode_cmd->handles[0],
				     &surface, &bo);
	if (ret) {
		DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
			  mode_cmd->handles[0], mode_cmd->handles[0]);
		goto err_out;
	}


	if (!bo &&
	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
		DRM_ERROR("Surface size cannot exceed %dx%d\n",
			  dev_priv->texture_max_width,
			  dev_priv->texture_max_height);
		ret = -EINVAL;
		goto err_out;
	}


	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
				      !(dev_priv->capabilities & SVGA_CAP_3D),
				      mode_cmd);
	if (IS_ERR(vfb)) {
		ret = PTR_ERR(vfb);
		goto err_out;
	}

err_out:
	/* vmw_user_lookup_handle takes one ref, and so does the new fb */
	if (bo)
		vmw_user_bo_unref(&bo);
	if (surface)
		vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		return ERR_PTR(ret);
	}

	return &vfb->base;
}

/**
 * vmw_kms_check_display_memory - Validates display memory required for a
 * topology
 * @dev: DRM device
 * @num_rects: number of drm_rect in rects
 * @rects: array of drm_rect representing the topology to validate indexed by
 * crtc index.
 *
 * Returns:
 * 0 on success otherwise negative error code
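 *
 * As an illustration: two 1920x1080 outputs side by side give a bounding
 * box of 3840x1080, so at the device's assumed 32bpp the bounding-box
 * memory is 3840 * 1080 * 4 bytes, roughly 15.8 MiB, which must fit in
 * max_primary_mem.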
 */
static int vmw_kms_check_display_memory(struct drm_device *dev,
					uint32_t num_rects,
					struct drm_rect *rects)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_rect bounding_box = {0};
	u64 total_pixels = 0, pixel_mem, bb_mem;
	int i;

	for (i = 0; i < num_rects; i++) {
		/*
		 * For STDU only individual screen (screen target) is limited by
		 * SCREENTARGET_MAX_WIDTH/HEIGHT registers.
		 */
		if (dev_priv->active_display_unit == vmw_du_screen_target &&
		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
			VMW_DEBUG_KMS("Screen size not supported.\n");
			return -EINVAL;
		}

		/* Bounding box upper left is at (0,0). */
		if (rects[i].x2 > bounding_box.x2)
			bounding_box.x2 = rects[i].x2;

		if (rects[i].y2 > bounding_box.y2)
			bounding_box.y2 = rects[i].y2;

		total_pixels += (u64) drm_rect_width(&rects[i]) *
				(u64) drm_rect_height(&rects[i]);
	}

	/* Virtual svga device primary limits are always in 32-bpp. */
	pixel_mem = total_pixels * 4;

	/*
	 * For HV10 and below, prim_bb_mem is the vram size. When
	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, the vram
	 * size is the limit on the primary bounding box.
	 */
1734 if (pixel_mem > dev_priv->max_primary_mem) {
1735 VMW_DEBUG_KMS("Combined output size too large.\n");
1736 return -EINVAL;
1737 }
1738
1739 /* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
1740 if (dev_priv->active_display_unit != vmw_du_screen_target ||
1741 !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
1742 bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
1743
1744 if (bb_mem > dev_priv->max_primary_mem) {
1745 VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
1746 return -EINVAL;
1747 }
1748 }
1749
1750 return 0;
1751 }
1752
1753 /**
1754 * vmw_crtc_state_and_lock - Return new or current crtc state with locked
1755 * crtc mutex
1756 * @state: The atomic state pointer containing the new atomic state
1757 * @crtc: The crtc
1758 *
1759 * This function returns the new crtc state if it's part of the state update.
1760 * Otherwise returns the current crtc state. It also makes sure that the
1761 * crtc mutex is locked.
1762 *
1763 * Returns: A valid crtc state pointer or NULL. It may also return a
1764 * pointer error, in particular -EDEADLK if locking needs to be rerun.
1765 */
1766 static struct drm_crtc_state *
vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)1767 vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
1768 {
1769 struct drm_crtc_state *crtc_state;
1770
1771 crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
1772 if (crtc_state) {
1773 lockdep_assert_held(&crtc->mutex.mutex.base);
1774 } else {
1775 int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
1776
1777 if (ret != 0 && ret != -EALREADY)
1778 return ERR_PTR(ret);
1779
1780 crtc_state = crtc->state;
1781 }
1782
1783 return crtc_state;
1784 }
1785
1786 /**
1787 * vmw_kms_check_implicit - Verify that all implicit display units scan out
1788 * from the same fb after the new state is committed.
1789 * @dev: The drm_device.
1790 * @state: The new state to be checked.
1791 *
1792 * Returns:
1793 * Zero on success,
1794 * -EINVAL on invalid state,
1795 * -EDEADLK if modeset locking needs to be rerun.
1796 */
vmw_kms_check_implicit(struct drm_device *dev, struct drm_atomic_state *state)1797 static int vmw_kms_check_implicit(struct drm_device *dev,
1798 struct drm_atomic_state *state)
1799 {
1800 struct drm_framebuffer *implicit_fb = NULL;
1801 struct drm_crtc *crtc;
1802 struct drm_crtc_state *crtc_state;
1803 struct drm_plane_state *plane_state;
1804
1805 drm_for_each_crtc(crtc, dev) {
1806 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1807
1808 if (!du->is_implicit)
1809 continue;
1810
1811 crtc_state = vmw_crtc_state_and_lock(state, crtc);
1812 if (IS_ERR(crtc_state))
1813 return PTR_ERR(crtc_state);
1814
1815 if (!crtc_state || !crtc_state->enable)
1816 continue;
1817
1818 /*
1819 * Can't move primary planes across crtcs, so this is OK.
1820 * It also means we don't need to take the plane mutex.
1821 */
1822 plane_state = du->primary.state;
1823 if (plane_state->crtc != crtc)
1824 continue;
1825
1826 if (!implicit_fb)
1827 implicit_fb = plane_state->fb;
1828 else if (implicit_fb != plane_state->fb)
1829 return -EINVAL;
1830 }
1831
1832 return 0;
1833 }
1834
1835 /**
1836 * vmw_kms_check_topology - Validates topology in drm_atomic_state
1837 * @dev: DRM device
1838 * @state: the driver state object
1839 *
1840 * Returns:
1841 * 0 on success otherwise negative error code
1842 */
1843 static int vmw_kms_check_topology(struct drm_device *dev,
1844 struct drm_atomic_state *state)
1845 {
1846 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1847 struct drm_rect *rects;
1848 struct drm_crtc *crtc;
1849 uint32_t i;
1850 int ret = 0;
1851
1852 rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
1853 GFP_KERNEL);
1854 if (!rects)
1855 return -ENOMEM;
1856
1857 drm_for_each_crtc(crtc, dev) {
1858 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1859 struct drm_crtc_state *crtc_state;
1860
1861 i = drm_crtc_index(crtc);
1862
1863 crtc_state = vmw_crtc_state_and_lock(state, crtc);
1864 if (IS_ERR(crtc_state)) {
1865 ret = PTR_ERR(crtc_state);
1866 goto clean;
1867 }
1868
1869 if (!crtc_state)
1870 continue;
1871
1872 if (crtc_state->enable) {
1873 rects[i].x1 = du->gui_x;
1874 rects[i].y1 = du->gui_y;
1875 rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
1876 rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
1877 } else {
1878 rects[i].x1 = 0;
1879 rects[i].y1 = 0;
1880 rects[i].x2 = 0;
1881 rects[i].y2 = 0;
1882 }
1883 }
1884
1885 /* Determine change to topology due to new atomic state */
1886 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
1887 new_crtc_state, i) {
1888 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1889 struct drm_connector *connector;
1890 struct drm_connector_state *conn_state;
1891 struct vmw_connector_state *vmw_conn_state;
1892
1893 if (!du->pref_active && new_crtc_state->enable) {
1894 VMW_DEBUG_KMS("Enabling a disabled display unit\n");
1895 ret = -EINVAL;
1896 goto clean;
1897 }
1898
1899 /*
1900		 * For vmwgfx each crtc has only one connector attached and it
1901		 * is not changed, so we don't really need to check
1902		 * crtc->connector_mask and iterate over it.
1903 */
1904 connector = &du->connector;
1905 conn_state = drm_atomic_get_connector_state(state, connector);
1906 if (IS_ERR(conn_state)) {
1907 ret = PTR_ERR(conn_state);
1908 goto clean;
1909 }
1910
1911 vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
1912 vmw_conn_state->gui_x = du->gui_x;
1913 vmw_conn_state->gui_y = du->gui_y;
1914 }
1915
1916 ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
1917 rects);
1918
1919 clean:
1920 kfree(rects);
1921 return ret;
1922 }
1923
1924 /**
1925 * vmw_kms_atomic_check_modeset - validate state object for modeset changes
1926 *
1927 * @dev: DRM device
1928 * @state: the driver state object
1929 *
1930 * Runs the core atomic checks via drm_atomic_helper_check() plus the
1931 * vmwgfx-specific implicit-framebuffer check. If any crtc in the new
1932 * state needs a full modeset, the display topology is validated as well.
1933 *
1934 * Returns:
1935 * Zero for success or -errno
1936 */
1937 static int
1938 vmw_kms_atomic_check_modeset(struct drm_device *dev,
1939 struct drm_atomic_state *state)
1940 {
1941 struct drm_crtc *crtc;
1942 struct drm_crtc_state *crtc_state;
1943 bool need_modeset = false;
1944 int i, ret;
1945
1946 ret = drm_atomic_helper_check(dev, state);
1947 if (ret)
1948 return ret;
1949
1950 ret = vmw_kms_check_implicit(dev, state);
1951 if (ret) {
1952 VMW_DEBUG_KMS("Invalid implicit state\n");
1953 return ret;
1954 }
1955
1956 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1957 if (drm_atomic_crtc_needs_modeset(crtc_state))
1958 need_modeset = true;
1959 }
1960
1961 if (need_modeset)
1962 return vmw_kms_check_topology(dev, state);
1963
1964 return ret;
1965 }
1966
1967 static const struct drm_mode_config_funcs vmw_kms_funcs = {
1968 .fb_create = vmw_kms_fb_create,
1969 .atomic_check = vmw_kms_atomic_check_modeset,
1970 .atomic_commit = drm_atomic_helper_commit,
1971 };
1972
1973 static int vmw_kms_generic_present(struct vmw_private *dev_priv,
1974 struct drm_file *file_priv,
1975 struct vmw_framebuffer *vfb,
1976 struct vmw_surface *surface,
1977 uint32_t sid,
1978 int32_t destX, int32_t destY,
1979 struct drm_vmw_rect *clips,
1980 uint32_t num_clips)
1981 {
1982 return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
1983 &surface->res, destX, destY,
1984 num_clips, 1, NULL, NULL);
1985 }
1986
1987
1988 int vmw_kms_present(struct vmw_private *dev_priv,
1989 struct drm_file *file_priv,
1990 struct vmw_framebuffer *vfb,
1991 struct vmw_surface *surface,
1992 uint32_t sid,
1993 int32_t destX, int32_t destY,
1994 struct drm_vmw_rect *clips,
1995 uint32_t num_clips)
1996 {
1997 int ret;
1998
1999 switch (dev_priv->active_display_unit) {
2000 case vmw_du_screen_target:
2001 ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
2002 &surface->res, destX, destY,
2003 num_clips, 1, NULL, NULL);
2004 break;
2005 case vmw_du_screen_object:
2006 ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
2007 sid, destX, destY, clips,
2008 num_clips);
2009 break;
2010 default:
2011 WARN_ONCE(true,
2012 "Present called with invalid display system.\n");
2013 ret = -ENOSYS;
2014 break;
2015 }
2016 if (ret)
2017 return ret;
2018
2019 vmw_cmd_flush(dev_priv, false);
2020
2021 return 0;
2022 }
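
/*
 * Illustrative call (a sketch; the real caller is the present ioctl path,
 * and passing surface->res.id as @sid is an assumption here): present a
 * single 640x480 clip at the origin:
 *
 *	struct drm_vmw_rect clip = { .x = 0, .y = 0, .w = 640, .h = 480 };
 *
 *	ret = vmw_kms_present(dev_priv, file_priv, vfb, surface,
 *			      surface->res.id, 0, 0, &clip, 1);
 */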
2023
2024 static void
2025 vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
2026 {
2027 if (dev_priv->hotplug_mode_update_property)
2028 return;
2029
2030 dev_priv->hotplug_mode_update_property =
2031 drm_property_create_range(&dev_priv->drm,
2032 DRM_MODE_PROP_IMMUTABLE,
2033 "hotplug_mode_update", 0, 1);
2034 }
2035
2036 int vmw_kms_init(struct vmw_private *dev_priv)
2037 {
2038 struct drm_device *dev = &dev_priv->drm;
2039 int ret;
2040 static const char *display_unit_names[] = {
2041 "Invalid",
2042 "Legacy",
2043 "Screen Object",
2044 "Screen Target",
2045 "Invalid (max)"
2046 };
2047
2048 drm_mode_config_init(dev);
2049 dev->mode_config.funcs = &vmw_kms_funcs;
2050 dev->mode_config.min_width = 1;
2051 dev->mode_config.min_height = 1;
2052 dev->mode_config.max_width = dev_priv->texture_max_width;
2053 dev->mode_config.max_height = dev_priv->texture_max_height;
2054 dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
2055
2056 drm_mode_create_suggested_offset_properties(dev);
2057 vmw_kms_create_hotplug_mode_update_property(dev_priv);
2058
2059 ret = vmw_kms_stdu_init_display(dev_priv);
2060 if (ret) {
2061 ret = vmw_kms_sou_init_display(dev_priv);
2062 if (ret) /* Fallback */
2063 ret = vmw_kms_ldu_init_display(dev_priv);
2064 }
2065 BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1));
2066 drm_info(&dev_priv->drm, "%s display unit initialized\n",
2067 display_unit_names[dev_priv->active_display_unit]);
2068
2069 return ret;
2070 }
2071
2072 int vmw_kms_close(struct vmw_private *dev_priv)
2073 {
2074 int ret = 0;
2075
2076 /*
2077	 * The docs say we should take the lock before calling this function,
2078	 * but since it destroys encoders and our destructor calls
2079	 * drm_encoder_cleanup(), which takes the lock, we would deadlock.
2080 */
2081 drm_mode_config_cleanup(&dev_priv->drm);
2082 if (dev_priv->active_display_unit == vmw_du_legacy)
2083 ret = vmw_kms_ldu_close_display(dev_priv);
2084
2085 return ret;
2086 }
2087
2088 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
2089 struct drm_file *file_priv)
2090 {
2091 struct drm_vmw_cursor_bypass_arg *arg = data;
2092 struct vmw_display_unit *du;
2093 struct drm_crtc *crtc;
2094 int ret = 0;
2095
2096 mutex_lock(&dev->mode_config.mutex);
2097 if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
2098
2099 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2100 du = vmw_crtc_to_du(crtc);
2101 du->hotspot_x = arg->xhot;
2102 du->hotspot_y = arg->yhot;
2103 }
2104
2105 mutex_unlock(&dev->mode_config.mutex);
2106 return 0;
2107 }
2108
2109 crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
2110 if (!crtc) {
2111 ret = -ENOENT;
2112 goto out;
2113 }
2114
2115 du = vmw_crtc_to_du(crtc);
2116
2117 du->hotspot_x = arg->xhot;
2118 du->hotspot_y = arg->yhot;
2119
2120 out:
2121 mutex_unlock(&dev->mode_config.mutex);
2122
2123 return ret;
2124 }
2125
2126 int vmw_kms_write_svga(struct vmw_private *vmw_priv,
2127 unsigned width, unsigned height, unsigned pitch,
2128 unsigned bpp, unsigned depth)
2129 {
2130 if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
2131 vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
2132 else if (vmw_fifo_have_pitchlock(vmw_priv))
2133 vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
2134 vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
2135 vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
2136 if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
2137 vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
2138
2139 if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
2140 DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
2141 depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
2142 return -EINVAL;
2143 }
2144
2145 return 0;
2146 }
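
/*
 * Example (illustrative values): a 1024x768 mode at 32bpp with a packed
 * pitch would be programmed as
 *
 *	vmw_kms_write_svga(vmw_priv, 1024, 768, 1024 * 4, 32, 24);
 *
 * where depth 24 assumes an XRGB8888-style layout; the call fails with
 * -EINVAL when the host expects a different depth for the given bpp.
 */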
2147
2148 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
2149 uint32_t pitch,
2150 uint32_t height)
2151 {
2152 return ((u64) pitch * (u64) height) < (u64)
2153 ((dev_priv->active_display_unit == vmw_du_screen_target) ?
2154 dev_priv->max_primary_mem : dev_priv->vram_size);
2155 }
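
/*
 * Worked example (illustrative): a 1920x1080 mode at 32bpp has a pitch of
 * 1920 * 4 = 7680 bytes and therefore needs
 *
 *	7680 * 1080 = 8294400 bytes (~7.9 MiB)
 *
 * which is checked against max_primary_mem for screen targets and against
 * vram_size for the other display unit types.
 */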
2156
2157 /**
2158 * vmw_du_update_layout - Update the display units with the topology from the
2159 * resolution plugin and generate a DRM uevent
2160 * @dev_priv: device private
2161 * @num_rects: number of drm_rect in rects
2162 * @rects: topology to update
2163 */
2164 static int vmw_du_update_layout(struct vmw_private *dev_priv,
2165 unsigned int num_rects, struct drm_rect *rects)
2166 {
2167 struct drm_device *dev = &dev_priv->drm;
2168 struct vmw_display_unit *du;
2169 struct drm_connector *con;
2170 struct drm_connector_list_iter conn_iter;
2171 struct drm_modeset_acquire_ctx ctx;
2172 struct drm_crtc *crtc;
2173 int ret;
2174
2175 /* Currently gui_x/y is protected with the crtc mutex */
2176 mutex_lock(&dev->mode_config.mutex);
2177 drm_modeset_acquire_init(&ctx, 0);
2178 retry:
2179 drm_for_each_crtc(crtc, dev) {
2180 ret = drm_modeset_lock(&crtc->mutex, &ctx);
2181 if (ret < 0) {
2182 if (ret == -EDEADLK) {
2183 drm_modeset_backoff(&ctx);
2184 goto retry;
2185 }
2186 goto out_fini;
2187 }
2188 }
2189
2190 drm_connector_list_iter_begin(dev, &conn_iter);
2191 drm_for_each_connector_iter(con, &conn_iter) {
2192 du = vmw_connector_to_du(con);
2193 if (num_rects > du->unit) {
2194 du->pref_width = drm_rect_width(&rects[du->unit]);
2195 du->pref_height = drm_rect_height(&rects[du->unit]);
2196 du->pref_active = true;
2197 du->gui_x = rects[du->unit].x1;
2198 du->gui_y = rects[du->unit].y1;
2199 } else {
2200 du->pref_width = VMWGFX_MIN_INITIAL_WIDTH;
2201 du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT;
2202 du->pref_active = false;
2203 du->gui_x = 0;
2204 du->gui_y = 0;
2205 }
2206 }
2207 drm_connector_list_iter_end(&conn_iter);
2208
2209 list_for_each_entry(con, &dev->mode_config.connector_list, head) {
2210 du = vmw_connector_to_du(con);
2211 if (num_rects > du->unit) {
2212 drm_object_property_set_value
2213 (&con->base, dev->mode_config.suggested_x_property,
2214 du->gui_x);
2215 drm_object_property_set_value
2216 (&con->base, dev->mode_config.suggested_y_property,
2217 du->gui_y);
2218 } else {
2219 drm_object_property_set_value
2220 (&con->base, dev->mode_config.suggested_x_property,
2221 0);
2222 drm_object_property_set_value
2223 (&con->base, dev->mode_config.suggested_y_property,
2224 0);
2225 }
2226 con->status = vmw_du_connector_detect(con, true);
2227 }
2228 out_fini:
2229 drm_modeset_drop_locks(&ctx);
2230 drm_modeset_acquire_fini(&ctx);
2231 mutex_unlock(&dev->mode_config.mutex);
2232
2233 drm_sysfs_hotplug_event(dev);
2234
2235 return 0;
2236 }
2237
2238 int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
2239 u16 *r, u16 *g, u16 *b,
2240 uint32_t size,
2241 struct drm_modeset_acquire_ctx *ctx)
2242 {
2243 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
2244 int i;
2245
2246 for (i = 0; i < size; i++) {
2247 DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
2248 r[i], g[i], b[i]);
2249 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
2250 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
2251 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
2252 }
2253
2254 return 0;
2255 }
2256
2257 int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
2258 {
2259 return 0;
2260 }
2261
2262 enum drm_connector_status
2263 vmw_du_connector_detect(struct drm_connector *connector, bool force)
2264 {
2265 uint32_t num_displays;
2266 struct drm_device *dev = connector->dev;
2267 struct vmw_private *dev_priv = vmw_priv(dev);
2268 struct vmw_display_unit *du = vmw_connector_to_du(connector);
2269
2270 num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
2271
2272 return ((vmw_connector_to_du(connector)->unit < num_displays &&
2273 du->pref_active) ?
2274 connector_status_connected : connector_status_disconnected);
2275 }
2276
2277 static struct drm_display_mode vmw_kms_connector_builtin[] = {
2278 /* 640x480@60Hz */
2279 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
2280 752, 800, 0, 480, 489, 492, 525, 0,
2281 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2282 /* 800x600@60Hz */
2283 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
2284 968, 1056, 0, 600, 601, 605, 628, 0,
2285 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2286 /* 1024x768@60Hz */
2287 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
2288 1184, 1344, 0, 768, 771, 777, 806, 0,
2289 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2290 /* 1152x864@75Hz */
2291 { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
2292 1344, 1600, 0, 864, 865, 868, 900, 0,
2293 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2294 /* 1280x720@60Hz */
2295 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74500, 1280, 1344,
2296 1472, 1664, 0, 720, 723, 728, 748, 0,
2297 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2298 /* 1280x768@60Hz */
2299 { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
2300 1472, 1664, 0, 768, 771, 778, 798, 0,
2301 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2302 /* 1280x800@60Hz */
2303 { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
2304 1480, 1680, 0, 800, 803, 809, 831, 0,
2305 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2306 /* 1280x960@60Hz */
2307 { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
2308 1488, 1800, 0, 960, 961, 964, 1000, 0,
2309 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2310 /* 1280x1024@60Hz */
2311 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
2312 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
2313 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2314 /* 1360x768@60Hz */
2315 { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
2316 1536, 1792, 0, 768, 771, 777, 795, 0,
2317 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2318	/* 1400x1050@60Hz */
2319 { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
2320 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
2321 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2322 /* 1440x900@60Hz */
2323 { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
2324 1672, 1904, 0, 900, 903, 909, 934, 0,
2325 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2326 /* 1600x1200@60Hz */
2327 { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
2328 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
2329 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2330 /* 1680x1050@60Hz */
2331 { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
2332 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
2333 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2334 /* 1792x1344@60Hz */
2335 { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2336 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
2337 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2338	/* 1856x1392@60Hz */
2339 { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2340 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
2341 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2342 /* 1920x1080@60Hz */
2343 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 173000, 1920, 2048,
2344 2248, 2576, 0, 1080, 1083, 1088, 1120, 0,
2345 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2346 /* 1920x1200@60Hz */
2347 { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2348 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
2349 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2350 /* 1920x1440@60Hz */
2351 { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2352 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
2353 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2354 /* 2560x1440@60Hz */
2355 { DRM_MODE("2560x1440", DRM_MODE_TYPE_DRIVER, 241500, 2560, 2608,
2356 2640, 2720, 0, 1440, 1443, 1448, 1481, 0,
2357 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2358 /* 2560x1600@60Hz */
2359 { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
2360 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
2361 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2362 /* 2880x1800@60Hz */
2363 { DRM_MODE("2880x1800", DRM_MODE_TYPE_DRIVER, 337500, 2880, 2928,
2364 2960, 3040, 0, 1800, 1803, 1809, 1852, 0,
2365 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2366 /* 3840x2160@60Hz */
2367 { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 533000, 3840, 3888,
2368 3920, 4000, 0, 2160, 2163, 2168, 2222, 0,
2369 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2370 /* 3840x2400@60Hz */
2371 { DRM_MODE("3840x2400", DRM_MODE_TYPE_DRIVER, 592250, 3840, 3888,
2372 3920, 4000, 0, 2400, 2403, 2409, 2469, 0,
2373 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2374 /* Terminate */
2375 { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
2376 };
2377
2378 /**
2379 * vmw_guess_mode_timing - Provide fake timings for a
2380 * 60Hz vrefresh mode.
2381 *
2382 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
2383 * members filled in.
2384 */
2385 void vmw_guess_mode_timing(struct drm_display_mode *mode)
2386 {
2387 mode->hsync_start = mode->hdisplay + 50;
2388 mode->hsync_end = mode->hsync_start + 50;
2389 mode->htotal = mode->hsync_end + 50;
2390
2391 mode->vsync_start = mode->vdisplay + 50;
2392 mode->vsync_end = mode->vsync_start + 50;
2393 mode->vtotal = mode->vsync_end + 50;
2394
2395 mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
2396 }
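
/*
 * Worked example (illustrative): for a 1920x1080 preferred mode the guessed
 * totals are htotal = 1920 + 150 = 2070 and vtotal = 1080 + 150 = 1230, so
 *
 *	clock = 2070 * 1230 / 100 * 6 = 152766 kHz
 *
 * and the derived refresh rate, clock * 1000 / (htotal * vtotal), comes out
 * at exactly 60 Hz.
 */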
2397
2398
2399 int vmw_du_connector_fill_modes(struct drm_connector *connector,
2400 uint32_t max_width, uint32_t max_height)
2401 {
2402 struct vmw_display_unit *du = vmw_connector_to_du(connector);
2403 struct drm_device *dev = connector->dev;
2404 struct vmw_private *dev_priv = vmw_priv(dev);
2405 struct drm_display_mode *mode = NULL;
2406 struct drm_display_mode *bmode;
2407 struct drm_display_mode prefmode = { DRM_MODE("preferred",
2408 DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
2409 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2410 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
2411 };
2412 int i;
2413 u32 assumed_bpp = 4;
2414
2415 if (dev_priv->assume_16bpp)
2416 assumed_bpp = 2;
2417
2418 max_width = min(max_width, dev_priv->texture_max_width);
2419 max_height = min(max_height, dev_priv->texture_max_height);
2420
2421 /*
2422	 * For STDU, the SVGA_REG_SCREENTARGET_MAX_WIDTH/HEIGHT registers
2423	 * impose an extra limit on the mode size.
2424 */
2425 if (dev_priv->active_display_unit == vmw_du_screen_target) {
2426 max_width = min(max_width, dev_priv->stdu_max_width);
2427 max_height = min(max_height, dev_priv->stdu_max_height);
2428 }
2429
2430 /* Add preferred mode */
2431 mode = drm_mode_duplicate(dev, &prefmode);
2432 if (!mode)
2433 return 0;
2434 mode->hdisplay = du->pref_width;
2435 mode->vdisplay = du->pref_height;
2436 vmw_guess_mode_timing(mode);
2437 drm_mode_set_name(mode);
2438
2439 if (vmw_kms_validate_mode_vram(dev_priv,
2440 mode->hdisplay * assumed_bpp,
2441 mode->vdisplay)) {
2442 drm_mode_probed_add(connector, mode);
2443 } else {
2444 drm_mode_destroy(dev, mode);
2445 mode = NULL;
2446 }
2447
2448 if (du->pref_mode) {
2449 list_del_init(&du->pref_mode->head);
2450 drm_mode_destroy(dev, du->pref_mode);
2451 }
2452
2453 /* mode might be null here, this is intended */
2454 du->pref_mode = mode;
2455
2456 for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
2457 bmode = &vmw_kms_connector_builtin[i];
2458 if (bmode->hdisplay > max_width ||
2459 bmode->vdisplay > max_height)
2460 continue;
2461
2462 if (!vmw_kms_validate_mode_vram(dev_priv,
2463 bmode->hdisplay * assumed_bpp,
2464 bmode->vdisplay))
2465 continue;
2466
2467 mode = drm_mode_duplicate(dev, bmode);
2468 if (!mode)
2469 return 0;
2470
2471 drm_mode_probed_add(connector, mode);
2472 }
2473
2474 drm_connector_list_update(connector);
2475	/* Move the preferred mode first, to help apps pick the right mode. */
2476 drm_mode_sort(&connector->modes);
2477
2478 return 1;
2479 }
2480
2481 /**
2482 * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
2483 * @dev: drm device for the ioctl
2484 * @data: data pointer for the ioctl
2485 * @file_priv: drm file for the ioctl call
2486 *
2487 * Update preferred topology of display unit as per ioctl request. The topology
2488 * is expressed as array of drm_vmw_rect.
2489 * e.g.
2490 * [0 0 640 480] [640 0 800 600] [0 480 640 480]
2491 *
2492 * NOTE:
2493 * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
2494 * Besides the device limit on topology, x + w and y + h (lower right) cannot
2495 * be greater than INT_MAX; a topology beyond these limits returns an error.
2496 *
2497 * Returns:
2498 * Zero on success, negative errno on failure.
2499 */
2500 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2501 struct drm_file *file_priv)
2502 {
2503 struct vmw_private *dev_priv = vmw_priv(dev);
2504 struct drm_mode_config *mode_config = &dev->mode_config;
2505 struct drm_vmw_update_layout_arg *arg =
2506 (struct drm_vmw_update_layout_arg *)data;
2507 void __user *user_rects;
2508 struct drm_vmw_rect *rects;
2509 struct drm_rect *drm_rects;
2510 unsigned rects_size;
2511 int ret, i;
2512
2513 if (!arg->num_outputs) {
2514 struct drm_rect def_rect = {0, 0,
2515 VMWGFX_MIN_INITIAL_WIDTH,
2516 VMWGFX_MIN_INITIAL_HEIGHT};
2517 vmw_du_update_layout(dev_priv, 1, &def_rect);
2518 return 0;
2519 }
2520
2521 rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
2522 rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
2523 GFP_KERNEL);
2524 if (unlikely(!rects))
2525 return -ENOMEM;
2526
2527 user_rects = (void __user *)(unsigned long)arg->rects;
2528 ret = copy_from_user(rects, user_rects, rects_size);
2529 if (unlikely(ret != 0)) {
2530 DRM_ERROR("Failed to get rects.\n");
2531 ret = -EFAULT;
2532 goto out_free;
2533 }
2534
2535 drm_rects = (struct drm_rect *)rects;
2536
2537 VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
2538 for (i = 0; i < arg->num_outputs; i++) {
2539 struct drm_vmw_rect curr_rect;
2540
2541		/* Verify the user-space rects for overflow, as the kernel uses drm_rect */
2542 if ((rects[i].x + rects[i].w > INT_MAX) ||
2543 (rects[i].y + rects[i].h > INT_MAX)) {
2544 ret = -ERANGE;
2545 goto out_free;
2546 }
2547
2548 curr_rect = rects[i];
2549 drm_rects[i].x1 = curr_rect.x;
2550 drm_rects[i].y1 = curr_rect.y;
2551 drm_rects[i].x2 = curr_rect.x + curr_rect.w;
2552 drm_rects[i].y2 = curr_rect.y + curr_rect.h;
2553
2554 VMW_DEBUG_KMS(" x1 = %d y1 = %d x2 = %d y2 = %d\n",
2555 drm_rects[i].x1, drm_rects[i].y1,
2556 drm_rects[i].x2, drm_rects[i].y2);
2557
2558 /*
2559		 * Currently this check limits the topology to within
2560		 * mode_config->max (which actually is the max texture size
2561		 * supported by the virtual device). This limit is here to address
2562		 * window managers that create a big framebuffer for the whole
2563		 * topology.
2564 */
2565 if (drm_rects[i].x1 < 0 || drm_rects[i].y1 < 0 ||
2566 drm_rects[i].x2 > mode_config->max_width ||
2567 drm_rects[i].y2 > mode_config->max_height) {
2568 VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
2569 drm_rects[i].x1, drm_rects[i].y1,
2570 drm_rects[i].x2, drm_rects[i].y2);
2571 ret = -EINVAL;
2572 goto out_free;
2573 }
2574 }
2575
2576 ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
2577
2578 if (ret == 0)
2579 vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
2580
2581 out_free:
2582 kfree(rects);
2583 return ret;
2584 }
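
/*
 * Conversion example (illustrative, using the rects from the kerneldoc
 * above): the ioctl rect {x = 640, y = 0, w = 800, h = 600} becomes
 *
 *	(struct drm_rect) { .x1 = 640, .y1 = 0, .x2 = 1440, .y2 = 600 }
 *
 * i.e. x2/y2 are exclusive lower-right corners, which is the form that
 * vmw_kms_check_display_memory() and vmw_du_update_layout() expect.
 */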
2585
2586 /**
2587 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
2588 * on a set of cliprects and a set of display units.
2589 *
2590 * @dev_priv: Pointer to a device private structure.
2591 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
2592 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
2593 * Cliprects are given in framebuffer coordinates.
2594 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
2595 * be NULL. Cliprects are given in source coordinates.
2596 * @dest_x: X coordinate offset for the crtc / destination clip rects.
2597 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
2598 * @num_clips: Number of cliprects in the @clips or @vclips array.
2599 * @increment: Integer with which to increment the clip counter when looping.
2600 * Used to skip a predetermined number of clip rects.
2601 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
2602 */
2603 int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
2604 struct vmw_framebuffer *framebuffer,
2605 const struct drm_clip_rect *clips,
2606 const struct drm_vmw_rect *vclips,
2607 s32 dest_x, s32 dest_y,
2608 int num_clips,
2609 int increment,
2610 struct vmw_kms_dirty *dirty)
2611 {
2612 struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
2613 struct drm_crtc *crtc;
2614 u32 num_units = 0;
2615 u32 i, k;
2616
2617 dirty->dev_priv = dev_priv;
2618
2619 /* If crtc is passed, no need to iterate over other display units */
2620 if (dirty->crtc) {
2621 units[num_units++] = vmw_crtc_to_du(dirty->crtc);
2622 } else {
2623 list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
2624 head) {
2625 struct drm_plane *plane = crtc->primary;
2626
2627 if (plane->state->fb == &framebuffer->base)
2628 units[num_units++] = vmw_crtc_to_du(crtc);
2629 }
2630 }
2631
2632 for (k = 0; k < num_units; k++) {
2633 struct vmw_display_unit *unit = units[k];
2634 s32 crtc_x = unit->crtc.x;
2635 s32 crtc_y = unit->crtc.y;
2636 s32 crtc_width = unit->crtc.mode.hdisplay;
2637 s32 crtc_height = unit->crtc.mode.vdisplay;
2638 const struct drm_clip_rect *clips_ptr = clips;
2639 const struct drm_vmw_rect *vclips_ptr = vclips;
2640
2641 dirty->unit = unit;
2642 if (dirty->fifo_reserve_size > 0) {
2643 dirty->cmd = VMW_CMD_RESERVE(dev_priv,
2644 dirty->fifo_reserve_size);
2645 if (!dirty->cmd)
2646 return -ENOMEM;
2647
2648 memset(dirty->cmd, 0, dirty->fifo_reserve_size);
2649 }
2650 dirty->num_hits = 0;
2651 for (i = 0; i < num_clips; i++, clips_ptr += increment,
2652 vclips_ptr += increment) {
2653 s32 clip_left;
2654 s32 clip_top;
2655
2656 /*
2657 * Select clip array type. Note that integer type
2658 * in @clips is unsigned short, whereas in @vclips
2659 * it's 32-bit.
2660 */
2661 if (clips) {
2662 dirty->fb_x = (s32) clips_ptr->x1;
2663 dirty->fb_y = (s32) clips_ptr->y1;
2664 dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
2665 crtc_x;
2666 dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
2667 crtc_y;
2668 } else {
2669 dirty->fb_x = vclips_ptr->x;
2670 dirty->fb_y = vclips_ptr->y;
2671 dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
2672 dest_x - crtc_x;
2673 dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
2674 dest_y - crtc_y;
2675 }
2676
2677 dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
2678 dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
2679
2680 /* Skip this clip if it's outside the crtc region */
2681 if (dirty->unit_x1 >= crtc_width ||
2682 dirty->unit_y1 >= crtc_height ||
2683 dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
2684 continue;
2685
2686 /* Clip right and bottom to crtc limits */
2687 dirty->unit_x2 = min_t(s32, dirty->unit_x2,
2688 crtc_width);
2689 dirty->unit_y2 = min_t(s32, dirty->unit_y2,
2690 crtc_height);
2691
2692 /* Clip left and top to crtc limits */
2693 clip_left = min_t(s32, dirty->unit_x1, 0);
2694 clip_top = min_t(s32, dirty->unit_y1, 0);
2695 dirty->unit_x1 -= clip_left;
2696 dirty->unit_y1 -= clip_top;
2697 dirty->fb_x -= clip_left;
2698 dirty->fb_y -= clip_top;
2699
2700 dirty->clip(dirty);
2701 }
2702
2703 dirty->fifo_commit(dirty);
2704 }
2705
2706 return 0;
2707 }
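
/*
 * Coordinate translation example (illustrative numbers): for a display unit
 * scanning out at (crtc_x, crtc_y) = (640, 0), a clip whose upper-left
 * framebuffer corner is (700, 100) with dest_x = dest_y = 0 yields
 *
 *	dirty->unit_x1 = 700 + 0 - 640 = 60
 *	dirty->unit_y1 = 100 + 0 - 0   = 100
 *
 * so each clip is shifted into the unit's own origin (and then clamped to
 * the crtc bounds) before ->clip() is invoked.
 */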
2708
2709 /**
2710 * vmw_kms_helper_validation_finish - Helper for post KMS command submission
2711 * cleanup and fencing
2712 * @dev_priv: Pointer to the device-private struct
2713 * @file_priv: Pointer identifying the client when user-space fencing is used
2714 * @ctx: Pointer to the validation context
2715 * @out_fence: If non-NULL, returned refcounted fence-pointer
2716 * @user_fence_rep: If non-NULL, pointer to user-space address area
2717 * in which to copy user-space fence info
2718 */
2719 void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
2720 struct drm_file *file_priv,
2721 struct vmw_validation_context *ctx,
2722 struct vmw_fence_obj **out_fence,
2723 struct drm_vmw_fence_rep __user *
2724 user_fence_rep)
2725 {
2726 struct vmw_fence_obj *fence = NULL;
2727 uint32_t handle = 0;
2728 int ret = 0;
2729
2730 if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
2731 out_fence)
2732 ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
2733 file_priv ? &handle : NULL);
2734 vmw_validation_done(ctx, fence);
2735 if (file_priv)
2736 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
2737 ret, user_fence_rep, fence,
2738 handle, -1);
2739 if (out_fence)
2740 *out_fence = fence;
2741 else
2742 vmw_fence_obj_unreference(&fence);
2743 }
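
/*
 * Usage sketch (illustrative, not a real call site): a kernel-internal
 * caller that only wants a fence back passes NULL for the user-space
 * arguments and drops its reference when done:
 *
 *	struct vmw_fence_obj *fence;
 *
 *	vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx,
 *					 &fence, NULL);
 *	if (fence) {
 *		... wait on or attach the fence ...
 *		vmw_fence_obj_unreference(&fence);
 *	}
 */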
2744
2745 /**
2746 * vmw_kms_update_proxy - Helper function to update a proxy surface from
2747 * its backing MOB.
2748 *
2749 * @res: Pointer to the surface resource
2750 * @clips: Clip rects in framebuffer (surface) space.
2751 * @num_clips: Number of clips in @clips.
2752 * @increment: Integer with which to increment the clip counter when looping.
2753 * Used to skip a predetermined number of clip rects.
2754 *
2755 * This function makes sure the proxy surface is updated from its backing MOB
2756 * using the region given by @clips. The surface resource @res and its backing
2757 * MOB need to be reserved and validated on call.
2758 */
2759 int vmw_kms_update_proxy(struct vmw_resource *res,
2760 const struct drm_clip_rect *clips,
2761 unsigned num_clips,
2762 int increment)
2763 {
2764 struct vmw_private *dev_priv = res->dev_priv;
2765 struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
2766 struct {
2767 SVGA3dCmdHeader header;
2768 SVGA3dCmdUpdateGBImage body;
2769 } *cmd;
2770 SVGA3dBox *box;
2771 size_t copy_size = 0;
2772 int i;
2773
2774 if (!clips)
2775 return 0;
2776
2777 cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
2778 if (!cmd)
2779 return -ENOMEM;
2780
2781 for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
2782 box = &cmd->body.box;
2783
2784 cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
2785 cmd->header.size = sizeof(cmd->body);
2786 cmd->body.image.sid = res->id;
2787 cmd->body.image.face = 0;
2788 cmd->body.image.mipmap = 0;
2789
2790 if (clips->x1 > size->width || clips->x2 > size->width ||
2791 clips->y1 > size->height || clips->y2 > size->height) {
2792 DRM_ERROR("Invalid clips outsize of framebuffer.\n");
2793 return -EINVAL;
2794 }
2795
2796 box->x = clips->x1;
2797 box->y = clips->y1;
2798 box->z = 0;
2799 box->w = clips->x2 - clips->x1;
2800 box->h = clips->y2 - clips->y1;
2801 box->d = 1;
2802
2803 copy_size += sizeof(*cmd);
2804 }
2805
2806 vmw_cmd_commit(dev_priv, copy_size);
2807
2808 return 0;
2809 }
2810
2811 /**
2812 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
2813 * property.
2814 *
2815 * @dev_priv: Pointer to a device private struct.
2816 *
2817 * Sets up the implicit placement property unless it's already set up.
2818 */
2819 void
2820 vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
2821 {
2822 if (dev_priv->implicit_placement_property)
2823 return;
2824
2825 dev_priv->implicit_placement_property =
2826 drm_property_create_range(&dev_priv->drm,
2827 DRM_MODE_PROP_IMMUTABLE,
2828 "implicit_placement", 0, 1);
2829 }
2830
2831 /**
2832 * vmw_kms_suspend - Save modesetting state and turn modesetting off.
2833 *
2834 * @dev: Pointer to the drm device
2835 * Return: 0 on success. Negative error code on failure.
2836 */
2837 int vmw_kms_suspend(struct drm_device *dev)
2838 {
2839 struct vmw_private *dev_priv = vmw_priv(dev);
2840
2841 dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
2842 if (IS_ERR(dev_priv->suspend_state)) {
2843 int ret = PTR_ERR(dev_priv->suspend_state);
2844
2845 DRM_ERROR("Failed kms suspend: %d\n", ret);
2846 dev_priv->suspend_state = NULL;
2847
2848 return ret;
2849 }
2850
2851 return 0;
2852 }
2853
2854
2855 /**
2856 * vmw_kms_resume - Re-enable modesetting and restore state
2857 *
2858 * @dev: Pointer to the drm device
2859 * Return: 0 on success. Negative error code on failure.
2860 *
2861 * State is resumed from a previous vmw_kms_suspend(). It's illegal
2862 * to call this function without a previous vmw_kms_suspend().
2863 */
2864 int vmw_kms_resume(struct drm_device *dev)
2865 {
2866 struct vmw_private *dev_priv = vmw_priv(dev);
2867 int ret;
2868
2869 if (WARN_ON(!dev_priv->suspend_state))
2870 return 0;
2871
2872 ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
2873 dev_priv->suspend_state = NULL;
2874
2875 return ret;
2876 }
2877
2878 /**
2879 * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
2880 *
2881 * @dev: Pointer to the drm device
2882 */
2883 void vmw_kms_lost_device(struct drm_device *dev)
2884 {
2885 drm_atomic_helper_shutdown(dev);
2886 }
2887
2888 /**
2889 * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
2890 * @update: The closure structure.
2891 *
2892 * Call this helper after setting the callbacks in &vmw_du_update_plane to do
2893 * a plane update on a display unit.
2894 *
2895 * Return: 0 on success or a negative error code on failure.
2896 */
2897 int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
2898 {
2899 struct drm_plane_state *state = update->plane->state;
2900 struct drm_plane_state *old_state = update->old_state;
2901 struct drm_atomic_helper_damage_iter iter;
2902 struct drm_rect clip;
2903 struct drm_rect bb;
2904 DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
2905 uint32_t reserved_size = 0;
2906 uint32_t submit_size = 0;
2907 uint32_t curr_size = 0;
2908 uint32_t num_hits = 0;
2909 void *cmd_start;
2910 char *cmd_next;
2911 int ret;
2912
2913 /*
2914	 * Iterate in advance to check whether a plane update is really needed,
2915	 * and to find the number of clips inside the plane src for fifo allocation.
2916 */
2917 drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2918 drm_atomic_for_each_plane_damage(&iter, &clip)
2919 num_hits++;
2920
2921 if (num_hits == 0)
2922 return 0;
2923
2924 if (update->vfb->bo) {
2925 struct vmw_framebuffer_bo *vfbbo =
2926 container_of(update->vfb, typeof(*vfbbo), base);
2927
2928 /*
2929		 * For screen targets we want a mappable bo; for everything else we want
2930		 * an accelerated, i.e. host backed (vram or gmr), bo. If the display unit
2931		 * is not a screen target then MOBs shouldn't be available.
2932 */
2933 if (update->dev_priv->active_display_unit == vmw_du_screen_target) {
2934 vmw_bo_placement_set(vfbbo->buffer,
2935 VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR,
2936 VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR);
2937 } else {
2938 WARN_ON(update->dev_priv->has_mob);
2939 vmw_bo_placement_set_default_accelerated(vfbbo->buffer);
2940 }
2941 ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer);
2942 } else {
2943 struct vmw_framebuffer_surface *vfbs =
2944 container_of(update->vfb, typeof(*vfbs), base);
2945
2946 ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
2947 0, VMW_RES_DIRTY_NONE, NULL,
2948 NULL);
2949 }
2950
2951 if (ret)
2952 return ret;
2953
2954 ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
2955 if (ret)
2956 goto out_unref;
2957
2958 reserved_size = update->calc_fifo_size(update, num_hits);
2959 cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
2960 if (!cmd_start) {
2961 ret = -ENOMEM;
2962 goto out_revert;
2963 }
2964
2965 cmd_next = cmd_start;
2966
2967 if (update->post_prepare) {
2968 curr_size = update->post_prepare(update, cmd_next);
2969 cmd_next += curr_size;
2970 submit_size += curr_size;
2971 }
2972
2973 if (update->pre_clip) {
2974 curr_size = update->pre_clip(update, cmd_next, num_hits);
2975 cmd_next += curr_size;
2976 submit_size += curr_size;
2977 }
2978
2979 bb.x1 = INT_MAX;
2980 bb.y1 = INT_MAX;
2981 bb.x2 = INT_MIN;
2982 bb.y2 = INT_MIN;
2983
2984 drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2985 drm_atomic_for_each_plane_damage(&iter, &clip) {
2986 uint32_t fb_x = clip.x1;
2987 uint32_t fb_y = clip.y1;
2988
2989 vmw_du_translate_to_crtc(state, &clip);
2990 if (update->clip) {
2991 curr_size = update->clip(update, cmd_next, &clip, fb_x,
2992 fb_y);
2993 cmd_next += curr_size;
2994 submit_size += curr_size;
2995 }
2996 bb.x1 = min_t(int, bb.x1, clip.x1);
2997 bb.y1 = min_t(int, bb.y1, clip.y1);
2998 bb.x2 = max_t(int, bb.x2, clip.x2);
2999 bb.y2 = max_t(int, bb.y2, clip.y2);
3000 }
3001
3002 curr_size = update->post_clip(update, cmd_next, &bb);
3003 submit_size += curr_size;
3004
3005 if (reserved_size < submit_size)
3006 submit_size = 0;
3007
3008 vmw_cmd_commit(update->dev_priv, submit_size);
3009
3010 vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
3011 update->out_fence, NULL);
3012 return ret;
3013
3014 out_revert:
3015 vmw_validation_revert(&val_ctx);
3016
3017 out_unref:
3018 vmw_validation_unref_lists(&val_ctx);
3019 return ret;
3020 }
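
/*
 * Usage sketch (hypothetical callback names; the real users are the screen
 * object and screen target plane update paths): callers embed struct
 * vmw_du_update_plane in a unit-specific update struct, fill in the
 * callbacks and then invoke the helper:
 *
 *	update.plane = plane;
 *	update.old_state = old_state;
 *	update.dev_priv = dev_priv;
 *	update.calc_fifo_size = my_calc_fifo_size;
 *	update.clip = my_clip;
 *	update.post_clip = my_post_clip;
 *
 *	ret = vmw_du_helper_plane_update(&update);
 */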
3021