Lines Matching refs:backup
42 struct vmw_buffer_object *backup = res->backup;
43 struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;
45 dma_resv_assert_held(res->backup->base.base.resv);
59 rb_insert_color(&res->mob_node, &backup->res_tree);
61 vmw_bo_prio_add(backup, res->used_prio);
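
The matches at 42-61 are the rb-tree insertion in vmw_resource_mob_attach(). A minimal sketch of the walk they belong to, with types taken from vmwgfx_drv.h and <linux/rbtree.h>; the tree key (res->backup_offset) and the prior choice of res->used_prio are assumptions based on this driver's conventions, not shown in the matches:

void vmw_resource_mob_attach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;
	struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;

	dma_resv_assert_held(res->backup->base.base.resv);

	/* res->used_prio is assumed to have been picked from the
	 * resource type's priorities before this point. */

	/* Standard rb-tree descent, keyed (assumed) on the resource's
	 * offset into its backup buffer. */
	while (*new) {
		struct vmw_resource *this =
			container_of(*new, struct vmw_resource, mob_node);

		parent = *new;
		new = (res->backup_offset < this->backup_offset) ?
			&((*new)->rb_left) : &((*new)->rb_right);
	}

	rb_link_node(&res->mob_node, parent, new);
	rb_insert_color(&res->mob_node, &backup->res_tree);
	vmw_bo_prio_add(backup, res->used_prio);
}
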
70 struct vmw_buffer_object *backup = res->backup;
72 dma_resv_assert_held(backup->base.base.resv);
74 rb_erase(&res->mob_node, &backup->res_tree);
76 vmw_bo_prio_del(backup, res->used_prio);
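
The detach path at 70-76 is the inverse. A sketch, assuming an RB_EMPTY_NODE-style "is attached" check, which is the usual idiom for nodes that get erased and re-inserted:

void vmw_resource_mob_detach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;

	dma_resv_assert_held(backup->base.base.resv);

	if (!RB_EMPTY_NODE(&res->mob_node)) {
		rb_erase(&res->mob_node, &backup->res_tree);
		RB_CLEAR_NODE(&res->mob_node); /* keep the check above valid */
		vmw_bo_prio_del(backup, res->used_prio);
	}
}
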
122 if (res->backup) {
123 struct ttm_buffer_object *bo = &res->backup->base;
139 vmw_bo_dirty_release(res->backup);
141 vmw_bo_unreference(&res->backup);
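
Lines 122-141 sit in the resource release path, where ordering matters: the backup bo must be reserved before the mob detach and dirty-tracker teardown, and the reference is dropped only after unreserving. A sketch of that ordering; the res->coherent flag guarding the dirty release is an assumption consistent with the dirty-tracking matches below:

if (res->backup) {
	struct ttm_buffer_object *bo = &res->backup->base;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	BUG_ON(ret);

	vmw_resource_mob_detach(res);	/* leave the mob's rb-tree */
	if (res->coherent)		/* flag name assumed */
		vmw_bo_dirty_release(res->backup);

	ttm_bo_unreserve(bo);
	vmw_bo_unreference(&res->backup);	/* NULLs res->backup */
}
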
225 res->backup = NULL;
348 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
350 * @res: The resource for which to allocate a backup buffer.
359 struct vmw_buffer_object *backup;
362 if (likely(res->backup)) {
363 BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
367 backup = kzalloc(sizeof(*backup), GFP_KERNEL);
368 if (unlikely(!backup))
371 ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
378 res->backup = backup;
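
vmw_resource_buf_alloc() (348-378) is the lazy allocator those matches outline: reuse an existing backup if it is big enough, otherwise create one. A sketch of the whole function; the trailing vmw_bo_init() arguments (placement, free callback) vary between kernel versions and are indicative only:

static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size = PFN_ALIGN(res->backup_size);
	struct vmw_buffer_object *backup;
	int ret;

	if (likely(res->backup)) {
		/* An existing backup buffer must already be big enough. */
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(!backup))
		return -ENOMEM;

	ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
			  res->func->backup_placement,	/* assumed */
			  interruptible,
			  &vmw_bo_bo_free);		/* assumed */
	if (unlikely(ret != 0))
		return ret;	/* init is assumed to free 'backup'
				 * via its destroy callback on failure */

	res->backup = backup;
	return 0;
}
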
390 * containing backup data if a bind operation is needed.
420 * Handle the case where the backup mob is marked coherent but
425 if (res->backup->dirty && !res->dirty) {
429 } else if (!res->backup->dirty && res->dirty) {
445 vmw_bo_dirty_unmap(res->backup, start, end);
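
Lines 420-445 keep the resource-side dirty tracker in step with the backup mob's coherent dirty tracking before validation. A sketch of the branch they come from; dirty_alloc/dirty_free as per-type function pointers is an assumption consistent with the res->func usage elsewhere in this listing:

/* Reconcile the two trackers before validating. */
if (res->backup->dirty && !res->dirty) {
	/* Coherent mob but no resource tracker yet: create one. */
	ret = res->func->dirty_alloc(res);
	if (ret)
		return ret;
} else if (!res->backup->dirty && res->dirty) {
	/* Stale resource tracker with no coherent mob: drop it. */
	res->func->dirty_free(res);
}

Once a dirty span has been handed to the device, vmw_bo_dirty_unmap(res->backup, start, end) (line 445) zaps the page-table entries for that range so subsequent CPU writes fault in and get tracked again.
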
468 * @new_backup: Pointer to new backup buffer if command submission
470 * @new_backup_offset: New backup offset if @switch_backup is true.
487 if (switch_backup && new_backup != res->backup) {
488 if (res->backup) {
491 vmw_bo_dirty_release(res->backup);
492 vmw_bo_unreference(&res->backup);
496 res->backup = vmw_bo_reference(new_backup);
506 res->backup = NULL;
509 vmw_bo_dirty_release(res->backup);
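
Lines 468-509 are vmw_resource_unreserve() swapping one backup buffer for another after command submission. A sketch of that switch; the WARN_ON invariant and the mob re-attach between detach and the NULL case are assumptions filling in the gaps between the matched lines:

if (switch_backup && new_backup != res->backup) {
	if (res->backup) {
		vmw_resource_mob_detach(res);
		if (res->coherent)
			vmw_bo_dirty_release(res->backup);
		vmw_bo_unreference(&res->backup);
	}

	if (new_backup) {
		res->backup = vmw_bo_reference(new_backup);
		/* Validation should already have added a dirty tracker. */
		WARN_ON(res->coherent && !new_backup->dirty);
		vmw_resource_mob_attach(res);
	} else {
		res->backup = NULL;
	}
} else if (switch_backup && res->coherent) {
	vmw_bo_dirty_release(res->backup);	/* line 509: same bo kept */
}

if (switch_backup)
	res->backup_offset = new_backup_offset;
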
528 * vmw_resource_check_buffer - Check whether a backup buffer is needed
533 * @res: The resource for which to allocate a backup buffer.
537 * reserved and validated backup buffer.
550 if (unlikely(res->backup == NULL)) {
557 ttm_bo_get(&res->backup->base);
558 val_buf->bo = &res->backup->base;
569 ret = ttm_bo_validate(&res->backup->base,
584 vmw_bo_unreference(&res->backup);
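
vmw_resource_check_buffer() (528-584) allocates the backup on demand, then reserves and validates it through the ttm_eu execbuf-util helpers on a single-entry list. A condensed sketch; the backup_dirty field name and the exact error unwinding are assumptions, and a needs_backup short-circuit in the real function is omitted:

static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
			  struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	ttm_bo_get(&res->backup->base);	/* val_buf holds its own reference */
	val_buf->bo = &res->backup->base;
	val_buf->num_shared = 0;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	backup_dirty = res->backup_dirty;	/* field name assumed */
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,	/* assumed */
			      &ctx);
	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
	if (backup_dirty)	/* drop a backup we only just created */
		vmw_bo_unreference(&res->backup);
	return ret;
}
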
595 * a backup buffer is present for guest-backed resources. However,
609 if (res->func->needs_backup && res->backup == NULL &&
613 DRM_ERROR("Failed to allocate a backup buffer "
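
Lines 595-613 are vmw_resource_reserve() pre-allocating the backup so that later command submission cannot fail for lack of one. The branch as the matches lay it out; the continuation of the error string and the no_backup parameter name are assumptions:

if (res->func->needs_backup && res->backup == NULL && !no_backup) {
	ret = vmw_resource_buf_alloc(res, interruptible);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a backup buffer "
			  "of size %lu bytes\n",	/* tail assumed */
			  (unsigned long) res->backup_size);
		return ret;
	}
}
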
625 * backup buffer
648 * to a backup buffer.
693 * On successful return, any backup DMA buffer pointed to by @res->backup will
716 if (res->backup)
717 val_buf.bo = &res->backup->base;
739 /* Trylock backup buffers with a NULL ticket. */
757 else if (!res->func->needs_backup && res->backup) {
759 vmw_bo_unreference(&res->backup);
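
vmw_resource_validate() (693-759) hands the backup bo to the validation core through a ttm_validate_buffer and, on success, sheds a leftover backup from resource types that no longer need one (757-759). A sketch of those two ends of the function; the eviction retry loop in between is only described in comments, since its exact shape varies:

/* Feed the backup bo in via a single ttm_validate_buffer
 * (lines 716-717). */
struct ttm_validate_buffer val_buf;

val_buf.bo = NULL;
val_buf.num_shared = 0;
if (res->backup)
	val_buf.bo = &res->backup->base;

/* ... vmw_resource_do_validate() runs here; on -EBUSY the function
 * evicts LRU resources of the same type and retries, trylocking
 * their backup buffers with a NULL ticket (line 739) ... */

/* On success, a type that stopped needing a backup drops the stale
 * buffer; it must already be detached from the mob tree. */
if (!res->func->needs_backup && res->backup)
	vmw_bo_unreference(&res->backup);
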
774 * Evicts the Guest Backed hardware resource if the backup
778 * both require the backup buffer to be reserved.
896 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
935 /* Wait-lock backup buffers with a ticket. */
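
The comments at 739 and 935 mark the two locking modes for backup buffers during eviction: opportunistic eviction inside validate must not block, while the evict-everything path may sleep. Sketched as the two call shapes, assuming vmw_resource_do_evict(ticket, res, interruptible) as the shared helper:

struct ww_acquire_ctx ticket;

/* Inside validate: a NULL ticket makes ttm_eu_reserve_buffers()
 * trylock only, so a contended backup bo just fails with -EBUSY
 * and the loop moves on. */
ret = vmw_resource_do_evict(NULL, evict_res, false);

/* Evict-all (e.g. suspend): a real ticket lets the wound/wait
 * machinery sleep on contended buffers without deadlocking;
 * the ttm_eu helpers init and fini the ticket themselves. */
ret = vmw_resource_do_evict(&ticket, evict_res, true);
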
1001 if (res->backup) {
1002 vbo = res->backup;
1056 if (--res->pin_count == 0 && res->backup) {
1057 struct vmw_buffer_object *vbo = res->backup;
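
Finally, 1001-1002 and 1056-1057 are the pin/unpin pair: pinning a resource also pins its backup bo, and the last unpin releases that pin under the bo's own reservation. A sketch of the unpin side, assuming vmw_bo_pin_reserved() as the helper that flips the pin while the bo is reserved:

if (--res->pin_count == 0 && res->backup) {
	struct vmw_buffer_object *vbo = res->backup;

	(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
	vmw_bo_pin_reserved(vbo, false);	/* undo the pin-side pin */
	ttm_bo_unreserve(&vbo->base);
}
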