Lines Matching refs:qdev

35 static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);
186 qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
192 cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);
194 return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
198 qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
204 cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);
206 return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
209 bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
211 if (!qxl_check_idle(qdev->release_ring)) {
212 schedule_work(&qdev->gc_work);
214 flush_work(&qdev->gc_work);
220 int qxl_garbage_collect(struct qxl_device *qdev)
227 while (qxl_ring_pop(qdev->release_ring, &id)) {
230 release = qxl_release_from_id_locked(qdev, id);
234 info = qxl_release_map(qdev, release);
236 qxl_release_unmap(qdev, release, info);
252 qxl_release_free(qdev, release);
262 int qxl_alloc_bo_reserved(struct qxl_device *qdev,
270 ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
287 static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
290 long addr = qdev->io_base + port;
293 mutex_lock(&qdev->async_io_mutex);
294 irq_num = atomic_read(&qdev->irq_received_io_cmd);
295 if (qdev->last_sent_io_cmd > irq_num) {
297 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
298 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
300 ret = wait_event_timeout(qdev->io_cmd_event,
301 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
305 irq_num = atomic_read(&qdev->irq_received_io_cmd);
308 qdev->last_sent_io_cmd = irq_num + 1;
310 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
311 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
313 ret = wait_event_timeout(qdev->io_cmd_event,
314 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
318 mutex_unlock(&qdev->async_io_mutex);
322 static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
327 ret = wait_for_io_cmd_user(qdev, val, port, false);
332 int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
353 mutex_lock(&qdev->update_area_mutex);
354 qdev->ram_header->update_area = *area;
355 qdev->ram_header->update_surface = surface_id;
356 ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true);
357 mutex_unlock(&qdev->update_area_mutex);
361 void qxl_io_notify_oom(struct qxl_device *qdev)
363 outb(0, qdev->io_base + QXL_IO_NOTIFY_OOM);
366 void qxl_io_flush_release(struct qxl_device *qdev)
368 outb(0, qdev->io_base + QXL_IO_FLUSH_RELEASE);
371 void qxl_io_flush_surfaces(struct qxl_device *qdev)
373 wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);
376 void qxl_io_destroy_primary(struct qxl_device *qdev)
378 wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
379 qdev->primary_bo->is_primary = false;
380 drm_gem_object_put(&qdev->primary_bo->tbo.base);
381 qdev->primary_bo = NULL;
384 void qxl_io_create_primary(struct qxl_device *qdev, struct qxl_bo *bo)
388 if (WARN_ON(qdev->primary_bo))
391 DRM_DEBUG_DRIVER("qdev %p, ram_header %p\n", qdev, qdev->ram_header);
392 create = &qdev->ram_header->create_surface;
397 create->mem = qxl_bo_physical_address(qdev, bo, 0);
404 wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
405 qdev->primary_bo = bo;
406 qdev->primary_bo->is_primary = true;
407 drm_gem_object_get(&qdev->primary_bo->tbo.base);
410 void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
413 wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
416 void qxl_io_reset(struct qxl_device *qdev)
418 outb(0, qdev->io_base + QXL_IO_RESET);
421 void qxl_io_monitors_config(struct qxl_device *qdev)
423 wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC);
426 int qxl_surface_id_alloc(struct qxl_device *qdev,
434 spin_lock(&qdev->surf_id_idr_lock);
435 idr_ret = idr_alloc(&qdev->surf_id_idr, NULL, 1, 0, GFP_NOWAIT);
436 spin_unlock(&qdev->surf_id_idr_lock);
442 if (handle >= qdev->rom->n_surfaces) {
444 spin_lock(&qdev->surf_id_idr_lock);
445 idr_remove(&qdev->surf_id_idr, handle);
446 spin_unlock(&qdev->surf_id_idr_lock);
447 qxl_reap_surface_id(qdev, 2);
452 spin_lock(&qdev->surf_id_idr_lock);
453 qdev->last_alloced_surf_id = handle;
454 spin_unlock(&qdev->surf_id_idr_lock);
458 void qxl_surface_id_dealloc(struct qxl_device *qdev,
461 spin_lock(&qdev->surf_id_idr_lock);
462 idr_remove(&qdev->surf_id_idr, surface_id);
463 spin_unlock(&qdev->surf_id_idr_lock);
466 int qxl_hw_surface_alloc(struct qxl_device *qdev,
476 ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
484 qxl_release_free(qdev, release);
487 cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
494 cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
496 qxl_release_unmap(qdev, release, &cmd->release_info);
504 qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
507 spin_lock(&qdev->surf_id_idr_lock);
508 idr_replace(&qdev->surf_id_idr, surf, surf->surface_id);
509 spin_unlock(&qdev->surf_id_idr_lock);
513 int qxl_hw_surface_dealloc(struct qxl_device *qdev,
524 ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_DESTROY,
532 spin_lock(&qdev->surf_id_idr_lock);
533 idr_replace(&qdev->surf_id_idr, NULL, surf->surface_id);
534 spin_unlock(&qdev->surf_id_idr_lock);
541 cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
544 qxl_release_unmap(qdev, release, &cmd->release_info);
547 qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
552 static int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
564 ret = qxl_io_update_area(qdev, surf, &rect);
570 static void qxl_surface_evict_locked(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
574 qxl_update_surface(qdev, surf);
577 qxl_hw_surface_dealloc(qdev, surf);
580 void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
582 mutex_lock(&qdev->surf_evict_mutex);
583 qxl_surface_evict_locked(qdev, surf, do_update_area);
584 mutex_unlock(&qdev->surf_evict_mutex);
587 static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
596 mutex_unlock(&qdev->surf_evict_mutex);
601 mutex_lock(&qdev->surf_evict_mutex);
607 qxl_surface_evict_locked(qdev, surf, true);
612 static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap)
619 mutex_lock(&qdev->surf_evict_mutex);
622 spin_lock(&qdev->surf_id_idr_lock);
623 start = qdev->last_alloced_surf_id + 1;
624 spin_unlock(&qdev->surf_id_idr_lock);
626 for (i = start; i < start + qdev->rom->n_surfaces; i++) {
628 int surfid = i % qdev->rom->n_surfaces;
633 spin_lock(&qdev->surf_id_idr_lock);
634 objptr = idr_find(&qdev->surf_id_idr, surfid);
635 spin_unlock(&qdev->surf_id_idr_lock);
640 ret = qxl_reap_surf(qdev, objptr, stall);
651 mutex_unlock(&qdev->surf_evict_mutex);
654 qxl_queue_garbage_collect(qdev, true);
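
The densest cluster of matches above belongs to wait_for_io_cmd_user(), the helper behind the QXL_IO_*_ASYNC port writes: take async_io_mutex, snapshot irq_received_io_cmd, catch up on any still-unacknowledged command, write the port, record last_sent_io_cmd, and sleep on io_cmd_event until the interrupt handler advances the counter. The sketch below is only an illustration of how those matched fragments appear to fit together; the outb() call, the intr/!intr branching, the goto-out error path and the return-value normalization are not part of the matched lines and are assumed from the surrounding qxl_io_*() helpers.

/*
 * Sketch only: reconstruction of the async I/O command handshake from the
 * wait_for_io_cmd_user() matches above.  Lines marked "assumed" are not in
 * the listing and may differ from the real source file.
 */
#include <linux/atomic.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/wait.h>

static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val,
				long port, bool intr)
{
	int irq_num;
	long addr = qdev->io_base + port;	/* I/O port for this async command */
	int ret;

	mutex_lock(&qdev->async_io_mutex);	/* one async command in flight at a time */

	/* Snapshot how many command-complete IRQs have been seen so far. */
	irq_num = atomic_read(&qdev->irq_received_io_cmd);

	/* An earlier command is still unacknowledged: wait for it first. */
	if (qdev->last_sent_io_cmd > irq_num) {
		if (intr)
			ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
				atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5 * HZ);
		else
			ret = wait_event_timeout(qdev->io_cmd_event,
				atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5 * HZ);
		if (ret <= 0)			/* timeout or signal: bail out (assumed) */
			goto out;
		irq_num = atomic_read(&qdev->irq_received_io_cmd);
	}

	outb(val, addr);			/* kick the device (assumed; mirrors qxl_io_notify_oom) */
	qdev->last_sent_io_cmd = irq_num + 1;	/* remember which ack we now expect */

	/* Sleep until the IRQ handler bumps irq_received_io_cmd and wakes io_cmd_event. */
	if (intr)
		ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
			atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5 * HZ);
	else
		ret = wait_event_timeout(qdev->io_cmd_event,
			atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5 * HZ);

out:
	if (ret > 0)				/* woke up in time: report success (assumed) */
		ret = 0;
	mutex_unlock(&qdev->async_io_mutex);
	return ret;
}

Per the listing itself, qxl_io_update_area() is the one caller that passes intr = true (so the wait can be interrupted by a signal), while wait_for_io_cmd() wraps the non-interruptible case used by the other async ports such as QXL_IO_CREATE_PRIMARY_ASYNC and QXL_IO_MONITORS_CONFIG_ASYNC.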