Lines matching defs:par (vmwgfx fbdev emulation; each fragment keeps its original source line number)
76 struct vmw_fb_par *par = info->par;
77 u32 *pal = par->pseudo_palette;
84 switch (par->set_fb->format->depth) {
93 par->set_fb->format->depth,
94 par->set_fb->format->cpp[0] * 8);
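These hunks appear to come from the kernel's vmwgfx fbdev layer. The setcolreg fragment above (source lines 76-94) dispatches on the framebuffer depth to pack one entry of par->pseudo_palette. A minimal userspace sketch of that kind of packing, assuming the common 15/16/24-bit channel layouts; the helper name and exact shifts are illustrative, not the driver's code:

#include <stdint.h>

/* Illustrative: pack 8-bit R/G/B into one pseudo-palette word for a
 * given color depth, as an fbdev setcolreg handler typically does.
 * Depths and shifts are assumptions for this sketch. */
static uint32_t pack_pal(int depth, uint8_t r, uint8_t g, uint8_t b)
{
    switch (depth) {
    case 15: /* XRGB1555 */
        return ((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3);
    case 16: /* RGB565 */
        return ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3);
    case 24: /* XRGB8888, top byte unused */
    case 32:
        return (r << 16) | (g << 8) | b;
    default:
        return 0;
    }
}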
105 struct vmw_fb_par *par = info->par;
106 struct vmw_private *vmw_priv = par->vmw_priv;
143 if ((var->xoffset + var->xres) > par->max_width ||
144 (var->yoffset + var->yres) > par->max_height) {
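The check_var fragment (source 105-144) rejects any configuration whose pan offset plus visible resolution would exceed the maximum surface size probed at init time (par->max_width / par->max_height, filled in at lines 671-672 below).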
173 * off during hibernation using the par->dirty.active bool.
177 struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
179 struct vmw_private *vmw_priv = par->vmw_priv;
187 struct vmw_buffer_object *vbo = par->vmw_bo;
190 if (!READ_ONCE(par->dirty.active))
193 mutex_lock(&par->bo_mutex);
194 cur_fb = par->set_fb;
204 spin_lock_irqsave(&par->dirty.lock, irq_flags);
205 if (!par->dirty.active) {
206 spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
215 max_x = par->fb_x + cur_fb->width;
216 max_y = par->fb_y + cur_fb->height;
218 dst_x1 = par->dirty.x1 - par->fb_x;
219 dst_y1 = par->dirty.y1 - par->fb_y;
223 dst_x2 = par->dirty.x2 - par->fb_x;
224 dst_y2 = par->dirty.y2 - par->fb_y;
232 par->dirty.x1 = par->dirty.x2 = 0;
233 par->dirty.y1 = par->dirty.y2 = 0;
234 spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
238 (dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
239 src_ptr = (u8 *)par->vmalloc +
240 ((dst_y1 + par->fb_y) * info->fix.line_length +
241 (dst_x1 + par->fb_x) * cpp);
245 dst_ptr += par->set_fb->pitches[0];
259 WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
264 mutex_unlock(&par->bo_mutex);
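The flush worker (source 173-264) snapshots the dirty rectangle under par->dirty.lock, clears it, and then copies that region row by row from the vmalloc'd shadow into the buffer object before calling the framebuffer's dirty() hook. Note that the enable flag is checked twice, once with READ_ONCE at line 190 and again under the spinlock at line 205, so a concurrent vmw_fb_off() cannot be missed. A self-contained sketch of the pitched row copy the dst_x1/dst_y1 math above feeds into (names are illustrative):

#include <stdint.h>
#include <string.h>

/* Illustrative model of the shadow-to-BO copy: source and destination
 * hold the same image but may have different line pitches, so each
 * dirty row is copied individually. */
static void copy_dirty_rect(uint8_t *dst, unsigned dst_pitch,
                            const uint8_t *src, unsigned src_pitch,
                            unsigned x1, unsigned y1,
                            unsigned x2, unsigned y2, unsigned cpp)
{
    unsigned w = (x2 - x1) * cpp;

    dst += y1 * dst_pitch + x1 * cpp;
    src += y1 * src_pitch + x1 * cpp;
    for (unsigned y = y1; y < y2; y++) {
        memcpy(dst, src, w);
        dst += dst_pitch;
        src += src_pitch;
    }
}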
267 static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
275 spin_lock_irqsave(&par->dirty.lock, flags);
276 if (par->dirty.x1 == par->dirty.x2) {
277 par->dirty.x1 = x1;
278 par->dirty.y1 = y1;
279 par->dirty.x2 = x2;
280 par->dirty.y2 = y2;
283 if (par->dirty.active)
284 schedule_delayed_work(&par->local_work,
287 if (x1 < par->dirty.x1)
288 par->dirty.x1 = x1;
289 if (y1 < par->dirty.y1)
290 par->dirty.y1 = y1;
291 if (x2 > par->dirty.x2)
292 par->dirty.x2 = x2;
293 if (y2 > par->dirty.y2)
294 par->dirty.y2 = y2;
296 spin_unlock_irqrestore(&par->dirty.lock, flags);
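vmw_fb_dirty_mark (source 267-296) accumulates damage: when the current rectangle is empty (x1 == x2 serves as the "empty" sentinel), it adopts the new rectangle and schedules the flush worker; otherwise it just grows the rectangle to the bounding union. A userspace sketch of that accumulation, minus the IRQ-safe locking and workqueue plumbing (names illustrative):

#include <stdbool.h>

struct dirty_rect {
    int x1, y1, x2, y2; /* x1 == x2 means "empty" */
};

/* Returns true when the caller should kick the flush worker, i.e. the
 * rect just went from empty to non-empty (mirrors the schedule at
 * source line 284). */
static bool mark_dirty(struct dirty_rect *d, int x1, int y1, int x2, int y2)
{
    if (d->x1 == d->x2) {           /* empty: adopt the new rect */
        d->x1 = x1; d->y1 = y1;
        d->x2 = x2; d->y2 = y2;
        return true;
    }
    /* non-empty: grow to the bounding union */
    if (x1 < d->x1) d->x1 = x1;
    if (y1 < d->y1) d->y1 = y1;
    if (x2 > d->x2) d->x2 = x2;
    if (y2 > d->y2) d->y2 = y2;
    return false;
}

In the driver this all runs under par->dirty.lock with IRQs saved, and the schedule is additionally gated on par->dirty.active (line 283).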
302 struct vmw_fb_par *par = info->par;
310 mutex_lock(&par->bo_mutex);
311 par->fb_x = var->xoffset;
312 par->fb_y = var->yoffset;
313 if (par->set_fb)
314 vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
315 par->set_fb->height);
316 mutex_unlock(&par->bo_mutex);
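The pan handler (source 302-316) only records the new origin in par->fb_x / par->fb_y and, if a framebuffer is set, marks all of it dirty so the next flush repaints from the new offset; bo_mutex serializes this against mode changes.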
324 struct vmw_fb_par *par = info->par;
343 spin_lock_irqsave(&par->dirty.lock, flags);
344 par->dirty.x1 = 0;
345 par->dirty.y1 = y1;
346 par->dirty.x2 = info->var.xres;
347 par->dirty.y2 = y2;
348 spin_unlock_irqrestore(&par->dirty.lock, flags);
354 cancel_delayed_work(&par->local_work);
355 schedule_delayed_work(&par->local_work, 0);
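The hunk at source 324-355 (apparently the deferred-I/O path) only knows which scanlines were touched, so it marks a full-width band dirty (x1 = 0, x2 = info->var.xres) and then forces an immediate flush by cancelling any pending delayed work and rescheduling it with a zero delay.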
371 vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
378 vmw_fb_dirty_mark(info->par, region->dx, region->dy,
385 vmw_fb_dirty_mark(info->par, image->dx, image->dy,
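The fbdev drawing ops (source 371-385) render into the shadow buffer with the generic fbdev helpers and then call vmw_fb_dirty_mark() with the rectangle each primitive touched (rect, region, image), so software rendering feeds the same dirty tracking as everything else.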
461 static int vmw_fb_kms_detach(struct vmw_fb_par *par,
465 struct drm_framebuffer *cur_fb = par->set_fb;
469 if (par->set_mode) {
472 set.crtc = par->crtc;
478 set.connectors = &par->con;
484 drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
485 par->set_mode = NULL;
490 par->set_fb = NULL;
493 if (par->vmw_bo && detach_bo && unref_bo)
494 vmw_bo_unreference(&par->vmw_bo);
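vmw_fb_kms_detach (source 461-494) tears things down in stages: if a mode was set, it performs a modeset on par->crtc with par->con to drop the framebuffer, destroys and clears par->set_mode, clears par->set_fb, and only unreferences par->vmw_bo when the caller asked for both detach and unref.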
502 struct vmw_fb_par *par = info->par;
519 cur_fb = par->set_fb;
528 ret = vmw_fb_kms_detach(par,
529 par->bo_size < new_bo_size ||
530 par->bo_size > 2*new_bo_size,
535 if (!par->vmw_bo) {
536 ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
537 &par->vmw_bo);
543 par->bo_size = new_bo_size;
546 vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
551 par->set_fb = &vfb->base;
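The framebuffer (re)create path (source 502-551) only throws the old buffer object away when it cannot hold the new framebuffer or wastes more than half its space; otherwise it is reused across mode changes. The predicate at lines 529-530 is small enough to state directly (a sketch, with an assumed helper name):

#include <stdbool.h>
#include <stddef.h>

/* Illustrative: reallocate only when the BO is too small, or more
 * than twice the required size. */
static bool must_realloc_bo(size_t bo_size, size_t new_bo_size)
{
    return bo_size < new_bo_size || bo_size > 2 * new_bo_size;
}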
558 struct vmw_fb_par *par = info->par;
559 struct vmw_private *vmw_priv = par->vmw_priv;
588 mutex_lock(&par->bo_mutex);
593 par->fb_x = var->xoffset;
594 par->fb_y = var->yoffset;
596 set.crtc = par->crtc;
600 set.fb = par->set_fb;
602 set.connectors = &par->con;
608 vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
609 par->set_fb->width, par->set_fb->height);
614 schedule_delayed_work(&par->local_work, 0);
617 if (par->set_mode)
618 drm_mode_destroy(vmw_priv->dev, par->set_mode);
619 par->set_mode = mode;
621 mutex_unlock(&par->bo_mutex);
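vmw_fb_set_par (source 558-621) applies a new mode under bo_mutex: it stores the pan origin, points set.crtc/set.fb/set.connectors at the par-owned objects, performs the set, marks the whole new framebuffer dirty, kicks an immediate flush, and swaps the new mode into par->set_mode, destroying the old one.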
642 struct vmw_fb_par *par;
658 info = framebuffer_alloc(sizeof(*par), device);
666 par = info->par;
667 memset(par, 0, sizeof(*par));
668 INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
669 par->vmw_priv = vmw_priv;
670 par->vmalloc = NULL;
671 par->max_width = fb_width;
672 par->max_height = fb_height;
674 ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
675 par->max_height, &par->con,
676 &par->crtc, &init_mode);
686 par->vmalloc = vzalloc(fb_size);
687 if (unlikely(par->vmalloc == NULL)) {
708 info->pseudo_palette = par->pseudo_palette;
709 info->screen_base = (char __iomem *)par->vmalloc;
745 par->dirty.x1 = par->dirty.x2 = 0;
746 par->dirty.y1 = par->dirty.y2 = 0;
747 par->dirty.active = true;
748 spin_lock_init(&par->dirty.lock);
749 mutex_init(&par->bo_mutex);
765 vfree(par->vmalloc);
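In the init path (source 642-765), par lives inside the fb_info allocation, the flush worker is wired up with INIT_DELAYED_WORK, and info->screen_base points at the vzalloc'd shadow (line 709) rather than at device memory; that is why every drawing path above must mark rectangles dirty for the worker to copy out.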
776 struct vmw_fb_par *par;
782 par = info->par;
786 cancel_delayed_work_sync(&par->local_work);
789 mutex_lock(&par->bo_mutex);
790 (void) vmw_fb_kms_detach(par, true, true);
791 mutex_unlock(&par->bo_mutex);
793 vfree(par->vmalloc);
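Teardown (source 776-793) is the mirror image: cancel the flush worker synchronously first, then detach and unreference everything under bo_mutex, and finally free the shadow buffer.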
802 struct vmw_fb_par *par;
809 par = info->par;
811 spin_lock_irqsave(&par->dirty.lock, flags);
812 par->dirty.active = false;
813 spin_unlock_irqrestore(&par->dirty.lock, flags);
816 flush_delayed_work(&par->local_work);
824 struct vmw_fb_par *par;
831 par = info->par;
833 spin_lock_irqsave(&par->dirty.lock, flags);
834 par->dirty.active = true;
835 spin_unlock_irqrestore(&par->dirty.lock, flags);
842 schedule_delayed_work(&par->local_work, 0);
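vmw_fb_off (source 802-816) and vmw_fb_on (824-842) bracket suspend/resume: off clears dirty.active under the spinlock and then flush_delayed_work() so no copy is in flight while the device is down; on sets the flag again and schedules an immediate flush to repaint anything drawn meanwhile. A tiny userspace model of the flag half of that handshake (illustrative names; the real code uses a spinlock and a delayed workqueue):

#include <pthread.h>
#include <stdbool.h>

struct fb_state {
    pthread_mutex_t lock;   /* stands in for par->dirty.lock; must be initialized */
    bool active;            /* stands in for par->dirty.active */
};

/* Model of vmw_fb_off(): disable dirty tracking under the lock. */
void fb_off(struct fb_state *s)
{
    pthread_mutex_lock(&s->lock);
    s->active = false;
    pthread_mutex_unlock(&s->lock);
    /* driver then calls: flush_delayed_work(&par->local_work); */
}

/* Model of vmw_fb_on(): re-enable tracking under the lock. */
void fb_on(struct fb_state *s)
{
    pthread_mutex_lock(&s->lock);
    s->active = true;
    pthread_mutex_unlock(&s->lock);
    /* driver then calls: schedule_delayed_work(&par->local_work, 0); */
}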