Lines matching refs:vma in drivers/gpu/drm/drm_vm.c (legacy DRM mapping code). The number at the start of each entry is the source line in that file.

59 	struct vm_area_struct *vma;
63 static void drm_vm_open(struct vm_area_struct *vma);
64 static void drm_vm_close(struct vm_area_struct *vma);
67 struct vm_area_struct *vma)
69 pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
78 if (efi_range_is_wc(vma->vm_start, vma->vm_end -
79 vma->vm_start))
89 static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
91 pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
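The fragments above (source lines 67-91) come from the drm_io_prot()/drm_dma_prot() helpers: the base protection is derived from the VMA flags with vm_get_page_prot() and then tightened for I/O or DMA memory. A minimal sketch of that pattern follows; example_io_prot() and its write_combine parameter are illustrative names, not the actual drm_vm.c interface.

#include <linux/mm.h>

/*
 * Start from the protection the core VM would use for these vm_flags,
 * then mark the mapping write-combined or uncached, as appropriate for
 * the underlying I/O memory.
 */
static pgprot_t example_io_prot(struct vm_area_struct *vma, bool write_combine)
{
	pgprot_t prot = vm_get_page_prot(vma->vm_flags);

	if (write_combine)
		prot = pgprot_writecombine(prot);
	else
		prot = pgprot_noncached(prot);

	return prot;
}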
102 * \param vma virtual memory area.
112 struct vm_area_struct *vma = vmf->vma;
113 struct drm_file *priv = vma->vm_file->private_data;
128 if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
139 resource_size_t offset = vmf->address - vma->vm_start;
192 * \param vma virtual memory area.
201 struct vm_area_struct *vma = vmf->vma;
202 struct drm_local_map *map = vma->vm_private_data;
210 offset = vmf->address - vma->vm_start;
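The fault-handler fragments above (source lines 112-210, drm_vm_fault and drm_vm_shm_fault) follow one pattern: recover the mapping's backing object, turn the faulting address into an offset from vma->vm_start, and hand the resolved page back to the core VM. A minimal sketch of such a .fault handler, assuming a vmalloc()-backed buffer stashed in vm_private_data (the real handlers look the map up via the DRM hashtab or per-file data instead):

#include <linux/mm.h>
#include <linux/vmalloc.h>

static vm_fault_t example_shm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	void *backing = vma->vm_private_data;	/* assumed: vmalloc()ed buffer set at mmap time */
	unsigned long offset = vmf->address - vma->vm_start;
	struct page *page;

	if (!backing)
		return VM_FAULT_SIGBUS;

	/* Resolve the page backing this offset and pin it for the VM. */
	page = vmalloc_to_page(backing + offset);
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);
	vmf->page = page;
	return 0;
}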
225 * \param vma virtual memory area.
230 static void drm_vm_shm_close(struct vm_area_struct *vma)
232 struct drm_file *priv = vma->vm_file->private_data;
240 vma->vm_start, vma->vm_end - vma->vm_start);
242 map = vma->vm_private_data;
246 if (pt->vma->vm_private_data == map)
248 if (pt->vma == vma) {
301 struct vm_area_struct *vma = vmf->vma;
302 struct drm_file *priv = vma->vm_file->private_data;
314 offset = vmf->address - vma->vm_start;
336 struct vm_area_struct *vma = vmf->vma;
337 struct drm_local_map *map = vma->vm_private_data;
338 struct drm_file *priv = vma->vm_file->private_data;
351 offset = vmf->address - vma->vm_start;
390 struct vm_area_struct *vma)
395 vma->vm_start, vma->vm_end - vma->vm_start);
399 vma_entry->vma = vma;
405 static void drm_vm_open(struct vm_area_struct *vma)
407 struct drm_file *priv = vma->vm_file->private_data;
411 drm_vm_open_locked(dev, vma);
416 struct vm_area_struct *vma)
421 vma->vm_start, vma->vm_end - vma->vm_start);
424 if (pt->vma == vma) {
435 * \param vma virtual memory area.
437 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
440 static void drm_vm_close(struct vm_area_struct *vma)
442 struct drm_file *priv = vma->vm_file->private_data;
446 drm_vm_close_locked(dev, vma);
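Source lines 390-446 above (drm_vm_open_locked/drm_vm_open and drm_vm_close_locked/drm_vm_close) show the bookkeeping side: every new mapping is recorded on drm_device::vmalist and unlinked again when the VMA goes away. A sketch of the same open/close tracking with stand-in names; struct example_vma_entry, example_vmalist and its mutex are assumptions for the example, not the drm_device fields.

#include <linux/list.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct example_vma_entry {
	struct list_head head;
	struct vm_area_struct *vma;
};

static LIST_HEAD(example_vmalist);
static DEFINE_MUTEX(example_vmalist_lock);

/* Called for every new copy of the mapping (and explicitly at mmap time
 * in the fragments above): remember the VMA on the tracking list. */
static void example_vm_open(struct vm_area_struct *vma)
{
	struct example_vma_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return;		/* tracking is best-effort in this sketch */

	entry->vma = vma;
	mutex_lock(&example_vmalist_lock);
	list_add(&entry->head, &example_vmalist);
	mutex_unlock(&example_vmalist_lock);
}

/* Called when the mapping is torn down: unlink and free its entry. */
static void example_vm_close(struct vm_area_struct *vma)
{
	struct example_vma_entry *entry, *tmp;

	mutex_lock(&example_vmalist_lock);
	list_for_each_entry_safe(entry, tmp, &example_vmalist, head) {
		if (entry->vma == vma) {
			list_del(&entry->head);
			kfree(entry);
			break;
		}
	}
	mutex_unlock(&example_vmalist_lock);
}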
454 * \param vma virtual memory area.
460 static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
465 unsigned long length = vma->vm_end - vma->vm_start;
470 vma->vm_start, vma->vm_end, vma->vm_pgoff);
479 vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE);
481 pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
486 vma->vm_page_prot =
489 (__pte(pgprot_val(vma->vm_page_prot)))));
493 vma->vm_ops = &drm_vm_dma_ops;
495 vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
497 drm_vm_open_locked(dev, vma);
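drm_mmap_dma() (source lines 460-497 above) illustrates the usual mmap-time setup for a fault-backed mapping: validate the requested length, drop write permission when the backing store is read-only, install the vm_operations, and pin the flags with VM_DONTEXPAND | VM_DONTDUMP. A sketch of that sequence; EXAMPLE_DMA_SIZE, example_dma_writable and example_vm_dma_ops are assumptions for the example, and the ops reuse the fault/open/close sketches above.

#include <linux/mm.h>

#define EXAMPLE_DMA_SIZE	(4UL << 20)	/* assumed size of the backing buffers */

static bool example_dma_writable = true;	/* assumed device capability */

static const struct vm_operations_struct example_vm_dma_ops = {
	.fault = example_shm_fault,
	.open  = example_vm_open,
	.close = example_vm_close,
};

static int example_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long length = vma->vm_end - vma->vm_start;

	if (length > EXAMPLE_DMA_SIZE)
		return -EINVAL;

	/* Read-only backing: strip write permission now and for good. */
	if (!example_dma_writable)
		vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE);

	vma->vm_ops = &example_vm_dma_ops;

	/* Fault-backed mapping: never grown via mremap(), never core-dumped. */
	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);

	example_vm_open(vma);	/* record the mapping, as drm_vm_open_locked() does */
	return 0;
}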
514 * \param vma virtual memory area.
523 static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
532 vma->vm_start, vma->vm_end, vma->vm_pgoff);
541 if (!vma->vm_pgoff
547 return drm_mmap_dma(filp, vma);
549 if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
559 if (map->size < vma->vm_end - vma->vm_start)
563 vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE);
565 pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
570 vma->vm_page_prot =
573 (__pte(pgprot_val(vma->vm_page_prot)))));
587 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
589 vma->vm_ops = &drm_vm_ops;
597 vma->vm_page_prot = drm_io_prot(map, vma);
598 if (io_remap_pfn_range(vma, vma->vm_start,
600 vma->vm_end - vma->vm_start,
601 vma->vm_page_prot))
606 vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));
608 vma->vm_ops = &drm_vm_ops;
613 if (remap_pfn_range(vma, vma->vm_start,
615 vma->vm_end - vma->vm_start, vma->vm_page_prot))
617 vma->vm_page_prot = drm_dma_prot(map->type, vma);
620 vma->vm_ops = &drm_vm_shm_ops;
621 vma->vm_private_data = (void *)map;
624 vma->vm_ops = &drm_vm_sg_ops;
625 vma->vm_private_data = (void *)map;
626 vma->vm_page_prot = drm_dma_prot(map->type, vma);
631 vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
633 drm_vm_open_locked(dev, vma);
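For register and framebuffer maps, drm_mmap_locked() (source lines 523-633 above) does not fault pages in at all: it picks an I/O-appropriate protection and populates the whole range up front with io_remap_pfn_range(). A sketch of that branch, reusing the ops from the previous sketch for open/close tracking; example_phys_base is an assumed stand-in for the map offset taken from the driver's map list, not a drm_local_map field.

#include <linux/mm.h>

static int example_mmap_io(struct vm_area_struct *vma,
			   resource_size_t example_phys_base)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/* I/O memory: map it uncached (or write-combined where supported). */
	vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	vma->vm_ops = &example_vm_dma_ops;

	/* Populate the whole VMA now instead of relying on .fault. */
	if (io_remap_pfn_range(vma, vma->vm_start,
			       example_phys_base >> PAGE_SHIFT,
			       size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}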
637 int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
647 ret = drm_mmap_locked(filp, vma);
657 struct drm_vma_entry *vma, *vma_temp;
659 /* Clear vma list (only needed for legacy drivers) */
660 list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
661 list_del(&vma->head);
662 kfree(vma);
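The final fragment (source lines 657-662) is the teardown counterpart of the open/close tracking: on device cleanup every entry still on the vmalist is unlinked and freed. The same loop as a stand-alone sketch, building on the example list and entry type introduced after the close handlers above; the _safe iterator is what makes deleting entries while walking the list legal.

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

static void example_vma_flush(void)
{
	struct example_vma_entry *entry, *tmp;

	mutex_lock(&example_vmalist_lock);
	list_for_each_entry_safe(entry, tmp, &example_vmalist, head) {
		list_del(&entry->head);
		kfree(entry);
	}
	mutex_unlock(&example_vmalist_lock);
}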