18c2ecf20Sopenharmony_ci// SPDX-License-Identifier: GPL-2.0
28c2ecf20Sopenharmony_ci/*
38c2ecf20Sopenharmony_ci * Copyright (C) 2015-2018 Etnaviv Project
48c2ecf20Sopenharmony_ci */
58c2ecf20Sopenharmony_ci
68c2ecf20Sopenharmony_ci#include <drm/drm_prime.h>
78c2ecf20Sopenharmony_ci#include <linux/dma-mapping.h>
88c2ecf20Sopenharmony_ci#include <linux/shmem_fs.h>
98c2ecf20Sopenharmony_ci#include <linux/spinlock.h>
108c2ecf20Sopenharmony_ci#include <linux/vmalloc.h>
118c2ecf20Sopenharmony_ci
128c2ecf20Sopenharmony_ci#include "etnaviv_drv.h"
138c2ecf20Sopenharmony_ci#include "etnaviv_gem.h"
148c2ecf20Sopenharmony_ci#include "etnaviv_gpu.h"
158c2ecf20Sopenharmony_ci#include "etnaviv_mmu.h"
168c2ecf20Sopenharmony_ci
178c2ecf20Sopenharmony_cistatic struct lock_class_key etnaviv_shm_lock_class;
188c2ecf20Sopenharmony_cistatic struct lock_class_key etnaviv_userptr_lock_class;
198c2ecf20Sopenharmony_ci
/*
 * Create the DMA mapping for the object's scatterlist, cleaning the
 * CPU caches over the pages in the process.  Called once after the
 * sg_table has been attached in etnaviv_gem_get_pages().
 */
static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	/* NOTE(review): dma_map_sgtable() can fail but the error is
	 * dropped here — confirm callers can tolerate an unmapped sgt. */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}
328c2ecf20Sopenharmony_ci
338c2ecf20Sopenharmony_cistatic void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
348c2ecf20Sopenharmony_ci{
358c2ecf20Sopenharmony_ci	struct drm_device *dev = etnaviv_obj->base.dev;
368c2ecf20Sopenharmony_ci	struct sg_table *sgt = etnaviv_obj->sgt;
378c2ecf20Sopenharmony_ci
388c2ecf20Sopenharmony_ci	/*
398c2ecf20Sopenharmony_ci	 * For non-cached buffers, ensure the new pages are clean
408c2ecf20Sopenharmony_ci	 * because display controller, GPU, etc. are not coherent:
418c2ecf20Sopenharmony_ci	 *
428c2ecf20Sopenharmony_ci	 * WARNING: The DMA API does not support concurrent CPU
438c2ecf20Sopenharmony_ci	 * and device access to the memory area.  With BIDIRECTIONAL,
448c2ecf20Sopenharmony_ci	 * we will clean the cache lines which overlap the region,
458c2ecf20Sopenharmony_ci	 * and invalidate all cache lines (partially) contained in
468c2ecf20Sopenharmony_ci	 * the region.
478c2ecf20Sopenharmony_ci	 *
488c2ecf20Sopenharmony_ci	 * If you have dirty data in the overlapping cache lines,
498c2ecf20Sopenharmony_ci	 * that will corrupt the GPU-written data.  If you have
508c2ecf20Sopenharmony_ci	 * written into the remainder of the region, this can
518c2ecf20Sopenharmony_ci	 * discard those writes.
528c2ecf20Sopenharmony_ci	 */
538c2ecf20Sopenharmony_ci	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
548c2ecf20Sopenharmony_ci		dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
558c2ecf20Sopenharmony_ci}
568c2ecf20Sopenharmony_ci
578c2ecf20Sopenharmony_ci/* called with etnaviv_obj->lock held */
588c2ecf20Sopenharmony_cistatic int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
598c2ecf20Sopenharmony_ci{
608c2ecf20Sopenharmony_ci	struct drm_device *dev = etnaviv_obj->base.dev;
618c2ecf20Sopenharmony_ci	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);
628c2ecf20Sopenharmony_ci
638c2ecf20Sopenharmony_ci	if (IS_ERR(p)) {
648c2ecf20Sopenharmony_ci		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
658c2ecf20Sopenharmony_ci		return PTR_ERR(p);
668c2ecf20Sopenharmony_ci	}
678c2ecf20Sopenharmony_ci
688c2ecf20Sopenharmony_ci	etnaviv_obj->pages = p;
698c2ecf20Sopenharmony_ci
708c2ecf20Sopenharmony_ci	return 0;
718c2ecf20Sopenharmony_ci}
728c2ecf20Sopenharmony_ci
738c2ecf20Sopenharmony_cistatic void put_pages(struct etnaviv_gem_object *etnaviv_obj)
748c2ecf20Sopenharmony_ci{
758c2ecf20Sopenharmony_ci	if (etnaviv_obj->sgt) {
768c2ecf20Sopenharmony_ci		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
778c2ecf20Sopenharmony_ci		sg_free_table(etnaviv_obj->sgt);
788c2ecf20Sopenharmony_ci		kfree(etnaviv_obj->sgt);
798c2ecf20Sopenharmony_ci		etnaviv_obj->sgt = NULL;
808c2ecf20Sopenharmony_ci	}
818c2ecf20Sopenharmony_ci	if (etnaviv_obj->pages) {
828c2ecf20Sopenharmony_ci		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
838c2ecf20Sopenharmony_ci				  true, false);
848c2ecf20Sopenharmony_ci
858c2ecf20Sopenharmony_ci		etnaviv_obj->pages = NULL;
868c2ecf20Sopenharmony_ci	}
878c2ecf20Sopenharmony_ci}
888c2ecf20Sopenharmony_ci
/*
 * Lazily attach backing pages and a DMA-mapped sg_table to the object,
 * returning the page array (or an ERR_PTR).  Must be called with
 * etnaviv_obj->lock held; both steps are idempotent across calls.
 */
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	/* First make sure the backing pages themselves are present. */
	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	/* Then build and DMA-map the scatterlist over those pages. */
	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev,
					    etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}
1218c2ecf20Sopenharmony_ci
/*
 * Counterpart to etnaviv_gem_get_pages().  Must be called with
 * etnaviv_obj->lock held.  Currently a no-op: pages stay attached
 * until the object is destroyed.
 */
void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}
1278c2ecf20Sopenharmony_ci
/*
 * Finish setting up a userspace mapping of the object according to its
 * caching flags.  Runs after drm_gem_mmap() has done the generic VMA
 * setup.  Always returns 0.
 */
static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	/* Pages are inserted one by one from the fault handler, so this
	 * VMA is a mixed map rather than a pure PFN map. */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		/* Swap the DRM device file backing the VMA for the object's
		 * own shmem file; the fput/get_file pair keeps refcounts
		 * balanced across the exchange. */
		fput(vma->vm_file);
		get_file(etnaviv_obj->base.filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = etnaviv_obj->base.filp;

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}
1588c2ecf20Sopenharmony_ci
1598c2ecf20Sopenharmony_ciint etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
1608c2ecf20Sopenharmony_ci{
1618c2ecf20Sopenharmony_ci	struct etnaviv_gem_object *obj;
1628c2ecf20Sopenharmony_ci	int ret;
1638c2ecf20Sopenharmony_ci
1648c2ecf20Sopenharmony_ci	ret = drm_gem_mmap(filp, vma);
1658c2ecf20Sopenharmony_ci	if (ret) {
1668c2ecf20Sopenharmony_ci		DBG("mmap failed: %d", ret);
1678c2ecf20Sopenharmony_ci		return ret;
1688c2ecf20Sopenharmony_ci	}
1698c2ecf20Sopenharmony_ci
1708c2ecf20Sopenharmony_ci	obj = to_etnaviv_bo(vma->vm_private_data);
1718c2ecf20Sopenharmony_ci	return obj->ops->mmap(obj, vma);
1728c2ecf20Sopenharmony_ci}
1738c2ecf20Sopenharmony_ci
/*
 * Page-fault handler for mmap'ed etnaviv objects: attach backing pages
 * if necessary and insert the faulting page into the VMA.
 */
vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int err;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet.  Note that vmf_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	err = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (err)
		/* Interrupted by a signal: NOPAGE makes the fault retry. */
		return VM_FAULT_NOPAGE;
	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		return vmf_error(err);
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	return vmf_insert_page(vma, vmf->address, page);
}
2108c2ecf20Sopenharmony_ci
2118c2ecf20Sopenharmony_ciint etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
2128c2ecf20Sopenharmony_ci{
2138c2ecf20Sopenharmony_ci	int ret;
2148c2ecf20Sopenharmony_ci
2158c2ecf20Sopenharmony_ci	/* Make it mmapable */
2168c2ecf20Sopenharmony_ci	ret = drm_gem_create_mmap_offset(obj);
2178c2ecf20Sopenharmony_ci	if (ret)
2188c2ecf20Sopenharmony_ci		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
2198c2ecf20Sopenharmony_ci	else
2208c2ecf20Sopenharmony_ci		*offset = drm_vma_node_offset_addr(&obj->vma_node);
2218c2ecf20Sopenharmony_ci
2228c2ecf20Sopenharmony_ci	return ret;
2238c2ecf20Sopenharmony_ci}
2248c2ecf20Sopenharmony_ci
2258c2ecf20Sopenharmony_cistatic struct etnaviv_vram_mapping *
2268c2ecf20Sopenharmony_cietnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
2278c2ecf20Sopenharmony_ci			     struct etnaviv_iommu_context *context)
2288c2ecf20Sopenharmony_ci{
2298c2ecf20Sopenharmony_ci	struct etnaviv_vram_mapping *mapping;
2308c2ecf20Sopenharmony_ci
2318c2ecf20Sopenharmony_ci	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
2328c2ecf20Sopenharmony_ci		if (mapping->context == context)
2338c2ecf20Sopenharmony_ci			return mapping;
2348c2ecf20Sopenharmony_ci	}
2358c2ecf20Sopenharmony_ci
2368c2ecf20Sopenharmony_ci	return NULL;
2378c2ecf20Sopenharmony_ci}
2388c2ecf20Sopenharmony_ci
2398c2ecf20Sopenharmony_civoid etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
2408c2ecf20Sopenharmony_ci{
2418c2ecf20Sopenharmony_ci	struct etnaviv_gem_object *etnaviv_obj = mapping->object;
2428c2ecf20Sopenharmony_ci
2438c2ecf20Sopenharmony_ci	mutex_lock(&etnaviv_obj->lock);
2448c2ecf20Sopenharmony_ci	WARN_ON(mapping->use == 0);
2458c2ecf20Sopenharmony_ci	mapping->use -= 1;
2468c2ecf20Sopenharmony_ci	mutex_unlock(&etnaviv_obj->lock);
2478c2ecf20Sopenharmony_ci
2488c2ecf20Sopenharmony_ci	drm_gem_object_put(&etnaviv_obj->base);
2498c2ecf20Sopenharmony_ci}
2508c2ecf20Sopenharmony_ci
/*
 * Look up (or create) the mapping of @obj in @mmu_context and take a
 * use reference on it.  On success the caller also holds an extra GEM
 * object reference, released by etnaviv_gem_mapping_unreference().
 * Returns the mapping or an ERR_PTR on failure.
 */
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
	u64 va)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us.  If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&mmu_context->lock);
			if (mapping->context == mmu_context)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&mmu_context->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	/* No usable mapping: make sure backing pages are attached first. */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		/* re-used mapping: unlink it; it is re-added below */
		list_del(&mapping->obj_node);
	}

	mapping->context = etnaviv_iommu_context_get(mmu_context);
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
				    mmu_context->global->memory_base,
				    mapping, va);
	if (ret < 0) {
		/* IOMMU map failed: drop the context ref and the node */
		etnaviv_iommu_context_put(mmu_context);
		kfree(mapping);
	} else {
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
	}

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_get(obj);
	return mapping;
}
3318c2ecf20Sopenharmony_ci
/*
 * Return a kernel virtual mapping of the object, creating it on first
 * use.  The unlocked fast-path read is safe because a vaddr, once set,
 * is never changed for the lifetime of the object.  Returns NULL if
 * the mapping could not be created.
 */
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}
3508c2ecf20Sopenharmony_ci
3518c2ecf20Sopenharmony_cistatic void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
3528c2ecf20Sopenharmony_ci{
3538c2ecf20Sopenharmony_ci	struct page **pages;
3548c2ecf20Sopenharmony_ci
3558c2ecf20Sopenharmony_ci	lockdep_assert_held(&obj->lock);
3568c2ecf20Sopenharmony_ci
3578c2ecf20Sopenharmony_ci	pages = etnaviv_gem_get_pages(obj);
3588c2ecf20Sopenharmony_ci	if (IS_ERR(pages))
3598c2ecf20Sopenharmony_ci		return NULL;
3608c2ecf20Sopenharmony_ci
3618c2ecf20Sopenharmony_ci	return vmap(pages, obj->base.size >> PAGE_SHIFT,
3628c2ecf20Sopenharmony_ci			VM_MAP, pgprot_writecombine(PAGE_KERNEL));
3638c2ecf20Sopenharmony_ci}
3648c2ecf20Sopenharmony_ci
3658c2ecf20Sopenharmony_cistatic inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
3668c2ecf20Sopenharmony_ci{
3678c2ecf20Sopenharmony_ci	if (op & ETNA_PREP_READ)
3688c2ecf20Sopenharmony_ci		return DMA_FROM_DEVICE;
3698c2ecf20Sopenharmony_ci	else if (op & ETNA_PREP_WRITE)
3708c2ecf20Sopenharmony_ci		return DMA_TO_DEVICE;
3718c2ecf20Sopenharmony_ci	else
3728c2ecf20Sopenharmony_ci		return DMA_BIDIRECTIONAL;
3738c2ecf20Sopenharmony_ci}
3748c2ecf20Sopenharmony_ci
3758c2ecf20Sopenharmony_ciint etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
3768c2ecf20Sopenharmony_ci		struct drm_etnaviv_timespec *timeout)
3778c2ecf20Sopenharmony_ci{
3788c2ecf20Sopenharmony_ci	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
3798c2ecf20Sopenharmony_ci	struct drm_device *dev = obj->dev;
3808c2ecf20Sopenharmony_ci	bool write = !!(op & ETNA_PREP_WRITE);
3818c2ecf20Sopenharmony_ci	int ret;
3828c2ecf20Sopenharmony_ci
3838c2ecf20Sopenharmony_ci	if (!etnaviv_obj->sgt) {
3848c2ecf20Sopenharmony_ci		void *ret;
3858c2ecf20Sopenharmony_ci
3868c2ecf20Sopenharmony_ci		mutex_lock(&etnaviv_obj->lock);
3878c2ecf20Sopenharmony_ci		ret = etnaviv_gem_get_pages(etnaviv_obj);
3888c2ecf20Sopenharmony_ci		mutex_unlock(&etnaviv_obj->lock);
3898c2ecf20Sopenharmony_ci		if (IS_ERR(ret))
3908c2ecf20Sopenharmony_ci			return PTR_ERR(ret);
3918c2ecf20Sopenharmony_ci	}
3928c2ecf20Sopenharmony_ci
3938c2ecf20Sopenharmony_ci	if (op & ETNA_PREP_NOSYNC) {
3948c2ecf20Sopenharmony_ci		if (!dma_resv_test_signaled_rcu(obj->resv,
3958c2ecf20Sopenharmony_ci							  write))
3968c2ecf20Sopenharmony_ci			return -EBUSY;
3978c2ecf20Sopenharmony_ci	} else {
3988c2ecf20Sopenharmony_ci		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
3998c2ecf20Sopenharmony_ci
4008c2ecf20Sopenharmony_ci		ret = dma_resv_wait_timeout_rcu(obj->resv,
4018c2ecf20Sopenharmony_ci							  write, true, remain);
4028c2ecf20Sopenharmony_ci		if (ret <= 0)
4038c2ecf20Sopenharmony_ci			return ret == 0 ? -ETIMEDOUT : ret;
4048c2ecf20Sopenharmony_ci	}
4058c2ecf20Sopenharmony_ci
4068c2ecf20Sopenharmony_ci	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
4078c2ecf20Sopenharmony_ci		dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
4088c2ecf20Sopenharmony_ci					 etnaviv_op_to_dma_dir(op));
4098c2ecf20Sopenharmony_ci		etnaviv_obj->last_cpu_prep_op = op;
4108c2ecf20Sopenharmony_ci	}
4118c2ecf20Sopenharmony_ci
4128c2ecf20Sopenharmony_ci	return 0;
4138c2ecf20Sopenharmony_ci}
4148c2ecf20Sopenharmony_ci
4158c2ecf20Sopenharmony_ciint etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
4168c2ecf20Sopenharmony_ci{
4178c2ecf20Sopenharmony_ci	struct drm_device *dev = obj->dev;
4188c2ecf20Sopenharmony_ci	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
4198c2ecf20Sopenharmony_ci
4208c2ecf20Sopenharmony_ci	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
4218c2ecf20Sopenharmony_ci		/* fini without a prep is almost certainly a userspace error */
4228c2ecf20Sopenharmony_ci		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
4238c2ecf20Sopenharmony_ci		dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
4248c2ecf20Sopenharmony_ci			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
4258c2ecf20Sopenharmony_ci		etnaviv_obj->last_cpu_prep_op = 0;
4268c2ecf20Sopenharmony_ci	}
4278c2ecf20Sopenharmony_ci
4288c2ecf20Sopenharmony_ci	return 0;
4298c2ecf20Sopenharmony_ci}
4308c2ecf20Sopenharmony_ci
/*
 * Wait until the GPU has no outstanding work against the object,
 * bounded by @timeout.
 */
int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct drm_etnaviv_timespec *timeout)
{
	return etnaviv_gpu_wait_obj_inactive(gpu, to_etnaviv_bo(obj),
					     timeout);
}
4388c2ecf20Sopenharmony_ci
4398c2ecf20Sopenharmony_ci#ifdef CONFIG_DEBUG_FS
4408c2ecf20Sopenharmony_cistatic void etnaviv_gem_describe_fence(struct dma_fence *fence,
4418c2ecf20Sopenharmony_ci	const char *type, struct seq_file *m)
4428c2ecf20Sopenharmony_ci{
4438c2ecf20Sopenharmony_ci	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
4448c2ecf20Sopenharmony_ci		seq_printf(m, "\t%9s: %s %s seq %llu\n",
4458c2ecf20Sopenharmony_ci			   type,
4468c2ecf20Sopenharmony_ci			   fence->ops->get_driver_name(fence),
4478c2ecf20Sopenharmony_ci			   fence->ops->get_timeline_name(fence),
4488c2ecf20Sopenharmony_ci			   fence->seqno);
4498c2ecf20Sopenharmony_ci}
4508c2ecf20Sopenharmony_ci
/*
 * Dump one object's state (flags, activity, refcount, mmap offset,
 * vaddr, size) plus its pending reservation fences to debugfs.
 */
static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	unsigned long off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, etnaviv_obj->vaddr, obj->size);

	/* Walk the reservation object's fences under RCU, as they may be
	 * replaced concurrently. */
	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			etnaviv_gem_describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		etnaviv_gem_describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}
4808c2ecf20Sopenharmony_ci
4818c2ecf20Sopenharmony_civoid etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
4828c2ecf20Sopenharmony_ci	struct seq_file *m)
4838c2ecf20Sopenharmony_ci{
4848c2ecf20Sopenharmony_ci	struct etnaviv_gem_object *etnaviv_obj;
4858c2ecf20Sopenharmony_ci	int count = 0;
4868c2ecf20Sopenharmony_ci	size_t size = 0;
4878c2ecf20Sopenharmony_ci
4888c2ecf20Sopenharmony_ci	mutex_lock(&priv->gem_lock);
4898c2ecf20Sopenharmony_ci	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
4908c2ecf20Sopenharmony_ci		struct drm_gem_object *obj = &etnaviv_obj->base;
4918c2ecf20Sopenharmony_ci
4928c2ecf20Sopenharmony_ci		seq_puts(m, "   ");
4938c2ecf20Sopenharmony_ci		etnaviv_gem_describe(obj, m);
4948c2ecf20Sopenharmony_ci		count++;
4958c2ecf20Sopenharmony_ci		size += obj->size;
4968c2ecf20Sopenharmony_ci	}
4978c2ecf20Sopenharmony_ci	mutex_unlock(&priv->gem_lock);
4988c2ecf20Sopenharmony_ci
4998c2ecf20Sopenharmony_ci	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
5008c2ecf20Sopenharmony_ci}
5018c2ecf20Sopenharmony_ci#endif
5028c2ecf20Sopenharmony_ci
/*
 * ->release hook for shmem-backed objects: drop the kernel vmap (if
 * any) and release the DMA mapping plus backing pages.
 */
static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}
5088c2ecf20Sopenharmony_ci
/* Object ops for ordinary shmem-backed GEM objects. */
static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};
5158c2ecf20Sopenharmony_ci
/*
 * Final destruction of a GEM object once its last reference is gone:
 * unlink it from the device-wide list, tear down all per-context VRAM
 * mappings, then release the backing storage and the object itself.
 */
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_drm_private *priv = obj->dev->dev_private;
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	mutex_lock(&priv->gem_lock);
	list_del(&etnaviv_obj->gem_node);
	mutex_unlock(&priv->gem_lock);

	/* Safe iteration: entries are freed as we go. */
	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu_context *context = mapping->context;

		/* a live use count here means a leaked mapping reference */
		WARN_ON(mapping->use);

		/* context is NULL for mappings already reaped by the MMU */
		if (context) {
			etnaviv_iommu_unmap_gem(context, mapping);
			etnaviv_iommu_context_put(context);
		}

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}
5508c2ecf20Sopenharmony_ci
5518c2ecf20Sopenharmony_civoid etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
5528c2ecf20Sopenharmony_ci{
5538c2ecf20Sopenharmony_ci	struct etnaviv_drm_private *priv = dev->dev_private;
5548c2ecf20Sopenharmony_ci	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
5558c2ecf20Sopenharmony_ci
5568c2ecf20Sopenharmony_ci	mutex_lock(&priv->gem_lock);
5578c2ecf20Sopenharmony_ci	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
5588c2ecf20Sopenharmony_ci	mutex_unlock(&priv->gem_lock);
5598c2ecf20Sopenharmony_ci}
5608c2ecf20Sopenharmony_ci
5618c2ecf20Sopenharmony_cistatic int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
5628c2ecf20Sopenharmony_ci	const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
5638c2ecf20Sopenharmony_ci{
5648c2ecf20Sopenharmony_ci	struct etnaviv_gem_object *etnaviv_obj;
5658c2ecf20Sopenharmony_ci	unsigned sz = sizeof(*etnaviv_obj);
5668c2ecf20Sopenharmony_ci	bool valid = true;
5678c2ecf20Sopenharmony_ci
5688c2ecf20Sopenharmony_ci	/* validate flags */
5698c2ecf20Sopenharmony_ci	switch (flags & ETNA_BO_CACHE_MASK) {
5708c2ecf20Sopenharmony_ci	case ETNA_BO_UNCACHED:
5718c2ecf20Sopenharmony_ci	case ETNA_BO_CACHED:
5728c2ecf20Sopenharmony_ci	case ETNA_BO_WC:
5738c2ecf20Sopenharmony_ci		break;
5748c2ecf20Sopenharmony_ci	default:
5758c2ecf20Sopenharmony_ci		valid = false;
5768c2ecf20Sopenharmony_ci	}
5778c2ecf20Sopenharmony_ci
5788c2ecf20Sopenharmony_ci	if (!valid) {
5798c2ecf20Sopenharmony_ci		dev_err(dev->dev, "invalid cache flag: %x\n",
5808c2ecf20Sopenharmony_ci			(flags & ETNA_BO_CACHE_MASK));
5818c2ecf20Sopenharmony_ci		return -EINVAL;
5828c2ecf20Sopenharmony_ci	}
5838c2ecf20Sopenharmony_ci
5848c2ecf20Sopenharmony_ci	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
5858c2ecf20Sopenharmony_ci	if (!etnaviv_obj)
5868c2ecf20Sopenharmony_ci		return -ENOMEM;
5878c2ecf20Sopenharmony_ci
5888c2ecf20Sopenharmony_ci	etnaviv_obj->flags = flags;
5898c2ecf20Sopenharmony_ci	etnaviv_obj->ops = ops;
5908c2ecf20Sopenharmony_ci
5918c2ecf20Sopenharmony_ci	mutex_init(&etnaviv_obj->lock);
5928c2ecf20Sopenharmony_ci	INIT_LIST_HEAD(&etnaviv_obj->vram_list);
5938c2ecf20Sopenharmony_ci
5948c2ecf20Sopenharmony_ci	*obj = &etnaviv_obj->base;
5958c2ecf20Sopenharmony_ci
5968c2ecf20Sopenharmony_ci	return 0;
5978c2ecf20Sopenharmony_ci}
5988c2ecf20Sopenharmony_ci
5998c2ecf20Sopenharmony_ci/* convenience method to construct a GEM buffer object, and userspace handle */
6008c2ecf20Sopenharmony_ciint etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
6018c2ecf20Sopenharmony_ci	u32 size, u32 flags, u32 *handle)
6028c2ecf20Sopenharmony_ci{
6038c2ecf20Sopenharmony_ci	struct etnaviv_drm_private *priv = dev->dev_private;
6048c2ecf20Sopenharmony_ci	struct drm_gem_object *obj = NULL;
6058c2ecf20Sopenharmony_ci	int ret;
6068c2ecf20Sopenharmony_ci
6078c2ecf20Sopenharmony_ci	size = PAGE_ALIGN(size);
6088c2ecf20Sopenharmony_ci
6098c2ecf20Sopenharmony_ci	ret = etnaviv_gem_new_impl(dev, size, flags,
6108c2ecf20Sopenharmony_ci				   &etnaviv_gem_shmem_ops, &obj);
6118c2ecf20Sopenharmony_ci	if (ret)
6128c2ecf20Sopenharmony_ci		goto fail;
6138c2ecf20Sopenharmony_ci
6148c2ecf20Sopenharmony_ci	lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);
6158c2ecf20Sopenharmony_ci
6168c2ecf20Sopenharmony_ci	ret = drm_gem_object_init(dev, obj, size);
6178c2ecf20Sopenharmony_ci	if (ret)
6188c2ecf20Sopenharmony_ci		goto fail;
6198c2ecf20Sopenharmony_ci
6208c2ecf20Sopenharmony_ci	/*
6218c2ecf20Sopenharmony_ci	 * Our buffers are kept pinned, so allocating them from the MOVABLE
6228c2ecf20Sopenharmony_ci	 * zone is a really bad idea, and conflicts with CMA. See comments
6238c2ecf20Sopenharmony_ci	 * above new_inode() why this is required _and_ expected if you're
6248c2ecf20Sopenharmony_ci	 * going to pin these pages.
6258c2ecf20Sopenharmony_ci	 */
6268c2ecf20Sopenharmony_ci	mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);
6278c2ecf20Sopenharmony_ci
6288c2ecf20Sopenharmony_ci	etnaviv_gem_obj_add(dev, obj);
6298c2ecf20Sopenharmony_ci
6308c2ecf20Sopenharmony_ci	ret = drm_gem_handle_create(file, obj, handle);
6318c2ecf20Sopenharmony_ci
6328c2ecf20Sopenharmony_ci	/* drop reference from allocate - handle holds it now */
6338c2ecf20Sopenharmony_cifail:
6348c2ecf20Sopenharmony_ci	drm_gem_object_put(obj);
6358c2ecf20Sopenharmony_ci
6368c2ecf20Sopenharmony_ci	return ret;
6378c2ecf20Sopenharmony_ci}
6388c2ecf20Sopenharmony_ci
6398c2ecf20Sopenharmony_ciint etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
6408c2ecf20Sopenharmony_ci	const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
6418c2ecf20Sopenharmony_ci{
6428c2ecf20Sopenharmony_ci	struct drm_gem_object *obj;
6438c2ecf20Sopenharmony_ci	int ret;
6448c2ecf20Sopenharmony_ci
6458c2ecf20Sopenharmony_ci	ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
6468c2ecf20Sopenharmony_ci	if (ret)
6478c2ecf20Sopenharmony_ci		return ret;
6488c2ecf20Sopenharmony_ci
6498c2ecf20Sopenharmony_ci	drm_gem_private_object_init(dev, obj, size);
6508c2ecf20Sopenharmony_ci
6518c2ecf20Sopenharmony_ci	*res = to_etnaviv_bo(obj);
6528c2ecf20Sopenharmony_ci
6538c2ecf20Sopenharmony_ci	return 0;
6548c2ecf20Sopenharmony_ci}
6558c2ecf20Sopenharmony_ci
6568c2ecf20Sopenharmony_cistatic int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
6578c2ecf20Sopenharmony_ci{
6588c2ecf20Sopenharmony_ci	struct page **pvec = NULL;
6598c2ecf20Sopenharmony_ci	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
6608c2ecf20Sopenharmony_ci	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
6618c2ecf20Sopenharmony_ci
6628c2ecf20Sopenharmony_ci	might_lock_read(&current->mm->mmap_lock);
6638c2ecf20Sopenharmony_ci
6648c2ecf20Sopenharmony_ci	if (userptr->mm != current->mm)
6658c2ecf20Sopenharmony_ci		return -EPERM;
6668c2ecf20Sopenharmony_ci
6678c2ecf20Sopenharmony_ci	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
6688c2ecf20Sopenharmony_ci	if (!pvec)
6698c2ecf20Sopenharmony_ci		return -ENOMEM;
6708c2ecf20Sopenharmony_ci
6718c2ecf20Sopenharmony_ci	do {
6728c2ecf20Sopenharmony_ci		unsigned num_pages = npages - pinned;
6738c2ecf20Sopenharmony_ci		uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
6748c2ecf20Sopenharmony_ci		struct page **pages = pvec + pinned;
6758c2ecf20Sopenharmony_ci
6768c2ecf20Sopenharmony_ci		ret = pin_user_pages_fast(ptr, num_pages,
6778c2ecf20Sopenharmony_ci					  FOLL_WRITE | FOLL_FORCE, pages);
6788c2ecf20Sopenharmony_ci		if (ret < 0) {
6798c2ecf20Sopenharmony_ci			unpin_user_pages(pvec, pinned);
6808c2ecf20Sopenharmony_ci			kvfree(pvec);
6818c2ecf20Sopenharmony_ci			return ret;
6828c2ecf20Sopenharmony_ci		}
6838c2ecf20Sopenharmony_ci
6848c2ecf20Sopenharmony_ci		pinned += ret;
6858c2ecf20Sopenharmony_ci
6868c2ecf20Sopenharmony_ci	} while (pinned < npages);
6878c2ecf20Sopenharmony_ci
6888c2ecf20Sopenharmony_ci	etnaviv_obj->pages = pvec;
6898c2ecf20Sopenharmony_ci
6908c2ecf20Sopenharmony_ci	return 0;
6918c2ecf20Sopenharmony_ci}
6928c2ecf20Sopenharmony_ci
6938c2ecf20Sopenharmony_cistatic void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
6948c2ecf20Sopenharmony_ci{
6958c2ecf20Sopenharmony_ci	if (etnaviv_obj->sgt) {
6968c2ecf20Sopenharmony_ci		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
6978c2ecf20Sopenharmony_ci		sg_free_table(etnaviv_obj->sgt);
6988c2ecf20Sopenharmony_ci		kfree(etnaviv_obj->sgt);
6998c2ecf20Sopenharmony_ci	}
7008c2ecf20Sopenharmony_ci	if (etnaviv_obj->pages) {
7018c2ecf20Sopenharmony_ci		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
7028c2ecf20Sopenharmony_ci
7038c2ecf20Sopenharmony_ci		unpin_user_pages(etnaviv_obj->pages, npages);
7048c2ecf20Sopenharmony_ci		kvfree(etnaviv_obj->pages);
7058c2ecf20Sopenharmony_ci	}
7068c2ecf20Sopenharmony_ci}
7078c2ecf20Sopenharmony_ci
7088c2ecf20Sopenharmony_cistatic int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
7098c2ecf20Sopenharmony_ci		struct vm_area_struct *vma)
7108c2ecf20Sopenharmony_ci{
7118c2ecf20Sopenharmony_ci	return -EINVAL;
7128c2ecf20Sopenharmony_ci}
7138c2ecf20Sopenharmony_ci
7148c2ecf20Sopenharmony_cistatic const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
7158c2ecf20Sopenharmony_ci	.get_pages = etnaviv_gem_userptr_get_pages,
7168c2ecf20Sopenharmony_ci	.release = etnaviv_gem_userptr_release,
7178c2ecf20Sopenharmony_ci	.vmap = etnaviv_gem_vmap_impl,
7188c2ecf20Sopenharmony_ci	.mmap = etnaviv_gem_userptr_mmap_obj,
7198c2ecf20Sopenharmony_ci};
7208c2ecf20Sopenharmony_ci
7218c2ecf20Sopenharmony_ciint etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
7228c2ecf20Sopenharmony_ci	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
7238c2ecf20Sopenharmony_ci{
7248c2ecf20Sopenharmony_ci	struct etnaviv_gem_object *etnaviv_obj;
7258c2ecf20Sopenharmony_ci	int ret;
7268c2ecf20Sopenharmony_ci
7278c2ecf20Sopenharmony_ci	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
7288c2ecf20Sopenharmony_ci				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
7298c2ecf20Sopenharmony_ci	if (ret)
7308c2ecf20Sopenharmony_ci		return ret;
7318c2ecf20Sopenharmony_ci
7328c2ecf20Sopenharmony_ci	lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);
7338c2ecf20Sopenharmony_ci
7348c2ecf20Sopenharmony_ci	etnaviv_obj->userptr.ptr = ptr;
7358c2ecf20Sopenharmony_ci	etnaviv_obj->userptr.mm = current->mm;
7368c2ecf20Sopenharmony_ci	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
7378c2ecf20Sopenharmony_ci
7388c2ecf20Sopenharmony_ci	etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
7398c2ecf20Sopenharmony_ci
7408c2ecf20Sopenharmony_ci	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);
7418c2ecf20Sopenharmony_ci
7428c2ecf20Sopenharmony_ci	/* drop reference from allocate - handle holds it now */
7438c2ecf20Sopenharmony_ci	drm_gem_object_put(&etnaviv_obj->base);
7448c2ecf20Sopenharmony_ci	return ret;
7458c2ecf20Sopenharmony_ci}
746