drm_gem_shmem_helper.c

    // SPDX-License-Identifier: GPL-2.0
    /*
     * Copyright 2018 Noralf Trønnes
     */
    
    #include <linux/dma-buf.h>
    #include <linux/export.h>
    #include <linux/module.h>
    #include <linux/mutex.h>
    #include <linux/shmem_fs.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>
    
    #ifdef CONFIG_X86
    #include <asm/set_memory.h>
    #endif
    
    #include <drm/drm.h>
    #include <drm/drm_device.h>
    #include <drm/drm_drv.h>
    #include <drm/drm_gem_shmem_helper.h>
    #include <drm/drm_managed.h>
    #include <drm/drm_prime.h>
    #include <drm/drm_print.h>
    
    /**
     * DOC: overview
     *
     * This library provides helpers for GEM objects backed by shmem buffers
     * allocated using anonymous pageable memory.
     *
     * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
     * For GEM callback helpers in struct &drm_gem_object functions, see likewise
     * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
     * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
     */
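    
    /*
     * A minimal, driver-side usage sketch (hypothetical code, not part of this
     * helper library): allocate a shmem-backed object, map it into the kernel,
     * unmap it and drop the reference again. Error handling is abbreviated and
     * the variable names are illustrative only.
     *
     *	struct drm_gem_shmem_object *shmem;
     *	struct iosys_map map;
     *	int ret;
     *
     *	shmem = drm_gem_shmem_create(dev, size);
     *	if (IS_ERR(shmem))
     *		return PTR_ERR(shmem);
     *
     *	dma_resv_lock(shmem->base.resv, NULL);
     *	ret = drm_gem_shmem_vmap(shmem, &map);
     *	dma_resv_unlock(shmem->base.resv);
     *	if (ret) {
     *		drm_gem_object_put(&shmem->base);
     *		return ret;
     *	}
     *
     *	... use map.vaddr ...
     *
     *	dma_resv_lock(shmem->base.resv, NULL);
     *	drm_gem_shmem_vunmap(shmem, &map);
     *	dma_resv_unlock(shmem->base.resv);
     *
     *	drm_gem_object_put(&shmem->base);
     */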
    
    static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
    	.free = drm_gem_shmem_object_free,
    	.print_info = drm_gem_shmem_object_print_info,
    	.pin = drm_gem_shmem_object_pin,
    	.unpin = drm_gem_shmem_object_unpin,
    	.get_sg_table = drm_gem_shmem_object_get_sg_table,
    	.vmap = drm_gem_shmem_object_vmap,
    	.vunmap = drm_gem_shmem_object_vunmap,
    	.mmap = drm_gem_shmem_object_mmap,
    	.vm_ops = &drm_gem_shmem_vm_ops,
    };
    
    static struct drm_gem_shmem_object *
    __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
    {
    	struct drm_gem_shmem_object *shmem;
    	struct drm_gem_object *obj;
    	int ret = 0;
    
    	size = PAGE_ALIGN(size);
    
    	if (dev->driver->gem_create_object) {
    		obj = dev->driver->gem_create_object(dev, size);
    		if (IS_ERR(obj))
    			return ERR_CAST(obj);
    		shmem = to_drm_gem_shmem_obj(obj);
    	} else {
    		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
    		if (!shmem)
    			return ERR_PTR(-ENOMEM);
    		obj = &shmem->base;
    	}
    
    	if (!obj->funcs)
    		obj->funcs = &drm_gem_shmem_funcs;
    
    	if (private) {
    		drm_gem_private_object_init(dev, obj, size);
    		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
    	} else {
    		ret = drm_gem_object_init(dev, obj, size);
    	}
    	if (ret)
    		goto err_free;
    
    	ret = drm_gem_create_mmap_offset(obj);
    	if (ret)
    		goto err_release;
    
    	if (!private) {
    		/*
    		 * Our buffers are kept pinned, so allocating them
    		 * from the MOVABLE zone is a really bad idea, and
    		 * conflicts with CMA. See comments above new_inode()
    		 * why this is required _and_ expected if you're
    		 * going to pin these pages.
    		 */
    		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
    				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
    	}
    
    	return shmem;
    
    err_release:
    	drm_gem_object_release(obj);
    err_free:
    	kfree(obj);
    
    	return ERR_PTR(ret);
    }

    /**
     * drm_gem_shmem_create - Allocate an object with the given size
     * @dev: DRM device
     * @size: Size of the object to allocate
     *
     * This function creates a shmem GEM object.
     *
     * Returns:
     * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
     * error code on failure.
     */
    struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
    {
    	return __drm_gem_shmem_create(dev, size, false);
    }
    EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
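    
    /*
     * Sketch of a driver-provided &drm_driver.gem_create_object hook, as used by
     * __drm_gem_shmem_create() above. The "foo" driver, struct foo_bo and
     * foo_gem_funcs are assumptions for illustration only; the sketch merely
     * shows how a driver can embed struct drm_gem_shmem_object and install its
     * own &drm_gem_object_funcs.
     *
     *	struct foo_bo {
     *		struct drm_gem_shmem_object base;
     *		... driver-private state ...
     *	};
     *
     *	static struct drm_gem_object *
     *	foo_gem_create_object(struct drm_device *dev, size_t size)
     *	{
     *		struct foo_bo *bo;
     *
     *		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
     *		if (!bo)
     *			return ERR_PTR(-ENOMEM);
     *
     *		bo->base.base.funcs = &foo_gem_funcs;
     *		bo->base.map_wc = true;
     *
     *		return &bo->base.base;
     *	}
     */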
    
    static void drm_gem_shmem_resv_assert_held(struct drm_gem_shmem_object *shmem)
    {
    	/*
    	 * Destroying the object is a special case.. drm_gem_shmem_free()
    	 * calls many things that WARN_ON if the obj lock is not held.  But
    	 * acquiring the obj lock in drm_gem_shmem_free() can cause a locking
    	 * order inversion between reservation_ww_class_mutex and fs_reclaim.
    	 *
    	 * This deadlock is not actually possible, because no one should
    	 * be already holding the lock when drm_gem_shmem_free() is called.
    	 * Unfortunately lockdep is not aware of this detail.  So when the
    	 * refcount drops to zero, we pretend it is already locked.
    	 */
    	if (kref_read(&shmem->base.refcount))
    		dma_resv_assert_held(shmem->base.resv);
    }
    
    static bool drm_gem_shmem_is_evictable(struct drm_gem_shmem_object *shmem)
    {
    	dma_resv_assert_held(shmem->base.resv);
    
    	return (shmem->madv >= 0) && shmem->base.funcs->evict &&
    		shmem->pages_use_count && !shmem->pages_pin_count &&
    		!shmem->base.dma_buf && !shmem->base.import_attach &&
    		shmem->sgt && !shmem->evicted;
    }
    
    static void
    drm_gem_shmem_update_pages_state(struct drm_gem_shmem_object *shmem)
    {
    	struct drm_gem_object *obj = &shmem->base;
    	struct drm_gem_shmem *gem_shmem = obj->dev->shmem_mm;
    	struct drm_gem_shmem_shrinker *gem_shrinker;

    	drm_gem_shmem_resv_assert_held(shmem);

    	/* don't dereference shmem_mm before checking that it has been set up */
    	if (!gem_shmem || obj->import_attach)
    		return;

    	gem_shrinker = &gem_shmem->shrinker;
    
    	if (shmem->madv < 0)
    		drm_gem_lru_remove(&shmem->base);
    	else if (drm_gem_shmem_is_evictable(shmem) || drm_gem_shmem_is_purgeable(shmem))
    		drm_gem_lru_move_tail(&gem_shrinker->lru_evictable, &shmem->base);
    	else if (shmem->evicted)
    		drm_gem_lru_move_tail(&gem_shrinker->lru_evicted, &shmem->base);
    	else if (!shmem->pages)
    		drm_gem_lru_remove(&shmem->base);
    	else
    		drm_gem_lru_move_tail(&gem_shrinker->lru_pinned, &shmem->base);
    }
    
    /**
     * drm_gem_shmem_free - Free resources associated with a shmem GEM object
     * @shmem: shmem GEM object to free
     *
     * This function cleans up the GEM object state and frees the memory used to
     * store the object itself.
     */
    void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
    {
    	struct drm_gem_object *obj = &shmem->base;
    
    	if (obj->import_attach) {
    		drm_prime_gem_destroy(obj, shmem->sgt);
    	} else {
    		/* take out shmem GEM object from the memory shrinker */
    		drm_gem_shmem_madvise(shmem, -1);
    
    		drm_WARN_ON(obj->dev, shmem->vmap_use_count);
    
    		if (shmem->sgt) {
    			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
    					  DMA_BIDIRECTIONAL, 0);
    			sg_free_table(shmem->sgt);
    			kfree(shmem->sgt);
    		}
    		if (shmem->pages_use_count)
    			drm_gem_shmem_put_pages(shmem);
    
    		drm_WARN_ON(obj->dev, shmem->pages_use_count);
    	}
    
    	drm_gem_object_release(obj);
    	kfree(shmem);
    }
    EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
    
    static int
    drm_gem_shmem_acquire_pages(struct drm_gem_shmem_object *shmem)
    {
    	struct drm_gem_object *obj = &shmem->base;
    	struct page **pages;
    
    	dma_resv_assert_held(shmem->base.resv);
    
    	if (shmem->madv < 0) {
    		drm_WARN_ON(obj->dev, shmem->pages);
    		return -ENOMEM;
    	}
    
    	if (shmem->pages) {
    		drm_WARN_ON(obj->dev, !shmem->evicted);
    		return 0;
    	}
    
    	if (drm_WARN_ON(obj->dev, !shmem->pages_use_count))
    		return -EINVAL;
    
    	pages = drm_gem_get_pages(obj);
    	if (IS_ERR(pages)) {
    		drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
    			    PTR_ERR(pages));
    		return PTR_ERR(pages);
    	}
    
    	/*
    	 * TODO: Allocating WC pages which are correctly flushed is only
    	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
    	 * ttm_pool.c could use.
    	 */
    #ifdef CONFIG_X86
    	if (shmem->map_wc)
    		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
    #endif
    
    	shmem->pages = pages;
    
    	return 0;
    }
    
    static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
    {
    	int err;
    
    	dma_resv_assert_held(shmem->base.resv);
    
    	if (shmem->madv < 0)
    		return -ENOMEM;
    
    	if (shmem->pages_use_count++ > 0) {
    		err = drm_gem_shmem_swap_in(shmem);
    		if (err)
    			goto err_zero_use;
    
    		return 0;
    	}
    
    	err = drm_gem_shmem_acquire_pages(shmem);
    	if (err)
    		goto err_zero_use;
    
    	drm_gem_shmem_update_pages_state(shmem);
    
    	return 0;
    
    err_zero_use:
    	shmem->pages_use_count = 0;
    
    	return err;
    }
    
    static void
    drm_gem_shmem_release_pages(struct drm_gem_shmem_object *shmem)
    {
    	struct drm_gem_object *obj = &shmem->base;
    
    	if (!shmem->pages) {
    		drm_WARN_ON(obj->dev, !shmem->evicted && shmem->madv >= 0);
    		return;
    	}
    
    #ifdef CONFIG_X86
    	if (shmem->map_wc)
    		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
    #endif
    
    	drm_gem_put_pages(obj, shmem->pages,
    			  shmem->pages_mark_dirty_on_put,
    			  shmem->pages_mark_accessed_on_put);
    	shmem->pages = NULL;
    }
    
    /*
     * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
     * @shmem: shmem GEM object
     *
     * This function decreases the use count and puts the backing pages when use drops to zero.
     */
    void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
    {
    	struct drm_gem_object *obj = &shmem->base;
    
    	drm_gem_shmem_resv_assert_held(shmem);
    
    	if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
    		return;
    
    	if (--shmem->pages_use_count > 0)
    		return;
    
    	drm_gem_shmem_release_pages(shmem);
    
    	drm_gem_shmem_update_pages_state(shmem);
    }
    EXPORT_SYMBOL(drm_gem_shmem_put_pages);
    
    /**
     * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
     * @shmem: shmem GEM object
     *
     * This function makes sure the backing pages are pinned in memory while the
     * buffer is exported.
     *
     * Returns:
     * 0 on success or a negative error code on failure.
     */
    int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
    {
    	struct drm_gem_object *obj = &shmem->base;
    	int ret;
    
    	dma_resv_assert_held(shmem->base.resv);
    
    	drm_WARN_ON(obj->dev, obj->import_attach);
    
    	ret = drm_gem_shmem_get_pages(shmem);
    	if (!ret)
    		shmem->pages_pin_count++;
    
    	return ret;
    }
    EXPORT_SYMBOL(drm_gem_shmem_pin);
    
    /**
     * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
     * @shmem: shmem GEM object
     *
     * This function removes the requirement that the backing pages are pinned in
     * memory.
     */
    void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
    {
    	struct drm_gem_object *obj = &shmem->base;
    
    	dma_resv_assert_held(shmem->base.resv);
    
    	drm_WARN_ON(obj->dev, obj->import_attach);
    
    	drm_gem_shmem_put_pages(shmem);
    
    	shmem->pages_pin_count--;
    }
    EXPORT_SYMBOL(drm_gem_shmem_unpin);
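    
    /*
     * A short, hedged example of the pin/unpin pattern: both helpers assert that
     * the object's reservation lock is held, so a caller outside the GEM
     * callbacks is expected to take it explicitly. The surrounding code is
     * hypothetical.
     *
     *	dma_resv_lock(shmem->base.resv, NULL);
     *	ret = drm_gem_shmem_pin(shmem);
     *	dma_resv_unlock(shmem->base.resv);
     *	if (ret)
     *		return ret;
     *
     *	... hand the buffer to the hardware ...
     *
     *	dma_resv_lock(shmem->base.resv, NULL);
     *	drm_gem_shmem_unpin(shmem);
     *	dma_resv_unlock(shmem->base.resv);
     */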
    
    /*
     * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
     * @shmem: shmem GEM object
     * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
     *       store.
     *
     * This function makes sure that a contiguous kernel virtual address mapping
     * exists for the buffer backing the shmem GEM object. It hides the differences
     * between dma-buf imported and natively allocated objects.
     *
     * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
     *
     * Returns:
     * 0 on success or a negative error code on failure.
     */
    int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
    		       struct iosys_map *map)
    {
    	struct drm_gem_object *obj = &shmem->base;
    	int ret = 0;
    
    	if (obj->import_attach) {
    		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
    		if (!ret) {
    			if (drm_WARN_ON(obj->dev, map->is_iomem)) {
    				dma_buf_vunmap(obj->import_attach->dmabuf, map);
    				return -EIO;
    			}
    		}
    	} else {
    		pgprot_t prot = PAGE_KERNEL;
    
    		dma_resv_assert_held(shmem->base.resv);
    
    		if (shmem->vmap_use_count++ > 0) {
    			iosys_map_set_vaddr(map, shmem->vaddr);
    			return 0;
    		}
    
    		ret = drm_gem_shmem_pin(shmem);
    		if (ret)
    			goto err_zero_use;
    
    		if (shmem->map_wc)
    			prot = pgprot_writecombine(prot);
    		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
    				    VM_MAP, prot);
    		if (!shmem->vaddr)
    			ret = -ENOMEM;
    		else
    			iosys_map_set_vaddr(map, shmem->vaddr);
    	}
    
    	if (ret) {
    		drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
    		goto err_put_pages;
    	}
    
    	return 0;
    
    err_put_pages:
    	if (!obj->import_attach)
    		drm_gem_shmem_unpin(shmem);
    err_zero_use:
    	shmem->vmap_use_count = 0;
    
    	return ret;
    }
    EXPORT_SYMBOL(drm_gem_shmem_vmap);
    
    /*
     * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
     * @shmem: shmem GEM object
     * @map: Kernel virtual address where the SHMEM GEM object was mapped
     *
     * This function cleans up a kernel virtual address mapping acquired by
     * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
     * zero.
     *
     * This function hides the differences between dma-buf imported and natively
     * allocated objects.
     */
    void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
    			  struct iosys_map *map)
    {
    	struct drm_gem_object *obj = &shmem->base;
    
    	if (obj->import_attach) {
    		dma_buf_vunmap(obj->import_attach->dmabuf, map);
    	} else {
    		dma_resv_assert_held(shmem->base.resv);
    
    		if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
    			return;
    
    		if (--shmem->vmap_use_count > 0)
    			return;
    
    		vunmap(shmem->vaddr);
    		drm_gem_shmem_unpin(shmem);
    	}
    
    	shmem->vaddr = NULL;
    }
    EXPORT_SYMBOL(drm_gem_shmem_vunmap);
    
    static struct drm_gem_shmem_object *
    drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
    				 struct drm_device *dev, size_t size,
    				 uint32_t *handle)
    {
    	struct drm_gem_shmem_object *shmem;
    	int ret;
    
    	shmem = drm_gem_shmem_create(dev, size);
    	if (IS_ERR(shmem))
    		return shmem;
    
    	/*
    	 * Allocate an ID in the IDR table where the object is registered;
    	 * the returned handle is the ID that userspace sees.
    	 */
    	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
    	/* drop reference from allocate - handle holds it now. */
    	drm_gem_object_put(&shmem->base);
    	if (ret)
    		return ERR_PTR(ret);
    
    	return shmem;
    }
    
    /* Update madvise status, returns true if the object has not been purged,
     * false otherwise.
     */
    int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
    {
    	drm_gem_shmem_resv_assert_held(shmem);
    
    	if (shmem->madv >= 0)
    		shmem->madv = madv;
    
    	madv = shmem->madv;
    
    	drm_gem_shmem_update_pages_state(shmem);
    
    	return (madv >= 0);
    }
    EXPORT_SYMBOL(drm_gem_shmem_madvise);
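    
    /*
     * Hedged sketch of a madvise-style ioctl handler built on this helper; the
     * ioctl argument layout (args->madv, args->retained) is an assumption for
     * illustration, not an API defined by this file.
     *
     *	dma_resv_lock(shmem->base.resv, NULL);
     *	args->retained = drm_gem_shmem_madvise(shmem, args->madv);
     *	dma_resv_unlock(shmem->base.resv);
     */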
    
    /**
     * drm_gem_shmem_swap_in() - Moves shmem GEM back to memory and enables
     *                           hardware access to the memory.
     * @shmem: shmem GEM object
     *
     * This function moves shmem GEM back to memory if it was previously evicted
     * by the memory shrinker. The GEM is ready to use on success.
     *
     * Returns:
     * 0 on success or a negative error code on failure.
     */
    int drm_gem_shmem_swap_in(struct drm_gem_shmem_object *shmem)
    {
    	struct drm_gem_object *obj = &shmem->base;
    	struct sg_table *sgt;
    	int err;
    
    	dma_resv_assert_held(shmem->base.resv);
    
    	if (shmem->evicted) {
    		err = drm_gem_shmem_acquire_pages(shmem);
    		if (err)
    			return err;
    
    		sgt = drm_gem_shmem_get_sg_table(shmem);
    		if (IS_ERR(sgt))
    			return PTR_ERR(sgt);
    
    		err = dma_map_sgtable(obj->dev->dev, sgt,
    				      DMA_BIDIRECTIONAL, 0);
    		if (err) {
    			sg_free_table(sgt);
    			kfree(sgt);
    			return err;
    		}
    
    		shmem->sgt = sgt;
    		shmem->evicted = false;
    
    		drm_gem_shmem_update_pages_state(shmem);
    	}
    
    	if (!shmem->pages)
    		return -ENOMEM;
    
    	return 0;
    }
    EXPORT_SYMBOL_GPL(drm_gem_shmem_swap_in);
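    
    /*
     * A minimal sketch of calling drm_gem_shmem_swap_in() before handing a
     * possibly-evicted buffer back to the hardware; the reservation lock must be
     * held, as the assertion in the function requires. Surrounding code is
     * hypothetical.
     *
     *	dma_resv_lock(shmem->base.resv, NULL);
     *	err = drm_gem_shmem_swap_in(shmem);
     *	dma_resv_unlock(shmem->base.resv);
     *	if (err)
     *		return err;
     */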
    
    static void drm_gem_shmem_unpin_pages(struct drm_gem_shmem_object *shmem)
    {
    	struct drm_gem_object *obj = &shmem->base;
    	struct drm_device *dev = obj->dev;
    
    	if (shmem->evicted)
    		return;
    
    	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
    	drm_gem_shmem_release_pages(shmem);
    	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
    
    	sg_free_table(shmem->sgt);
    	kfree(shmem->sgt);
    	shmem->sgt = NULL;
    }
    
    /**
     * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
     * @file: DRM file structure to create the dumb buffer for
     * @dev: DRM device
     * @args: IOCTL data
     *
     * This function computes the pitch of the dumb buffer and rounds it up to an
     * integer number of bytes per pixel. Drivers for hardware that doesn't have
     * any additional restrictions on the pitch can directly use this function as
     * their &drm_driver.dumb_create callback.
     *
     * For hardware with additional restrictions, drivers can adjust the fields
     * set up by userspace before calling into this function.
     *
     * Returns:
     * 0 on success or a negative error code on failure.
     */
    int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
    			      struct drm_mode_create_dumb *args)
    {
    	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
    	struct drm_gem_shmem_object *shmem;
    
    	if (!args->pitch || !args->size) {
    		args->pitch = min_pitch;
    		args->size = PAGE_ALIGN(args->pitch * args->height);
    	} else {
    		/* ensure sane minimum values */
    		if (args->pitch < min_pitch)
    			args->pitch = min_pitch;
    		if (args->size < args->pitch * args->height)
    			args->size = PAGE_ALIGN(args->pitch * args->height);
    	}
    
    	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
    
    	return PTR_ERR_OR_ZERO(shmem);
    }
    EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
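    
    /*
     * Worked example of the pitch/size computation above, assuming 4 KiB pages:
     * for width = 800, height = 600, bpp = 32 and no pitch/size supplied by
     * userspace,
     *
     *	min_pitch = DIV_ROUND_UP(800 * 32, 8) = 3200 bytes
     *	size      = PAGE_ALIGN(3200 * 600)    = PAGE_ALIGN(1920000) = 1921024 bytes
     */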
    
    static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
    {
    	struct vm_area_struct *vma = vmf->vma;
    	struct drm_gem_object *obj = vma->vm_private_data;
    	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
    	loff_t num_pages = obj->size >> PAGE_SHIFT;
    	vm_fault_t ret;
    	struct page *page;
    	pgoff_t page_offset;
    	bool pages_unpinned;
    	int err;
    
    	/* We don't use vmf->pgoff since that has the fake offset */
    	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
    
    	dma_resv_lock(shmem->base.resv, NULL);
    
    	/* Sanity-check that we have the pages pointer when it should be present */
    	pages_unpinned = (shmem->evicted || shmem->madv < 0 || !shmem->pages_use_count);
    	drm_WARN_ON_ONCE(obj->dev, !shmem->pages ^ pages_unpinned);
    
    	if (page_offset >= num_pages || (!shmem->pages && !shmem->evicted)) {
    		ret = VM_FAULT_SIGBUS;
    	} else {
    		err = drm_gem_shmem_swap_in(shmem);
    		if (err) {
    			ret = VM_FAULT_OOM;
    			goto unlock;
    		}
    
    		page = shmem->pages[page_offset];
    
    		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
    	}
    
    unlock:
    	dma_resv_unlock(shmem->base.resv);
    
    	return ret;
    }
    
    static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
    {
    	struct drm_gem_object *obj = vma->vm_private_data;
    	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
    
    	drm_WARN_ON(obj->dev, obj->import_attach);
    
    	dma_resv_lock(shmem->base.resv, NULL);
    
    	/*
    	 * A use-count reference was already taken when the buffer was first
    	 * mmap'd; vm_open() grabs an additional reference for the new mm the
    	 * VMA is copied into (e.g. on fork()). Bump the count even if
    	 * re-acquiring the pages fails (the object may have been purged), so
    	 * the put in drm_gem_shmem_vm_close() stays balanced.
    	 */
    	if (drm_gem_shmem_get_pages(shmem))
    		shmem->pages_use_count++;
    
    	drm_gem_shmem_update_pages_state(shmem);
    	dma_resv_unlock(shmem->base.resv);
    
    	drm_gem_vm_open(vma);
    }
    
    static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
    {
    	struct drm_gem_object *obj = vma->vm_private_data;
    	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
    
    	dma_resv_lock(shmem->base.resv, NULL);
    	drm_gem_shmem_put_pages(shmem);
    	dma_resv_unlock(shmem->base.resv);
    
    	drm_gem_vm_close(vma);
    }
    
    const struct vm_operations_struct drm_gem_shmem_vm_ops = {
    	.fault = drm_gem_shmem_fault,
    	.open = drm_gem_shmem_vm_open,
    	.close = drm_gem_shmem_vm_close,
    };
    EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
    
    /**
     * drm_gem_shmem_mmap - Memory-map a shmem GEM object
     * @shmem: shmem GEM object
     * @vma: VMA for the area to be mapped
     *
     * This function implements an augmented version of the GEM DRM file mmap
     * operation for shmem objects.
     *
     * Returns:
     * 0 on success or a negative error code on failure.
     */
    int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
    {
    	struct drm_gem_object *obj = &shmem->base;
    	int ret;
    
    	if (obj->import_attach) {
    		/* Drop the reference drm_gem_mmap_obj() acquired.*/
    		drm_gem_object_put(obj);
    		vma->vm_private_data = NULL;
    
    		return dma_buf_mmap(obj->dma_buf, vma, 0);
    	}
    
    	dma_resv_lock(shmem->base.resv, NULL);
    	ret = drm_gem_shmem_get_pages(shmem);
    	dma_resv_unlock(shmem->base.resv);
    
    	if (ret) {
    		drm_gem_vm_close(vma);
    		return ret;
    	}
    
    	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
    	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
    	if (shmem->map_wc)
    		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
    
    	return 0;
    }
    EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
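    
    /*
     * For reference, a hedged userspace-side sketch of how a dumb buffer created
     * with drm_gem_shmem_dumb_create() typically ends up in this mmap path: the
     * fake offset comes from the DRM_IOCTL_MODE_MAP_DUMB ioctl and is passed to
     * mmap() on the DRM fd. Error handling is omitted.
     *
     *	struct drm_mode_map_dumb map_arg = { .handle = handle };
     *
     *	ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map_arg);
     *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
     *		   drm_fd, map_arg.offset);
     */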
    
    /**
     * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
     * @shmem: shmem GEM object
     * @p: DRM printer
     * @indent: Tab indentation level
     */
    void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
    			      struct drm_printer *p, unsigned int indent)
    {
    	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
    
    	if (shmem->base.import_attach)
    		drm_printf_indent(p, indent, "vmap_use_count=%u\n",
    				  shmem->base.dma_buf->vmapping_counter);
    	else
    		drm_printf_indent(p, indent, "vmap_use_count=%u\n",
    				  shmem->vmap_use_count);
    
    	drm_printf_indent(p, indent, "evicted=%d\n", shmem->evicted);
    	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
    	drm_printf_indent(p, indent, "madv=%d\n", shmem->madv);
    }
    EXPORT_SYMBOL(drm_gem_shmem_print_info);
    
    /**
     * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
     *                              pages for a shmem GEM object
     * @shmem: shmem GEM object
     *
     * This function exports a scatter/gather table suitable for PRIME usage by
     * calling the standard DMA mapping API.
     *
     * Drivers that need to acquire a scatter/gather table for an object should
     * call drm_gem_shmem_get_pages_sgt() instead.
     *
     * Returns:
     * A pointer to the scatter/gather table of pinned pages or error pointer on failure.
     */
    struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
    {
    	struct drm_gem_object *obj = &shmem->base;
    
    	drm_WARN_ON(obj->dev, obj->import_attach);
    
    	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
    }
    EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
    
    /**
     * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
     *				 scatter/gather table for a shmem GEM object.
     * @shmem: shmem GEM object
     *
     * This function returns a scatter/gather table suitable for driver usage. If
     * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
     * table created.
     *
     * This is the main function for drivers to get at backing storage, and it
     * hides the differences between dma-buf imported and natively allocated
     * objects.
     * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
     *
     * Returns:
     * A pointer to the scatter/gather table of pinned pages or errno on failure.
     */
    struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
    {
    	struct drm_gem_object *obj = &shmem->base;
    	int ret;
    	struct sg_table *sgt;
    
    	if (shmem->sgt)
    		return shmem->sgt;
    
    	drm_WARN_ON(obj->dev, obj->import_attach);
    
    	dma_resv_lock(shmem->base.resv, NULL);
    
    	ret = drm_gem_shmem_get_pages(shmem);
    	if (ret)
    		goto err_unlock;
    
    	sgt = drm_gem_shmem_get_sg_table(shmem);
    	if (IS_ERR(sgt)) {
    		ret = PTR_ERR(sgt);
    		goto err_put_pages;
    	}
    	/* Map the pages for use by the h/w. */
    	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
    	if (ret)
    		goto err_free_sgt;
    
    	shmem->sgt = sgt;
    
    	drm_gem_shmem_update_pages_state(shmem);
    
    	dma_resv_unlock(shmem->base.resv);
    
    	return sgt;
    
    err_free_sgt:
    	sg_free_table(sgt);
    	kfree(sgt);
    err_put_pages:
    	drm_gem_shmem_put_pages(shmem);
    err_unlock:
    	dma_resv_unlock(shmem->base.resv);
    	return ERR_PTR(ret);
    }
    EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
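    
    /*
     * Hedged example of typical driver usage: fetch the dma-mapped sg_table and
     * walk it to program a device MMU. The loop body is only a placeholder.
     *
     *	struct sg_table *sgt;
     *	struct scatterlist *sg;
     *	unsigned int i;
     *
     *	sgt = drm_gem_shmem_get_pages_sgt(shmem);
     *	if (IS_ERR(sgt))
     *		return PTR_ERR(sgt);
     *
     *	for_each_sgtable_dma_sg(sgt, sg, i) {
     *		... map sg_dma_address(sg) / sg_dma_len(sg) into the device MMU ...
     *	}
     */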
    
    /**
     * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
     *                 another driver's scatter/gather table of pinned pages
     * @dev: Device to import into
     * @attach: DMA-BUF attachment
     * @sgt: Scatter/gather table of pinned pages
     *
     * This function imports a scatter/gather table exported via DMA-BUF by
     * another driver. Drivers that use the shmem helpers should set this as their
     * &drm_driver.gem_prime_import_sg_table callback.
     *
     * Returns:
     * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
     * error code on failure.
     */
    struct drm_gem_object *
    drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
    				    struct dma_buf_attachment *attach,
    				    struct sg_table *sgt)
    {
    	size_t size = PAGE_ALIGN(attach->dmabuf->size);
    	struct drm_gem_shmem_object *shmem;
    
    	shmem = __drm_gem_shmem_create(dev, size, true);
    	if (IS_ERR(shmem))
    		return ERR_CAST(shmem);
    
    	shmem->sgt = sgt;
    
    	DRM_DEBUG_PRIME("size = %zu\n", size);
    
    	return &shmem->base;
    }
    EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
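    
    /*
     * Sketch of wiring the shmem helpers into a driver structure; the "foo"
     * driver is hypothetical, and only the callbacks named in the kernel-doc
     * above and for drm_gem_shmem_dumb_create() are shown.
     *
     *	static const struct drm_driver foo_drm_driver = {
     *		...
     *		.dumb_create		   = drm_gem_shmem_dumb_create,
     *		.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
     *		...
     *	};
     */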
    
    static struct drm_gem_shmem_shrinker *
    to_drm_shrinker(struct shrinker *shrinker)
    {
    	return container_of(shrinker, struct drm_gem_shmem_shrinker, base);
    }
    
    static unsigned long
    drm_gem_shmem_shrinker_count_objects(struct shrinker *shrinker,
    				     struct shrink_control *sc)
    {
    	struct drm_gem_shmem_shrinker *gem_shrinker = to_drm_shrinker(shrinker);
    	unsigned long count = gem_shrinker->lru_evictable.count;
    
    	/*
    	 * SHRINK_EMPTY is a special return value understood by the shrinker
    	 * core, so clamp the object count below it; an empty evictable LRU is
    	 * reported as SHRINK_EMPTY.
    	 */
    	if (count >= SHRINK_EMPTY)
    		return SHRINK_EMPTY - 1;
    
    	return count ?: SHRINK_EMPTY;
    }
    
    void drm_gem_shmem_evict(struct drm_gem_shmem_object *shmem)
    {
    	struct drm_gem_object *obj = &shmem->base;
    
    	drm_WARN_ON(obj->dev, !drm_gem_shmem_is_evictable(shmem));
    	drm_WARN_ON(obj->dev, shmem->evicted);
    
    	drm_gem_shmem_unpin_pages(shmem);
    
    	shmem->evicted = true;
    	drm_gem_shmem_update_pages_state(shmem);
    }
    EXPORT_SYMBOL_GPL(drm_gem_shmem_evict);
    
    void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
    {
    	struct drm_gem_object *obj = &shmem->base;
    
    	drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));
    
    	drm_gem_shmem_unpin_pages(shmem);
    	drm_gem_free_mmap_offset(obj);
    
    	/* Our goal here is to return as much of the memory as
    	 * is possible back to the system as we are called from OOM.
    	 * To do this we must instruct the shmfs to drop all of its
    	 * backing pages, *now*.
    	 */
    	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
    
    	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
    
    	shmem->madv = -1;
    	shmem->evicted = false;
    	drm_gem_shmem_update_pages_state(shmem);
    }
    EXPORT_SYMBOL_GPL(drm_gem_shmem_purge);
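    
    /*
     * A hedged sketch of the &drm_gem_object_funcs.evict callback that the
     * shrinker paths below end up invoking via drm_gem_object_evict(): the
     * callback decides between purging and evicting. The "foo" naming and the
     * exact callback signature are assumptions for illustration.
     *
     *	static bool foo_gem_evict(struct drm_gem_object *obj)
     *	{
     *		struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
     *
     *		if (drm_gem_shmem_is_purgeable(shmem))
     *			drm_gem_shmem_purge(shmem);
     *		else
     *			drm_gem_shmem_evict(shmem);
     *
     *		return true;
     *	}
     */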
    
    static bool drm_gem_is_busy(struct drm_gem_object *obj)
    {
    	return !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ);
    }
    
    static bool drm_gem_shmem_shrinker_evict(struct drm_gem_object *obj)
    {
    	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
    
    	if (!drm_gem_shmem_is_evictable(shmem) ||
    	    get_nr_swap_pages() < obj->size >> PAGE_SHIFT ||
    	    drm_gem_is_busy(obj))
    		return false;
    
    	return drm_gem_object_evict(obj);
    }
    
    static bool drm_gem_shmem_shrinker_purge(struct drm_gem_object *obj)
    {
    	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
    
    	if (!drm_gem_shmem_is_purgeable(shmem) ||
    	    drm_gem_is_busy(obj))
    		return false;
    
    	return drm_gem_object_evict(obj);
    }
    
    static unsigned long
    drm_gem_shmem_shrinker_scan_objects(struct shrinker *shrinker,
    				    struct shrink_control *sc)
    {
    	struct drm_gem_shmem_shrinker *gem_shrinker = to_drm_shrinker(shrinker);
    	unsigned long nr_to_scan = sc->nr_to_scan;
    	unsigned long remaining = 0;
    	unsigned long freed = 0;
    
    	/* purge as many objects as we can */
    	freed += drm_gem_lru_scan(&gem_shrinker->lru_evictable,
    				  nr_to_scan, &remaining,
    				  drm_gem_shmem_shrinker_purge);
    
    	/* evict as many objects as we can */
    	if (freed < nr_to_scan)
    		freed += drm_gem_lru_scan(&gem_shrinker->lru_evictable,
    					  nr_to_scan - freed, &remaining,
    					  drm_gem_shmem_shrinker_evict);
    
    	return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;
    }
    
    static int drm_gem_shmem_shrinker_init(struct drm_gem_shmem *shmem_mm,
    				       const char *shrinker_name)
    {
    	struct drm_gem_shmem_shrinker *gem_shrinker = &shmem_mm->shrinker;
    	int err;
    
    	gem_shrinker->base.count_objects = drm_gem_shmem_shrinker_count_objects;
    	gem_shrinker->base.scan_objects = drm_gem_shmem_shrinker_scan_objects;
    	gem_shrinker->base.seeks = DEFAULT_SEEKS;
    
    	mutex_init(&gem_shrinker->lock);
    	drm_gem_lru_init(&gem_shrinker->lru_evictable, &gem_shrinker->lock);
    	drm_gem_lru_init(&gem_shrinker->lru_evicted, &gem_shrinker->lock);
    	drm_gem_lru_init(&gem_shrinker->lru_pinned, &gem_shrinker->lock);
    
    	err = register_shrinker(&gem_shrinker->base, "%s", shrinker_name);
    	if (err) {
    		mutex_destroy(&gem_shrinker->lock);
    		return err;
    	}
    
    	return 0;
    }
    
    static void drm_gem_shmem_shrinker_release(struct drm_device *dev,
    					   struct drm_gem_shmem *shmem_mm)
    {
    	struct drm_gem_shmem_shrinker *gem_shrinker = &shmem_mm->shrinker;
    
    	unregister_shrinker(&gem_shrinker->base);
    	drm_WARN_ON(dev, !list_empty(&gem_shrinker->lru_evictable.list));
    	drm_WARN_ON(dev, !list_empty(&gem_shrinker->lru_evicted.list));
    	drm_WARN_ON(dev, !list_empty(&gem_shrinker->lru_pinned.list));
    	mutex_destroy(&gem_shrinker->lock);
    }
    
    static int drm_gem_shmem_init(struct drm_device *dev)
    {
    	int err;
    
    	if (WARN_ON(dev->shmem_mm))
    		return -EBUSY;
    
    	dev->shmem_mm = kzalloc(sizeof(*dev->shmem_mm), GFP_KERNEL);
    	if (!dev->shmem_mm)
    		return -ENOMEM;
    
    	err = drm_gem_shmem_shrinker_init(dev->shmem_mm, dev->unique);
    	if (err)
    		goto free_gem_shmem;
    
    	return 0;
    
    free_gem_shmem:
    	kfree(dev->shmem_mm);
    	dev->shmem_mm = NULL;
    
    	return err;
    }
    
    static void drm_gem_shmem_release(struct drm_device *dev, void *ptr)
    {
    	struct drm_gem_shmem *shmem_mm = dev->shmem_mm;
    
    	drm_gem_shmem_shrinker_release(dev, shmem_mm);
    	dev->shmem_mm = NULL;
    	kfree(shmem_mm);
    }
    
    /**
     * drmm_gem_shmem_init() - Initialize drm-shmem internals
     * @dev: DRM device
     *
     * Cleanup is automatically managed as part of DRM device releasing.
     * Calling this function multiple times will result in an error.
     *
     * Returns:
     * 0 on success or a negative error code on failure.
     */
    int drmm_gem_shmem_init(struct drm_device *dev)
    {
    	int err;
    
    	err = drm_gem_shmem_init(dev);
    	if (err)
    		return err;
    
    	err = drmm_add_action_or_reset(dev, drm_gem_shmem_release, NULL);
    	if (err)
    		return err;
    
    	return 0;
    }
    EXPORT_SYMBOL_GPL(drmm_gem_shmem_init);
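    
    /*
     * A minimal sketch of calling drmm_gem_shmem_init() from a driver's probe
     * path, after the DRM device has been allocated; cleanup is then handled by
     * the managed release action registered above. The surrounding probe code is
     * hypothetical.
     *
     *	err = drmm_gem_shmem_init(drm);
     *	if (err)
     *		return err;
     */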
    
    MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
    MODULE_IMPORT_NS(DMA_BUF);
    MODULE_LICENSE("GPL v2");