Skip to content
Snippets Groups Projects

Panfrost handle page mapping failure patch

  • Clone with SSH
  • Clone with HTTPS
  • Embed
  • Share
    The snippet can be accessed without any authentication.
    Authored by Adrián Martínez Larumbe
    Edited
    0001-drm-panfrost-Handle-page-mapping-failure.patch 4.97 KiB
    From 8a9455e69e7e925b45e6c81624cee9a80b5dc686 Mon Sep 17 00:00:00 2001
    From: =?UTF-8?q?Adri=C3=A1n=20Larumbe?= <adrian.larumbe@collabora.com>
    Date: Sat, 9 Sep 2023 00:49:25 +0100
    Subject: [PATCH] drm/panfrost: Handle page mapping failure
    MIME-Version: 1.0
    Content-Type: text/plain; charset=UTF-8
    Content-Transfer-Encoding: 8bit
    
    Right now Panfrost's GPU page fault IRQ assumes the architectural page
    mapping function always succeeds. That might not always be the case, so we
    harvest its return status and unmap whatever we had mapped so far.
    
    Signed-off-by: Adrián Larumbe <adrian.larumbe@collabora.com>
    ---
     drivers/gpu/drm/panfrost/panfrost_mmu.c | 66 +++++++++++++++++--------
     1 file changed, 46 insertions(+), 20 deletions(-)
    
    diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
    index a722b3223649..70656b858d57 100644
    --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
    +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
    @@ -287,13 +287,34 @@ static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
     	pm_runtime_put_autosuspend(pfdev->dev);
     }
     
    +static void mmu_unmap_range(size_t len, u64 iova, bool is_heap,
    +			    struct io_pgtable_ops *ops)
    +{
    +	size_t unmapped_len = 0;
    +
    +	while (unmapped_len < len) {
    +		size_t unmapped_page, pgcount;
    +		size_t pgsize = get_pgsize(iova, len - unmapped_len, &pgcount);
    +
    +		if (is_heap)
    +			pgcount = 1;
    +		if (!is_heap || ops->iova_to_phys(ops, iova)) {
    +			unmapped_page = ops->unmap_pages(ops, iova, pgsize, pgcount, NULL);
    +			WARN_ON(unmapped_page != pgsize * pgcount);
    +		}
    +		iova += pgsize * pgcount;
    +		unmapped_len += pgsize * pgcount;
    +	}
    +}
    +
     static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
    -		      u64 iova, int prot, struct sg_table *sgt)
    +		      u64 iova, int prot, struct sg_table *sgt, bool is_heap)
     {
     	unsigned int count;
     	struct scatterlist *sgl;
     	struct io_pgtable_ops *ops = mmu->pgtbl_ops;
     	u64 start_iova = iova;
    +	int ret;
     
     	for_each_sgtable_dma_sg(sgt, sgl, count) {
     		unsigned long paddr = sg_dma_address(sgl);
    @@ -305,8 +326,13 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
     			size_t pgcount, mapped = 0;
     			size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);
     
    -			ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
    +			ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
     				       GFP_KERNEL, &mapped);
    +			if (ret) {
    +				/* Unmap everything we mapped and bail out */
    +				mmu_unmap_range(mapped, start_iova, is_heap, ops);
    +				return ret;
    +			}
     			/* Don't get stuck if things have gone wrong */
     			mapped = max(mapped, pgsize);
     			iova += mapped;
    @@ -328,6 +354,7 @@ int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
     	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
     	struct sg_table *sgt;
     	int prot = IOMMU_READ | IOMMU_WRITE;
    +	int ret;
     
     	if (WARN_ON(mapping->active))
     		return 0;
    @@ -339,8 +366,13 @@ int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
     	if (WARN_ON(IS_ERR(sgt)))
     		return PTR_ERR(sgt);
     
    -	mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
    -		   prot, sgt);
    +	ret = mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
    +		   prot, sgt, false);
    +	if (ret) {
    +		drm_gem_shmem_put_pages(shmem);
    +		return ret;
    +	}
    +
     	mapping->active = true;
     
     	return 0;
    @@ -354,7 +386,7 @@ void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
     	struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
     	u64 iova = mapping->mmnode.start << PAGE_SHIFT;
     	size_t len = mapping->mmnode.size << PAGE_SHIFT;
    -	size_t unmapped_len = 0;
    +
     
     	if (WARN_ON(!mapping->active))
     		return;
    @@ -362,19 +394,7 @@ void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
     	dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
     		mapping->mmu->as, iova, len);
     
    -	while (unmapped_len < len) {
    -		size_t unmapped_page, pgcount;
    -		size_t pgsize = get_pgsize(iova, len - unmapped_len, &pgcount);
    -
    -		if (bo->is_heap)
    -			pgcount = 1;
    -		if (!bo->is_heap || ops->iova_to_phys(ops, iova)) {
    -			unmapped_page = ops->unmap_pages(ops, iova, pgsize, pgcount, NULL);
    -			WARN_ON(unmapped_page != pgsize * pgcount);
    -		}
    -		iova += pgsize * pgcount;
    -		unmapped_len += pgsize * pgcount;
    -	}
    +	mmu_unmap_range(len, iova, bo->is_heap, ops);
     
     	panfrost_mmu_flush_range(pfdev, mapping->mmu,
     				 mapping->mmnode.start << PAGE_SHIFT, len);
    @@ -520,8 +540,11 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
     	if (ret)
     		goto err_map;
     
    -	mmu_map_sg(pfdev, bomapping->mmu, addr,
    -		   IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
    +	ret = mmu_map_sg(pfdev, bomapping->mmu, addr,
    +			 IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC,
    +			 sgt, true);
    +	if (ret)
    +		goto err_mmu_map_sg;
     
     	bomapping->active = true;
     
    @@ -534,6 +557,9 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
     
     	return 0;
     
    +err_mmu_map_sg:
    +	dma_unmap_sgtable(pfdev->dev, sgt,
    +			  DMA_BIDIRECTIONAL, 0);
     err_map:
     	sg_free_table(sgt);
     err_pages:
    -- 
    2.42.0
    
    0% Loading or .
    You are about to add 0 people to the discussion. Proceed with caution.
    Finish editing this message first!
    Please register or to comment