/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>

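/*
 * ttm_bo_vm_fault_idle - quiesce a pipelined buffer object move before
 * servicing a fault. If bo->moving has not yet signaled, wait for it.
 * When the fault allows retries, the mmap_sem is released for the wait,
 * the reservation is dropped and VM_FAULT_RETRY is returned so that the
 * fault is restarted; otherwise the wait is done inline.
 */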
static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
				struct vm_fault *vmf)
{
	vm_fault_t ret = 0;
	int err = 0;

	if (likely(!bo->moving))
		goto out_unlock;

	/*
	 * Quick non-stalling check for idle.
	 */
	if (dma_fence_is_signaled(bo->moving))
		goto out_clear;

	/*
	 * If possible, avoid waiting for GPU with mmap_sem
	 * held.
	 */
	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
		ret = VM_FAULT_RETRY;
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_unlock;

		ttm_bo_get(bo);
		up_read(&vmf->vma->vm_mm->mmap_sem);
		(void) dma_fence_wait(bo->moving, true);
		dma_resv_unlock(bo->base.resv);
		ttm_bo_put(bo);
		goto out_unlock;
	}

	/*
	 * Ordinary wait.
	 */
	err = dma_fence_wait(bo->moving, true);
	if (unlikely(err != 0)) {
		ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
			VM_FAULT_NOPAGE;
		goto out_unlock;
	}

out_clear:
	dma_fence_put(bo->moving);
	bo->moving = NULL;

out_unlock:
	return ret;
}

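/*
 * ttm_bo_io_mem_pfn - return the pfn backing @page_offset of an io-memory
 * buffer object, using the driver's io_mem_pfn hook if provided and falling
 * back to a linear mapping from the bus base and offset otherwise.
 */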
static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
				       unsigned long page_offset)
{
	struct ttm_bo_device *bdev = bo->bdev;

	if (bdev->driver->io_mem_pfn)
		return bdev->driver->io_mem_pfn(bo, page_offset);

	return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT)
		+ page_offset;
}

/**
 * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback
 * @bo: The buffer object
 * @vmf: The fault structure handed to the callback
 *
 * vm callbacks like fault() and *_mkwrite() allow for the mmap_sem to be dropped
 * during long waits, and after the wait the callback will be restarted. This
 * is to allow other threads using the same virtual memory space concurrent
 * access to map() and unmap() completely unrelated buffer objects. TTM buffer
 * object reservations sometimes wait for GPU and should therefore be
 * considered long waits. This function reserves the buffer object interruptibly,
 * taking this into account. Starvation is avoided by the vm system not
 * allowing too many repeated restarts.
 * This function is intended to be used in customized fault() and _mkwrite()
 * handlers.
 *
 * Return:
 *    0 on success and the bo was reserved.
 *    VM_FAULT_RETRY if a blocking wait was required.
 *    VM_FAULT_NOPAGE if a blocking wait was required but retrying was not allowed.
 */
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
			     struct vm_fault *vmf)
{
	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after waiting
	 * for the buffer to become unreserved.
	 */
	if (unlikely(!dma_resv_trylock(bo->base.resv))) {
		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				ttm_bo_get(bo);
				up_read(&vmf->vma->vm_mm->mmap_sem);
				if (!dma_resv_lock_interruptible(bo->base.resv,
								 NULL))
					dma_resv_unlock(bo->base.resv);
				ttm_bo_put(bo);
			}

			return VM_FAULT_RETRY;
		}

		if (dma_resv_lock_interruptible(bo->base.resv, NULL))
			return VM_FAULT_NOPAGE;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_vm_reserve);
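/*
 * A minimal sketch of how a driver-specific fault handler might use
 * ttm_bo_vm_reserve(); ttm_bo_vm_fault() below is the canonical in-tree
 * user. my_driver_fault() and my_driver_fault_reserved() are hypothetical
 * names used only for illustration:
 *
 *	static vm_fault_t my_driver_fault(struct vm_fault *vmf)
 *	{
 *		struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
 *		vm_fault_t ret;
 *
 *		ret = ttm_bo_vm_reserve(bo, vmf);
 *		if (ret)
 *			return ret;
 *
 *		ret = my_driver_fault_reserved(vmf);
 *		if (ret == VM_FAULT_RETRY &&
 *		    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
 *			return ret;
 *
 *		dma_resv_unlock(bo->base.resv);
 *		return ret;
 *	}
 */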

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/**
 * ttm_bo_vm_insert_huge - Insert a pfn for PUD or PMD faults
 * @vmf: Fault data
 * @bo: The buffer object
 * @page_offset: Page offset from bo start
 * @fault_page_size: The size of the fault in pages.
 * @pgprot: The page protections.
 *
 * Checks whether it is possible to insert a PUD or PMD pfn and, if so,
 * performs the insertion.
 *
 * Return: VM_FAULT_NOPAGE on successful insertion, VM_FAULT_FALLBACK if
 * a huge fault was not possible, or on insertion error.
 */
static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
					struct ttm_buffer_object *bo,
					pgoff_t page_offset,
					pgoff_t fault_page_size,
					pgprot_t pgprot)
{
	pgoff_t i;
	vm_fault_t ret;
	unsigned long pfn;
	pfn_t pfnt;
	struct ttm_tt *ttm = bo->ttm;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	/* Fault should not cross bo boundary. */
	page_offset &= ~(fault_page_size - 1);
	if (page_offset + fault_page_size > bo->num_pages)
		goto out_fallback;

	if (bo->mem.bus.is_iomem)
		pfn = ttm_bo_io_mem_pfn(bo, page_offset);
	else
		pfn = page_to_pfn(ttm->pages[page_offset]);

	/* pfn must be fault_page_size aligned. */
	if ((pfn & (fault_page_size - 1)) != 0)
		goto out_fallback;

	/* Check that memory is contiguous. */
	if (!bo->mem.bus.is_iomem) {
		for (i = 1; i < fault_page_size; ++i) {
			if (page_to_pfn(ttm->pages[page_offset + i]) != pfn + i)
				goto out_fallback;
		}
	} else if (bo->bdev->driver->io_mem_pfn) {
		for (i = 1; i < fault_page_size; ++i) {
			if (ttm_bo_io_mem_pfn(bo, page_offset + i) != pfn + i)
				goto out_fallback;
		}
	}

	pfnt = __pfn_to_pfn_t(pfn, PFN_DEV);
	if (fault_page_size == (HPAGE_PMD_SIZE >> PAGE_SHIFT))
		ret = vmf_insert_pfn_pmd_prot(vmf, pfnt, pgprot, write);
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
	else if (fault_page_size == (HPAGE_PUD_SIZE >> PAGE_SHIFT))
		ret = vmf_insert_pfn_pud_prot(vmf, pfnt, pgprot, write);
#endif
	else
		WARN_ON_ONCE(ret = VM_FAULT_FALLBACK);

	if (ret != VM_FAULT_NOPAGE)
		goto out_fallback;

	return VM_FAULT_NOPAGE;
out_fallback:
	count_vm_event(THP_FAULT_FALLBACK);
	return VM_FAULT_FALLBACK;
}
#else
static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
					struct ttm_buffer_object *bo,
					pgoff_t page_offset,
					pgoff_t fault_page_size,
					pgprot_t pgprot)
{
	return VM_FAULT_FALLBACK;
}
#endif

/**
 * ttm_bo_vm_fault_reserved - TTM fault helper
 * @vmf: The struct vm_fault given as argument to the fault callback
 * @prot: The page protection to be used for this memory area.
 * @num_prefault: Maximum number of prefault pages. The caller may want to
 * specify this based on madvise settings and the size of the GPU object
 * backed by the memory.
 * @fault_page_size: The size of the fault in pages.
 *
 * This function inserts one or more page table entries pointing to the
 * memory backing the buffer object, and then returns a return code
 * instructing the caller to retry the page access.
 *
 * Return:
 *   VM_FAULT_NOPAGE on success or pending signal
 *   VM_FAULT_SIGBUS on unspecified error
 *   VM_FAULT_OOM on out-of-memory
 *   VM_FAULT_RETRY if retryable wait
 */
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
				    pgprot_t prot,
				    pgoff_t num_prefault,
				    pgoff_t fault_page_size)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int err;
	pgoff_t i;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	unsigned long address = vmf->address;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];

	/*
	 * Refuse to fault imported pages. This should be handled
	 * (if at all) by redirecting mmap to the exporter.
	 */
	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
		return VM_FAULT_SIGBUS;

	if (bdev->driver->fault_reserve_notify) {
		struct dma_fence *moving = dma_fence_get(bo->moving);

		err = bdev->driver->fault_reserve_notify(bo);
		switch (err) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTARTSYS:
			return VM_FAULT_NOPAGE;
		default:
			return VM_FAULT_SIGBUS;
		}

		if (bo->moving != moving) {
			spin_lock(&ttm_bo_glob.lru_lock);
			ttm_bo_move_to_lru_tail(bo, NULL);
			spin_unlock(&ttm_bo_glob.lru_lock);
		}
		dma_fence_put(moving);
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */
	ret = ttm_bo_vm_fault_idle(bo, vmf);
	if (unlikely(ret != 0))
		return ret;

	err = ttm_mem_io_lock(man, true);
	if (unlikely(err != 0))
		return VM_FAULT_NOPAGE;
	err = ttm_mem_io_reserve_vm(bo);
	if (unlikely(err != 0)) {
		ret = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
		vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
	page_last = vma_pages(vma) + vma->vm_pgoff -
		drm_vma_node_start(&bo->base.vma_node);

	if (unlikely(page_offset >= bo->num_pages)) {
		ret = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	prot = ttm_io_prot(bo->mem.placement, prot);
	if (!bo->mem.bus.is_iomem) {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false,
			.flags = TTM_OPT_FLAG_FORCE_ALLOC

		};

		ttm = bo->ttm;
		if (ttm_tt_populate(bo->ttm, &ctx)) {
			ret = VM_FAULT_OOM;
			goto out_io_unlock;
		}
	} else {
		/* Iomem should not be marked encrypted */
		prot = pgprot_decrypted(prot);
	}

	/* We don't prefault on huge faults. Yet. */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1) {
		ret = ttm_bo_vm_insert_huge(vmf, bo, page_offset,
					    fault_page_size, prot);
		goto out_io_unlock;
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < num_prefault; ++i) {
		if (bo->mem.bus.is_iomem) {
			pfn = ttm_bo_io_mem_pfn(bo, page_offset);
		} else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				ret = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			page->index = drm_vma_node_start(&bo->base.vma_node) +
				page_offset;
			pfn = page_to_pfn(page);
		}

		/*
		 * Note that the value of @prot at this point may differ from
		 * the value of @vma->vm_page_prot in the caching- and
		 * encryption bits. This is because the exact location of the
		 * data may not be known at mmap() time and may also change
		 * at arbitrary times while the data is mmap'ed.
		 * See vmf_insert_mixed_prot() for a discussion.
		 */
		if (vma->vm_flags & VM_MIXEDMAP)
			ret = vmf_insert_mixed_prot(vma, address,
						    __pfn_to_pfn_t(pfn, PFN_DEV),
						    prot);
		else
			ret = vmf_insert_pfn_prot(vma, address, pfn, prot);

		/* Never error on prefaulted PTEs */
		if (unlikely((ret & VM_FAULT_ERROR))) {
			if (i == 0)
				goto out_io_unlock;
			else
				break;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
	ret = VM_FAULT_NOPAGE;
out_io_unlock:
	ttm_mem_io_unlock(man);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);

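/*
 * ttm_bo_vm_fault - default TTM .fault handler. Reserves the buffer object
 * via ttm_bo_vm_reserve(), services the fault with vma->vm_page_prot through
 * ttm_bo_vm_fault_reserved(), and drops the reservation again, except on the
 * VM_FAULT_RETRY path where it has already been released.
 */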
vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t prot;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	vm_fault_t ret;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	prot = vma->vm_page_prot;
	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

	dma_resv_unlock(bo->base.resv);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_fault);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/**
 * ttm_pgprot_is_wrprotecting - Is a page protection value write-protecting?
 * @prot: The page protection value
 *
 * Return: true if @prot is write-protecting. false otherwise.
 */
static bool ttm_pgprot_is_wrprotecting(pgprot_t prot)
{
	/*
	 * This is meant to say "pgprot_wrprotect(prot) == prot" in a generic
	 * way. Unfortunately there is no generic pgprot_wrprotect.
	 */
	return pte_val(pte_wrprotect(__pte(pgprot_val(prot)))) ==
		pgprot_val(prot);
}

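/*
 * ttm_bo_vm_huge_fault - handle a PMD- or PUD-sized fault by inserting a
 * huge page table entry. Falls back to single-page faults for unsupported
 * entry sizes or when a write hits a write-protected (dirty-tracking or
 * COW) mapping.
 */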
static vm_fault_t ttm_bo_vm_huge_fault(struct vm_fault *vmf,
				       enum page_entry_size pe_size)
{
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t prot;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	vm_fault_t ret;
	pgoff_t fault_page_size = 0;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	switch (pe_size) {
	case PE_SIZE_PMD:
		fault_page_size = HPAGE_PMD_SIZE >> PAGE_SHIFT;
		break;
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
	case PE_SIZE_PUD:
		fault_page_size = HPAGE_PUD_SIZE >> PAGE_SHIFT;
		break;
#endif
	default:
		WARN_ON_ONCE(1);
		return VM_FAULT_FALLBACK;
	}

	/* Fallback on write dirty-tracking or COW */
	if (write && ttm_pgprot_is_wrprotecting(vma->vm_page_prot))
		return VM_FAULT_FALLBACK;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	prot = vm_get_page_prot(vma->vm_flags);
	ret = ttm_bo_vm_fault_reserved(vmf, prot, 1, fault_page_size);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

	dma_resv_unlock(bo->base.resv);

	return ret;
}
#endif

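/*
 * ttm_bo_vm_open - default TTM vm_operations .open callback; takes an extra
 * buffer object reference for the new vma.
 */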
void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = vma->vm_private_data;

	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);

	ttm_bo_get(bo);
}
EXPORT_SYMBOL(ttm_bo_vm_open);

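/*
 * ttm_bo_vm_close - default TTM vm_operations .close callback; drops the
 * vma's buffer object reference and clears vm_private_data.
 */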
void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = vma->vm_private_data;

	ttm_bo_put(bo);
	vma->vm_private_data = NULL;
}
EXPORT_SYMBOL(ttm_bo_vm_close);

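/*
 * ttm_bo_vm_access_kmap - helper for ttm_bo_vm_access(). Copies @len bytes
 * between @buf and the buffer object one page at a time via ttm_bo_kmap(),
 * so no mapping of the whole object is needed.
 */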
static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
				 unsigned long offset,
				 uint8_t *buf, int len, int write)
{
	unsigned long page = offset >> PAGE_SHIFT;
	unsigned long bytes_left = len;
	int ret;

	/* Copy a page at a time, that way no extra virtual address
	 * mapping is needed
	 */
	offset -= page << PAGE_SHIFT;
	do {
		unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
		struct ttm_bo_kmap_obj map;
		void *ptr;
		bool is_iomem;

		ret = ttm_bo_kmap(bo, page, 1, &map);
		if (ret)
			return ret;

		ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
		WARN_ON_ONCE(is_iomem);
		if (write)
			memcpy(ptr, buf, bytes);
		else
			memcpy(buf, ptr, bytes);
		ttm_bo_kunmap(&map);

		page++;
		buf += bytes;
		bytes_left -= bytes;
		offset = 0;
	} while (bytes_left);

	return len;
}

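/*
 * ttm_bo_vm_access - default TTM vm_operations .access callback, used e.g.
 * by ptrace and /proc/<pid>/mem. Reserves the buffer object, accesses
 * system and TT memory through a kmap, and defers other placements to the
 * driver's access_memory hook.
 */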
int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
		     void *buf, int len, int write)
{
	unsigned long offset = (addr) - vma->vm_start;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	int ret;

	if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
		return -EIO;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (ret)
		return ret;

	switch (bo->mem.mem_type) {
	case TTM_PL_SYSTEM:
		if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
			ret = ttm_tt_swapin(bo->ttm);
			if (unlikely(ret != 0))
				goto out_unreserve;
		}
		/* fall through */
	case TTM_PL_TT:
		ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
		break;
	default:
		if (bo->bdev->driver->access_memory)
			ret = bo->bdev->driver->access_memory(
				bo, offset, buf, len, write);
		else
			ret = -EIO;
	}

out_unreserve:
	ttm_bo_unreserve(bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_access);

static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	.huge_fault = ttm_bo_vm_huge_fault,
#endif
};

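/*
 * ttm_bo_vm_lookup - look up the buffer object backing an mmap offset range
 * in the device's vma offset manager and return it with a reference held,
 * or NULL if no matching object was found.
 */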
static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
						  unsigned long offset,
						  unsigned long pages)
{
	struct drm_vma_offset_node *node;
	struct ttm_buffer_object *bo = NULL;

	drm_vma_offset_lock_lookup(bdev->vma_manager);

	node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages);
	if (likely(node)) {
		bo = container_of(node, struct ttm_buffer_object,
				  base.vma_node);
		bo = ttm_bo_get_unless_zero(bo);
	}

	drm_vma_offset_unlock_lookup(bdev->vma_manager);

	if (!bo)
		pr_err("Could not find buffer object to map\n");

	return bo;
}

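/*
 * ttm_bo_mmap_vma_setup - wire a vma up to TTM: install ttm_bo_vm_ops,
 * transfer the buffer object reference to vm_private_data and set the
 * VM_MIXEDMAP, VM_IO, VM_DONTEXPAND and VM_DONTDUMP flags.
 */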
static void ttm_bo_mmap_vma_setup(struct ttm_buffer_object *bo, struct vm_area_struct *vma)
{
	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;

	/*
	 * We'd like to use VM_PFNMAP on shared mappings, where
	 * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
	 * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
	 * bad for performance. Until that has been sorted out, use
	 * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
	 */
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
}

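/*
 * ttm_bo_mmap - mmap entry point for TTM-based drivers. Looks up the buffer
 * object from the fake mmap offset, checks permission through the driver's
 * verify_access hook and sets up the vma.
 */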
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
		return -EINVAL;

	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
	if (unlikely(!bo))
		return -EINVAL;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	ttm_bo_mmap_vma_setup(bo, vma);
	return 0;
out_unref:
	ttm_bo_put(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);

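/*
 * ttm_bo_mmap_obj - map an already-looked-up buffer object: takes a
 * reference and sets up the vma without the offset lookup and verify_access
 * check performed by ttm_bo_mmap().
 */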
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	ttm_bo_get(bo);
	ttm_bo_mmap_vma_setup(bo, vma);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_mmap_obj);