/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_mocs.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static void
i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
static void
i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring);

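/* Returns true when CPU reads/writes are coherent with GPU access, i.e. on
 * LLC platforms or for objects using a snooped (non-NONE) cache level.
 */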
static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

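/* CPU writes must be clflushed when the object is not cache coherent, and
 * also for pin_display (scanout) objects, which the display engine reads
 * without snooping the CPU cache.
 */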
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

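/* Wait (interruptibly, for at most 10 seconds) for any pending GPU reset to
 * complete before the caller is allowed to proceed.
 */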
static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

	if (!i915_reset_in_progress(error))
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       !i915_reset_in_progress(error),
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	} else {
		return 0;
	}
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
		if (vma->pin_count)
			pinned += vma->node.size;
	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
		if (vma->pin_count)
			pinned += vma->node.size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = ggtt->base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

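/* Copy the shmem backing pages into the physically contiguous buffer and
 * build a single-entry sg_table pointing at it.
 */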
static int
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
	char *vaddr = obj->phys_handle->vaddr;
	struct sg_table *st;
	struct scatterlist *sg;
	int i;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		vaddr += PAGE_SIZE;
	}

	i915_gem_chipset_flush(obj->base.dev);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = obj->phys_handle->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->pages = st;
	return 0;
}

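/* Write any dirty data in the contiguous buffer back to the shmem pages,
 * then release the sg_table built by the phys get_pages hook.
 */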
static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
{
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (WARN_ON(ret)) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	if (obj->dirty) {
		struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
		obj->dirty = 0;
	}

	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};

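/* Unbind every VMA of the object and release its backing pages, holding a
 * temporary reference so the object cannot disappear underneath us.
 */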
static int
drop_pages(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma, *next;
	int ret;

	drm_gem_object_reference(&obj->base);
	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
		if (i915_vma_unbind(vma))
			break;

	ret = i915_gem_object_put_pages(obj);
	drm_gem_object_unreference(&obj->base);

	return ret;
}

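/* Switch the object from its shmem backing to a physically contiguous DMA
 * allocation with the requested alignment, dropping any existing pages
 * first.
 */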
int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
			    int align)
{
	drm_dma_handle_t *phys;
	int ret;

	if (obj->phys_handle) {
		if ((unsigned long)obj->phys_handle->vaddr & (align -1))
			return -EBUSY;

		return 0;
	}

	if (obj->madv != I915_MADV_WILLNEED)
		return -EFAULT;

	if (obj->base.filp == NULL)
		return -EINVAL;

	ret = drop_pages(obj);
	if (ret)
		return ret;

	/* create a new object */
	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
	if (!phys)
		return -ENOMEM;

	obj->phys_handle = phys;
	obj->ops = &i915_gem_phys_ops;

	return i915_gem_object_get_pages(obj);
}

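/* pwrite into a physically contiguous object: copy straight into the kernel
 * mapping (dropping struct_mutex for the non-atomic copy if the atomic copy
 * faults), then clflush and flush the chipset so the GPU sees the data.
 */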
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_device *dev = obj->base.dev;
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = to_user_ptr(args->data_ptr);
	int ret = 0;

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten) {
			ret = -EFAULT;
			goto out;
		}
	}

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(dev);

out:
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	kmem_cache_free(dev_priv->objects, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

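/* The __copy_{to,from}_user_swizzled() helpers copy in 64-byte cacheline
 * chunks and XOR the GPU offset with 64, swapping adjacent cachelines to
 * compensate for bit-17 swizzling of the object's pages.
 */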
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    int *needs_clflush)
{
	int ret;

	*needs_clflush = 0;

	if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
		return -EINVAL;

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
							obj->cache_level);
		ret = i915_gem_object_wait_rendering(obj, true);
		if (ret)
			return ret;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	return ret;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}

}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? - EFAULT : 0;
}

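/* General pread path: copy each backing page to userspace, trying the
 * atomic fast path first and falling back to the non-atomic slow path
 * (dropping struct_mutex) when that fails.
 */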
static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;

	offset = args->offset;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915.prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source.  */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;

	intel_fb_obj_invalidate(obj, ORIGIN_GTT);

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(ggtt->mappable, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_flush;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_flush:
	intel_fb_obj_flush(obj, false, ORIGIN_GTT);
out_unpin:
	i915_gem_object_ggtt_unpin(obj);
out:
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
					user_data, page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

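/* General pwrite path: copy user data into each backing page, clflushing
 * partially written cachelines when needed and falling back to the
 * non-atomic slow path (dropping struct_mutex) when the fast copy fails.
 */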
static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		needs_clflush_after = cpu_write_needs_clflush(obj);
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;
	}
	/* Same trick applies to invalidate partially written cachelines read
	 * before writing. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		needs_clflush_before =
			!cpu_cache_is_coherent(dev, obj->cache_level);

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);

	i915_gem_object_pin_pages(obj);

	offset = args->offset;
	obj->dirty = 1;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);
		int partial_cacheline_write;

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire patch. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	if (hit_slowpath) {
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!needs_clflush_after &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			if (i915_gem_clflush_object(obj, obj->pin_display))
				needs_clflush_after = true;
		}
	}

	if (needs_clflush_after)
		i915_gem_chipset_flush(dev);
	else
		obj->cache_dirty = true;

	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
						   args->size);
		if (ret)
			return -EFAULT;
	}

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto put_rpm;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->tiling_mode == I915_TILING_NONE &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
	    cpu_write_needs_clflush(obj)) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else
			ret = i915_gem_shmem_pwrite(dev, obj, args, file);
	}

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
put_rpm:
	intel_runtime_pm_put(dev_priv);

	return ret;
}

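/* Translate the reset state into an errno: -EIO if the GPU is terminally
 * wedged, -EAGAIN (or -EIO for non-interruptible callers) while a reset is
 * in progress, 0 otherwise.
 */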
static int
i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
{
	if (__i915_terminally_wedged(reset_counter))
		return -EIO;

	if (__i915_reset_in_progress(reset_counter)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}

static void