/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/sched/signal.h>

#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_regs.h"
#include "vc4_trace.h"

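/* Arms (or re-arms) the hangcheck timer to fire roughly 100ms from
 * now, so vc4_hangcheck_elapsed() can check whether the GPU is still
 * making progress on the queued jobs.
 */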
static void
vc4_queue_hangcheck(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mod_timer(&vc4->hangcheck.timer,
		  round_jiffies_up(jiffies + msecs_to_jiffies(100)));
}

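/* Kernel-side record of a GPU hang, handed to userspace through the
 * GET_HANG_STATE ioctl.  Holds references on the BOs involved in the
 * hung jobs until userspace collects them.
 */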
struct vc4_hang_state {
	struct drm_vc4_get_hang_state user_state;

	u32 bo_count;
	struct drm_gem_object **bo;
};

static void
vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
{
	unsigned int i;

	for (i = 0; i < state->user_state.bo_count; i++)
		drm_gem_object_put_unlocked(state->bo[i]);

	kfree(state);
}

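/* Returns the hang state captured by vc4_save_hang_state() to
 * userspace, creating GEM handles for the BOs that were part of the
 * hung jobs.
 */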
int
vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_get_hang_state *get_state = data;
	struct drm_vc4_get_hang_state_bo *bo_state;
	struct vc4_hang_state *kernel_state;
	struct drm_vc4_get_hang_state *state;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;
	u32 i;
	int ret = 0;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	kernel_state = vc4->hang_state;
	if (!kernel_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return -ENOENT;
	}
	state = &kernel_state->user_state;

	/* If the user's array isn't big enough, just return the
	 * required array size.
	 */
	if (get_state->bo_count < state->bo_count) {
		get_state->bo_count = state->bo_count;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return 0;
	}

	vc4->hang_state = NULL;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* Save the user's BO pointer, so we don't stomp it with the memcpy. */
	state->bo = get_state->bo;
	memcpy(get_state, state, sizeof(*state));

	bo_state = kcalloc(state->bo_count, sizeof(*bo_state), GFP_KERNEL);
	if (!bo_state) {
		ret = -ENOMEM;
		goto err_free;
	}

	for (i = 0; i < state->bo_count; i++) {
		struct vc4_bo *vc4_bo = to_vc4_bo(kernel_state->bo[i]);
		u32 handle;

		ret = drm_gem_handle_create(file_priv, kernel_state->bo[i],
					    &handle);

		if (ret) {
			state->bo_count = i;
			goto err_delete_handle;
		}
		bo_state[i].handle = handle;
		bo_state[i].paddr = vc4_bo->base.paddr;
		bo_state[i].size = vc4_bo->base.base.size;
	}

	if (copy_to_user(u64_to_user_ptr(get_state->bo),
			 bo_state,
			 state->bo_count * sizeof(*bo_state)))
		ret = -EFAULT;

err_delete_handle:
	if (ret) {
		for (i = 0; i < state->bo_count; i++)
			drm_gem_handle_delete(file_priv, bo_state[i].handle);
	}

err_free:
	vc4_free_hang_state(dev, kernel_state);
	kfree(bo_state);

	return ret;
}

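/* Captures the state of the hung binner and render jobs (and the V3D
 * registers) so userspace can retrieve it later through the
 * GET_HANG_STATE ioctl.  Called from the reset work before the GPU is
 * reset.
 */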
static void
vc4_save_hang_state(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_get_hang_state *state;
	struct vc4_hang_state *kernel_state;
	struct vc4_exec_info *exec[2];
	struct vc4_bo *bo;
	unsigned long irqflags;
	unsigned int i, j, k, unref_list_count;

	kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
	if (!kernel_state)
		return;

	state = &kernel_state->user_state;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	exec[0] = vc4_first_bin_job(vc4);
	exec[1] = vc4_first_render_job(vc4);
	if (!exec[0] && !exec[1]) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	/* Get the bos from both binner and renderer into hang state. */
	state->bo_count = 0;
	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;

		unref_list_count = 0;
		list_for_each_entry(bo, &exec[i]->unref_list, unref_head)
			unref_list_count++;
		state->bo_count += exec[i]->bo_count + unref_list_count;
	}

	kernel_state->bo = kcalloc(state->bo_count,
				   sizeof(*kernel_state->bo), GFP_ATOMIC);

	if (!kernel_state->bo) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	k = 0;
	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;

		for (j = 0; j < exec[i]->bo_count; j++) {
			bo = to_vc4_bo(&exec[i]->bo[j]->base);

			/* Retain BOs just in case they were marked purgeable.
			 * This prevents the BO from being purged before
			 * someone had a chance to dump the hang state.
			 */
			WARN_ON(!refcount_read(&bo->usecnt));
			refcount_inc(&bo->usecnt);
			drm_gem_object_get(&exec[i]->bo[j]->base);
			kernel_state->bo[k++] = &exec[i]->bo[j]->base;
		}

		list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
			/* No need to retain BOs coming from the ->unref_list
			 * because they are naturally unpurgeable.
			 */
			drm_gem_object_get(&bo->base.base);
			kernel_state->bo[k++] = &bo->base.base;
		}
	}

	WARN_ON_ONCE(k != state->bo_count);

	if (exec[0])
		state->start_bin = exec[0]->ct0ca;
	if (exec[1])
		state->start_render = exec[1]->ct1ca;

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	state->ct0ca = V3D_READ(V3D_CTNCA(0));
	state->ct0ea = V3D_READ(V3D_CTNEA(0));

	state->ct1ca = V3D_READ(V3D_CTNCA(1));
	state->ct1ea = V3D_READ(V3D_CTNEA(1));

	state->ct0cs = V3D_READ(V3D_CTNCS(0));
	state->ct1cs = V3D_READ(V3D_CTNCS(1));

	state->ct0ra0 = V3D_READ(V3D_CT00RA0);
	state->ct1ra0 = V3D_READ(V3D_CT01RA0);

	state->bpca = V3D_READ(V3D_BPCA);
	state->bpcs = V3D_READ(V3D_BPCS);
	state->bpoa = V3D_READ(V3D_BPOA);
	state->bpos = V3D_READ(V3D_BPOS);

	state->vpmbase = V3D_READ(V3D_VPMBASE);

	state->dbge = V3D_READ(V3D_DBGE);
	state->fdbgo = V3D_READ(V3D_FDBGO);
	state->fdbgb = V3D_READ(V3D_FDBGB);
	state->fdbgr = V3D_READ(V3D_FDBGR);
	state->fdbgs = V3D_READ(V3D_FDBGS);
	state->errstat = V3D_READ(V3D_ERRSTAT);

	/* We need to turn purgeable BOs into unpurgeable ones so that
	 * userspace has a chance to dump the hang state before the kernel
	 * decides to purge those BOs.
	 * Note that BO consistency at dump time cannot be guaranteed. For
	 * example, if the owner of these BOs decides to re-use them or mark
	 * them purgeable again there's nothing we can do to prevent it.
	 */
	for (i = 0; i < kernel_state->user_state.bo_count; i++) {
		struct vc4_bo *bo = to_vc4_bo(kernel_state->bo[i]);

		if (bo->madv == __VC4_MADV_NOTSUPP)
			continue;

		mutex_lock(&bo->madv_lock);
		if (!WARN_ON(bo->madv == __VC4_MADV_PURGED))
			bo->madv = VC4_MADV_WILLNEED;
		refcount_dec(&bo->usecnt);
		mutex_unlock(&bo->madv_lock);
	}

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (vc4->hang_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_free_hang_state(dev, kernel_state);
	} else {
		vc4->hang_state = kernel_state;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
	}
}

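/* Resets the GPU by power-cycling the V3D block through runtime PM,
 * then restores interrupt state and kicks off any queued jobs.
 */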
static void
vc4_reset(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	DRM_INFO("Resetting GPU.\n");

	mutex_lock(&vc4->power_lock);
	if (vc4->power_refcount) {
		/* Power the device off and back on by dropping the
		 * reference on runtime PM.
		 */
		pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
		pm_runtime_get_sync(&vc4->v3d->pdev->dev);
	}
	mutex_unlock(&vc4->power_lock);

	vc4_irq_reset(dev);

	/* Rearm the hangcheck -- another job might have been waiting
	 * for our hung one to get kicked off, and vc4_irq_reset()
	 * would have started it.
	 */
	vc4_queue_hangcheck(dev);
}

static void
vc4_reset_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, hangcheck.reset_work);

	vc4_save_hang_state(vc4->dev);

	vc4_reset(vc4->dev);
}

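/* Hangcheck timer callback: if neither the binner nor the renderer has
 * made progress since the last check, the GPU is assumed hung and the
 * reset work is scheduled.
 */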
static void
vc4_hangcheck_elapsed(struct timer_list *t)
{
	struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
	struct drm_device *dev = vc4->dev;
	uint32_t ct0ca, ct1ca;
	unsigned long irqflags;
	struct vc4_exec_info *bin_exec, *render_exec;

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	bin_exec = vc4_first_bin_job(vc4);
	render_exec = vc4_first_render_job(vc4);

	/* If idle, we can stop watching for hangs. */
	if (!bin_exec && !render_exec) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	ct0ca = V3D_READ(V3D_CTNCA(0));
	ct1ca = V3D_READ(V3D_CTNCA(1));

	/* If we've made any progress in execution, rearm the timer
	 * and wait.
	 */
	if ((bin_exec && ct0ca != bin_exec->last_ct0ca) ||
	    (render_exec && ct1ca != render_exec->last_ct1ca)) {
		if (bin_exec)
			bin_exec->last_ct0ca = ct0ca;
		if (render_exec)
			render_exec->last_ct1ca = ct1ca;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_queue_hangcheck(dev);
		return;
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* We've gone too long with no progress, reset.  This has to
	 * be done from a work struct, since resetting can sleep and
	 * this timer hook isn't allowed to.
	 */
	schedule_work(&vc4->hangcheck.reset_work);
}

static void
submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Set the current and end address of the control list.
	 * Writing the end register is what starts the job.
	 */
	V3D_WRITE(V3D_CTNCA(thread), start);
	V3D_WRITE(V3D_CTNEA(thread), end);
}

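/* Waits for the given seqno to be signaled, or for timeout_ns to
 * elapse (~0ull means wait forever).  Returns 0 on success, -ETIME on
 * timeout, or -ERESTARTSYS if an interruptible wait was interrupted by
 * a signal.
 */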
int
vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
		   bool interruptible)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = 0;
	unsigned long timeout_expire;
	DEFINE_WAIT(wait);

	if (vc4->finished_seqno >= seqno)
		return 0;

	if (timeout_ns == 0)
		return -ETIME;

	timeout_expire = jiffies + nsecs_to_jiffies(timeout_ns);

	trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);
	for (;;) {
		prepare_to_wait(&vc4->job_wait_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE :
				TASK_UNINTERRUPTIBLE);

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (vc4->finished_seqno >= seqno)
			break;

		if (timeout_ns != ~0ull) {
			if (time_after_eq(jiffies, timeout_expire)) {
				ret = -ETIME;
				break;
			}
			schedule_timeout(timeout_expire - jiffies);
		} else {
			schedule();
		}
	}

	finish_wait(&vc4->job_wait_queue, &wait);
	trace_vc4_wait_for_seqno_end(dev, seqno);

	return ret;
}

static void
vc4_flush_caches(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Flush the GPU L2 caches.  These caches sit on top of system
	 * L3 (the 128kb or so shared with the CPU), and are
	 * non-allocating in the L3.
	 */
	V3D_WRITE(V3D_L2CACTL,
		  V3D_L2CACTL_L2CCLR);

	V3D_WRITE(V3D_SLCACTL,
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}

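/* Flushes only the GPU L2 and texture (T0/T1) caches, for render jobs
 * whose textures may have been written by a previous RCL after our
 * flush at bin time.
 */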
static void
vc4_flush_texture_caches(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	V3D_WRITE(V3D_L2CACTL,
		  V3D_L2CACTL_L2CCLR);

	V3D_WRITE(V3D_SLCACTL,
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC));
}

/* Sets the registers for the next job to actually be executed in
 * the hardware.
 *
 * The job_lock should be held during this.
 */
void
vc4_submit_next_bin_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec;

again:
	exec = vc4_first_bin_job(vc4);
	if (!exec)
		return;

	vc4_flush_caches(dev);

	/* Either put the job in the binner if it uses the binner, or
	 * immediately move it to the to-be-rendered queue.
	 */
	if (exec->ct0ca != exec->ct0ea) {
		submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
	} else {
		vc4_move_job_to_render(dev, exec);
		goto again;
	}
}

void
vc4_submit_next_render_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec = vc4_first_render_job(vc4);

	if (!exec)
		return;

	/* A previous RCL may have written to one of our textures, and
	 * our full cache flush at bin time may have occurred before
	 * that RCL completed.  Flush the texture cache now, but not
	 * the instructions or uniforms (since we don't write those
	 * from an RCL).
	 */
	vc4_flush_texture_caches(dev);

	submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
}

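/* Moves a job from the binner queue to the render queue, kicking it
 * off immediately if no render job was pending.
 */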
void
vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	bool was_empty = list_empty(&vc4->render_job_list);

	list_move_tail(&exec->head, &vc4->render_job_list);
	if (was_empty)
		vc4_submit_next_render_job(dev);
}

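/* Stamps every BO referenced by the job with the job's seqno and
 * attaches the job's fence to their reservation objects (shared for
 * reads, exclusive for RCL writes).
 */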
static void
vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
{
	struct vc4_bo *bo;
	unsigned i;

	for (i = 0; i < exec->bo_count; i++) {
		bo = to_vc4_bo(&exec->bo[i]->base);
		bo->seqno = seqno;

		reservation_object_add_shared_fence(bo->resv, exec->fence);
	}

	list_for_each_entry(bo, &exec->unref_list, unref_head) {
		bo->seqno = seqno;
	}

	for (i = 0; i < exec->rcl_write_bo_count; i++) {
		bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
		bo->write_seqno = seqno;

		reservation_object_add_excl_fence(bo->resv, exec->fence);
	}
}

static void
vc4_unlock_bo_reservations(struct drm_device *dev,
			   struct vc4_exec_info *exec,
			   struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < exec->bo_count; i++) {
		struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);

		ww_mutex_unlock(&bo->resv->lock);
	}

	ww_acquire_fini(acquire_ctx);
}

/* Takes the reservation lock on all the BOs being referenced, so that
 * at queue submit time we can update the reservations.
 *
 * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
 * (all of which are on exec->unref_list).  They're entirely private
 * to vc4, so we don't attach dma-buf fences to them.
 */
static int
vc4_lock_bo_reservations(struct drm_device *dev,
			 struct vc4_exec_info *exec,
			 struct ww_acquire_ctx *acquire_ctx)
{
	int contended_lock = -1;
	int i, ret;
	struct vc4_bo *bo;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended_lock != -1) {
		bo = to_vc4_bo(&exec->bo[contended_lock]->base);
		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
						       acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < exec->bo_count; i++) {
		if (i == contended_lock)
			continue;

		bo = to_vc4_bo(&exec->bo[i]->base);

		ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++) {
				bo = to_vc4_bo(&exec->bo[j]->base);
				ww_mutex_unlock(&bo->resv->lock);
			}

			if (contended_lock != -1 && contended_lock >= i) {
				bo = to_vc4_bo(&exec->bo[contended_lock]->base);

				ww_mutex_unlock(&bo->resv->lock);
			}

			if (ret == -EDEADLK) {
				contended_lock = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	/* Reserve space for our shared (read-only) fence references,
	 * before we commit the CL to the hardware.
	 */
	for (i = 0; i < exec->bo_count; i++) {
		bo = to_vc4_bo(&exec->bo[i]->base);

		ret = reservation_object_reserve_shared(bo->resv);
		if (ret) {
			vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
			return ret;
		}
	}

	return 0;
}

/* Queues a struct vc4_exec_info for execution.  If no job is
 * currently executing, then submits it.
 *
 * Unlike most GPUs, our hardware only handles one command list at a
 * time.  To queue multiple jobs at once, we'd need to edit the
 * previous command list to have a jump to the new one at the end, and
 * then bump the end address.  That's a change for a later date,
 * though.
 */
static int
vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
		 struct ww_acquire_ctx *acquire_ctx)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint64_t seqno;
	unsigned long irqflags;
	struct vc4_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;
	fence->dev = dev;

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	seqno = ++vc4->emit_seqno;
	exec->seqno = seqno;

	dma_fence_init(&fence->base, &vc4_fence_ops, &vc4->job_lock,
		       vc4->dma_fence_context, exec->seqno);
	fence->seqno = exec->seqno;
	exec->fence = &fence->base;

	vc4_update_bo_seqnos(exec, seqno);

	vc4_unlock_bo_reservations(dev, exec, acquire_ctx);

	list_add_tail(&exec->head, &vc4->bin_job_list);

	/* If no job was executing, kick ours off.  Otherwise, it'll
	 * get started when the previous job's flush done interrupt
	 * occurs.
	 */
	if (vc4_first_bin_job(vc4) == exec) {
		vc4_submit_next_bin_job(dev);
		vc4_queue_hangcheck(dev);
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return 0;
}

/**
 * vc4_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @exec: V3D job being set up
 *
 * The command validator needs to reference BOs by their index within
 * the submitted job's BO list.  This does the validation of the job's
 * BO list and reference counting for the lifetime of the job.
 */
static int
vc4_cl_lookup_bos(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	uint32_t *handles;
	int ret = 0;
	int i;

	exec->bo_count = args->bo_handle_count;

	if (!exec->bo_count) {
		/* See comment on bo_index for why we have to check
		 * this.
		 */
		DRM_DEBUG("Rendering requires BOs to validate\n");
		return -EINVAL;
	}

	exec->bo = kvmalloc_array(exec->bo_count,
				    sizeof(struct drm_gem_cma_object *),
				    GFP_KERNEL | __GFP_ZERO);
	if (!exec->bo) {
		DRM_ERROR("Failed to allocate validated BO pointers\n");
		return -ENOMEM;
	}

	handles = kvmalloc_array(exec->bo_count, sizeof(uint32_t), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		DRM_ERROR("Failed to allocate incoming GEM handles\n");
		goto fail;
	}

	if (copy_from_user(handles, u64_to_user_ptr(args->bo_handles),
			   exec->bo_count * sizeof(uint32_t))) {
		ret = -EFAULT;
		DRM_ERROR("Failed to copy in GEM handles\n");
		goto fail;
	}

	spin_lock(&file_priv->table_lock);
	for (i = 0; i < exec->bo_count; i++) {
		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
						     handles[i]);
		if (!bo) {
			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
				  i, handles[i]);
			ret = -EINVAL;
			break;
		}

		drm_gem_object_get(bo);
		exec->bo[i] = (struct drm_gem_cma_object *)bo;
	}
	spin_unlock(&file_priv->table_lock);

	if (ret)
		goto fail_put_bo;

	for (i = 0; i < exec->bo_count; i++) {
		ret = vc4_bo_inc_usecnt(to_vc4_bo(&exec->bo[i]->base));
		if (ret)
			goto fail_dec_usecnt;
	}

	kvfree(handles);
	return 0;

fail_dec_usecnt:
	/* Decrease usecnt on acquired objects.
	 * We cannot rely on vc4_complete_exec() to release resources here,
	 * because vc4_complete_exec() has no information about which BO has
	 * had its ->usecnt incremented.
	 * To make things easier we just free everything explicitly and set
	 * exec->bo to NULL so that vc4_complete_exec() skips the 'BO release'
	 * step.
	 */
	for (i-- ; i >= 0; i--)
		vc4_bo_dec_usecnt(to_vc4_bo(&exec->bo[i]->base));

fail_put_bo:
	/* Release any reference to acquired objects. */
	for (i = 0; i < exec->bo_count && exec->bo[i]; i++)
		drm_gem_object_put_unlocked(&exec->bo[i]->base);

fail:
	kvfree(handles);
	kvfree(exec->bo);
	exec->bo = NULL;
	return ret;
}

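/* Copies in the binner command list, shader records and uniforms from
 * userspace, validates them, and sets up the binner CL to be executed
 * from a freshly allocated BO.
 */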
static int
vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	void *temp = NULL;
	void *bin;
	int ret = 0;
	uint32_t bin_offset = 0;
	uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size,
					     16);
	uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size;
	uint32_t exec_size = uniforms_offset + args->uniforms_size;
	uint32_t temp_size = exec_size + (sizeof(struct vc4_shader_state) *
					  args->shader_rec_count);
	struct vc4_bo *bo;

	if (shader_rec_offset < args->bin_cl_size ||
	    uniforms_offset < shader_rec_offset ||
	    exec_size < uniforms_offset ||
	    args->shader_rec_count >= (UINT_MAX /
					  sizeof(struct vc4_shader_state)) ||
	    temp_size < exec_size) {
		DRM_DEBUG("overflow in exec arguments\n");
		ret = -EINVAL;
		goto fail;
	}

	/* Allocate space where we'll store the copied in user command lists
	 * and shader records.
	 *
	 * We don't just copy directly into the BOs because we need to
	 * read the contents back for validation, and I think the
	 * bo->vaddr is uncached access.
	 */
	temp = kvmalloc_array(temp_size, 1, GFP_KERNEL);
	if (!temp) {
		DRM_ERROR("Failed to allocate storage for copying "
			  "in bin/render CLs.\n");
		ret = -ENOMEM;
		goto fail;
	}
	bin = temp + bin_offset;
	exec->shader_rec_u = temp + shader_rec_offset;
	exec->uniforms_u = temp + uniforms_offset;
	exec->shader_state = temp + exec_size;
	exec->shader_state_size = args->shader_rec_count;

	if (copy_from_user(bin,
			   u64_to_user_ptr(args->bin_cl),
			   args->bin_cl_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->shader_rec_u,
			   u64_to_user_ptr(args->shader_rec),
			   args->shader_rec_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->uniforms_u,
			   u64_to_user_ptr(args->uniforms),
			   args->uniforms_size)) {
		ret = -EFAULT;
		goto fail;
	}

	bo = vc4_bo_create(dev, exec_size, true, VC4_BO_TYPE_BCL);
	if (IS_ERR(bo)) {
		DRM_ERROR("Couldn't allocate BO for binning\n");
		ret = PTR_ERR(bo);
		goto fail;
	}
	exec->exec_bo = &bo->base;

	list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
		      &exec->unref_list);

	exec->ct0ca = exec->exec_bo->paddr + bin_offset;

	exec->bin_u = bin;

	exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
	exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
	exec->shader_rec_size = args->shader_rec_size;

	exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
	exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
	exec->uniforms_size = args->uniforms_size;

	ret = vc4_validate_bin_cl(dev,
				  exec->exec_bo->vaddr + bin_offset,
				  bin,
				  exec);
	if (ret)
		goto fail;

	ret = vc4_validate_shader_recs(dev, exec);
	if (ret)
		goto fail;

	/* Block waiting on any previous rendering into the CS's VBO,
	 * IB, or textures, so that pixels are actually written by the
	 * time we try to read them.
	 */
	ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);

fail:
	kvfree(temp);
	return ret;
}

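/* Releases everything held by a finished (or failed) job: its fence,
 * BO references and use counts, bin slots, and the power reference
 * taken at submit time.
 */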
static void
vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;
	unsigned i;

	/* If we got force-completed because of GPU reset rather than
	 * through our IRQ handler, signal the fence now.
	 */
	if (exec->fence) {
		dma_fence_signal(exec->fence);
		dma_fence_put(exec->fence);
	}

	if (exec->bo) {
		for (i = 0; i < exec->bo_count; i++) {
			struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);

			vc4_bo_dec_usecnt(bo);
			drm_gem_object_put_unlocked(&exec->bo[i]->base);
		}
		kvfree(exec->bo);
	}

	while (!list_empty(&exec->unref_list)) {
		struct vc4_bo *bo = list_first_entry(&exec->unref_list,
						     struct vc4_bo, unref_head);
		list_del(&bo->unref_head);
		drm_gem_object_put_unlocked(&bo->base.base);
	}

	/* Free up the allocation of any bin slots we used. */
	spin_lock_irqsave(&vc4->job_lock, irqflags);
	vc4->bin_alloc_used &= ~exec->bin_slots;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	mutex_lock(&vc4->power_lock);
	if (--vc4->power_refcount == 0) {
		pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
		pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
	}
	mutex_unlock(&vc4->power_lock);

	kfree(exec);
}

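/* Walks the list of completed jobs, freeing their exec structs and
 * scheduling any seqno callbacks whose seqno has now been reached.
 */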
void
vc4_job_handle_completed(struct vc4_dev *vc4)
{
	unsigned long irqflags;
	struct vc4_seqno_cb *cb, *cb_temp;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	while (!list_empty(&vc4->job_done_list)) {
		struct vc4_exec_info *exec =
			list_first_entry(&vc4->job_done_list,
					 struct vc4_exec_info, head);
		list_del(&exec->head);

		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_complete_exec(vc4->dev, exec);
		spin_lock_irqsave(&vc4->job_lock, irqflags);
	}

	list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
		if (cb->seqno <= vc4->finished_seqno) {
			list_del_init(&cb->work.entry);
			schedule_work(&cb->work);
		}
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}

static void vc4_seqno_cb_work(struct work_struct *work)
{
	struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);

	cb->func(cb);
}

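/* Registers a callback to be run from a workqueue once the given seqno
 * has been reached; if it already has been, the callback is scheduled
 * immediately.
 */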
int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb))
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = 0;
	unsigned long irqflags;

	cb->func = func;
	INIT_WORK(&cb->work, vc4_seqno_cb_work);

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (seqno > vc4->finished_seqno) {
		cb->seqno = seqno;
		list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
	} else {
		schedule_work(&cb->work);
	}
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return ret;
}

/* Scheduled when any job has been completed, this walks the list of
 * jobs that have completed and unrefs their BOs and frees their exec
 * structs.
 */
static void
vc4_job_done_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, job_done_work);

	vc4_job_handle_completed(vc4);
}

static int
vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev,
				uint64_t seqno,
				uint64_t *timeout_ns)
{
	unsigned long start = jiffies;
	int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns, true);

	if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) {
		uint64_t delta = jiffies_to_nsecs(jiffies - start);

		if (*timeout_ns >= delta)
			*timeout_ns -= delta;
	}

	return ret;
}

int
vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_vc4_wait_seqno *args = data;

	return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
					       &args->timeout_ns);
}

int
vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	int ret;
	struct drm_vc4_wait_bo *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (args->pad != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}
	bo = to_vc4_bo(gem_obj);

	ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
					      &args->timeout_ns);

	drm_gem_object_put_unlocked(gem_obj);
	return ret;
}

/**
 * vc4_submit_cl_ioctl() - Submits a job (frame) to the VC4.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * This is the main entrypoint for userspace to submit a 3D frame to
 * the GPU.  Userspace provides the binner command list (if
 * applicable), and the kernel sets up the render command list to draw
 * to the framebuffer described in the ioctl, using the command lists
 * that the 3D engine's binner will produce.
 */
int
vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_submit_cl *args = data;
	struct vc4_exec_info *exec;
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	if ((args->flags & ~(VC4_SUBMIT_CL_USE_CLEAR_COLOR |
			     VC4_SUBMIT_CL_FIXED_RCL_ORDER |
			     VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X |
			     VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y)) != 0) {
		DRM_DEBUG("Unknown flags: 0x%02x\n", args->flags);
		return -EINVAL;
	}

	exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
	if (!exec) {
		DRM_ERROR("malloc failure on exec struct\n");
		return -ENOMEM;
	}

	mutex_lock(&vc4->power_lock);
	if (vc4->power_refcount++ == 0) {
		ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
		if (ret < 0) {
			mutex_unlock(&vc4->power_lock);
			vc4->power_refcount--;
			kfree(exec);
			return ret;
		}
	}
	mutex_unlock(&vc4->power_lock);

	exec->args = args;
	INIT_LIST_HEAD(&exec->unref_list);

	ret = vc4_cl_lookup_bos(dev, file_priv, exec);
	if (ret)
		goto fail;

	if (exec->args->bin_cl_size != 0) {
		ret = vc4_get_bcl(dev, exec);
		if (ret)
			goto fail;
	} else {
		exec->ct0ca = 0;
		exec->ct0ea = 0;
	}

	ret = vc4_get_rcl(dev, exec);
	if (ret)
		goto fail;

	ret = vc4_lock_bo_reservations(dev, exec, &acquire_ctx);
	if (ret)
		goto fail;

	/* Clear this out of the struct we'll be putting in the queue,
	 * since it's part of our stack.
	 */
	exec->args = NULL;

	ret = vc4_queue_submit(dev, exec, &acquire_ctx);
	if (ret)
		goto fail;

	/* Return the seqno for our job. */
	args->seqno = vc4->emit_seqno;

	return 0;

fail:
	vc4_complete_exec(vc4->dev, exec);

	return ret;
}

void
vc4_gem_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	vc4->dma_fence_context = dma_fence_context_alloc(1);

	INIT_LIST_HEAD(&vc4->bin_job_list);
	INIT_LIST_HEAD(&vc4->render_job_list);
	INIT_LIST_HEAD(&vc4->job_done_list);
	INIT_LIST_HEAD(&vc4->seqno_cb_list);
	spin_lock_init(&vc4->job_lock);

	INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
	timer_setup(&vc4->hangcheck.timer, vc4_hangcheck_elapsed, 0);

	INIT_WORK(&vc4->job_done_work, vc4_job_done_work);

	mutex_init(&vc4->power_lock);

	INIT_LIST_HEAD(&vc4->purgeable.list);
	mutex_init(&vc4->purgeable.lock);
}

void
vc4_gem_destroy(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Waiting for exec to finish would need to be done before
	 * unregistering V3D.
	 */
	WARN_ON(vc4->emit_seqno != vc4->finished_seqno);

	/* V3D should already have disabled its interrupt and cleared
	 * the overflow allocation registers.  Now free the object.
	 */
	if (vc4->bin_bo) {
		drm_gem_object_put_unlocked(&vc4->bin_bo->base.base);
		vc4->bin_bo = NULL;
	}

	if (vc4->hang_state)
		vc4_free_hang_state(dev, vc4->hang_state);
}

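/* Implements the GEM madvise ioctl: marks a BO as purgeable
 * (VC4_MADV_DONTNEED) or needed (VC4_MADV_WILLNEED) and reports back
 * whether its contents have already been purged.
 */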
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_vc4_gem_madvise *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	int ret;

	switch (args->madv) {
	case VC4_MADV_DONTNEED:
	case VC4_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	if (args->pad != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}

	bo = to_vc4_bo(gem_obj);

	/* Only BOs exposed to userspace can be purged. */
	if (bo->madv == __VC4_MADV_NOTSUPP) {
		DRM_DEBUG("madvise not supported on this BO\n");
		ret = -EINVAL;
		goto out_put_gem;
	}

	/* Not sure it's safe to purge imported BOs. Let's just assume it's
	 * not until proven otherwise.
	 */
	if (gem_obj->import_attach) {
		DRM_DEBUG("madvise not supported on imported BOs\n");
		ret = -EINVAL;
		goto out_put_gem;
	}

	mutex_lock(&bo->madv_lock);

	if (args->madv == VC4_MADV_DONTNEED && bo->madv == VC4_MADV_WILLNEED &&
	    !refcount_read(&bo->usecnt)) {
		/* If the BO is about to be marked as purgeable, is not used
		 * and is not already purgeable or purged, add it to the
		 * purgeable list.
		 */
		vc4_bo_add_to_purgeable_pool(bo);
	} else if (args->madv == VC4_MADV_WILLNEED &&
		   bo->madv == VC4_MADV_DONTNEED &&
		   !refcount_read(&bo->usecnt)) {
		/* The BO has not been purged yet, just remove it from
		 * the purgeable list.
		 */
		vc4_bo_remove_from_purgeable_pool(bo);
	}

	/* Save the purged state. */
	args->retained = bo->madv != __VC4_MADV_PURGED;

	/* Update internal madv state only if the bo was not purged. */
	if (bo->madv != __VC4_MADV_PURGED)
		bo->madv = args->madv;

	mutex_unlock(&bo->madv_lock);

	ret = 0;

out_put_gem:
	drm_gem_object_put_unlocked(gem_obj);

	return ret;
}