r600_state_common.c 23.6 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
/*
 * Copyright 2010 Red Hat Inc.
 *           2010 Jerome Glisse
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 */
27
#include "util/u_blitter.h"
Kai Wasserbäch's avatar
Kai Wasserbäch committed
28
29
30
#include "util/u_memory.h"
#include "util/u_format.h"
#include "pipebuffer/pb_buffer.h"
31
#include "pipe/p_shader_tokens.h"
32
#include "tgsi/tgsi_parse.h"
33
#include "r600_formats.h"
34
#include "r600_pipe.h"
35
#include "r600d.h"
36

37
static bool r600_conv_pipe_prim(unsigned pprim, unsigned *prim)
38
{
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
	static const int prim_conv[] = {
		V_008958_DI_PT_POINTLIST,
		V_008958_DI_PT_LINELIST,
		V_008958_DI_PT_LINELOOP,
		V_008958_DI_PT_LINESTRIP,
		V_008958_DI_PT_TRILIST,
		V_008958_DI_PT_TRISTRIP,
		V_008958_DI_PT_TRIFAN,
		V_008958_DI_PT_QUADLIST,
		V_008958_DI_PT_QUADSTRIP,
		V_008958_DI_PT_POLYGON,
		-1,
		-1,
		-1,
		-1
	};

	*prim = prim_conv[pprim];
	if (*prim == -1) {
58
		fprintf(stderr, "%s:%d unsupported %d\n", __func__, __LINE__, pprim);
59
		return false;
60
	}
61
	return true;
62
63
}

64
/* common state between evergreen and r600 */
65
66
67
68
69
70
71
72
73
74
75
76
77
78
/* Bind a blend CSO: mirror its colorbuffer target mask into the context
 * and queue its register state for emission. */
void r600_bind_blend_state(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_blend *blend = (struct r600_pipe_blend *)state;
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;

	if (!blend)
		return;

	rctx->cb_target_mask = blend->cb_target_mask;
	rctx->states[blend->rstate.id] = &blend->rstate;
	r600_context_pipe_state_set(&rctx->ctx, &blend->rstate);
}

79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
/* Bind a depth-stencil-alpha CSO.  Caches the alpha reference value and
 * marks it dirty so SX_ALPHA_REF is re-emitted before the next draw. */
void r600_bind_dsa_state(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_dsa *dsa = (struct r600_pipe_dsa *)state;
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;

	if (!dsa)
		return;

	rctx->states[dsa->rstate.id] = &dsa->rstate;
	rctx->alpha_ref = dsa->alpha_ref;
	rctx->alpha_ref_dirty = true;
	r600_context_pipe_state_set(&rctx->ctx, &dsa->rstate);
}

94
95
96
97
98
99
100
101
102
void r600_bind_rs_state(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_rasterizer *rs = (struct r600_pipe_rasterizer *)state;
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;

	if (state == NULL)
		return;

	rctx->sprite_coord_enable = rs->sprite_coord_enable;
103
	rctx->two_side = rs->two_side;
104

105
106
107
108
	rctx->rasterizer = rs;

	rctx->states[rs->rstate.id] = &rs->rstate;
	r600_context_pipe_state_set(&rctx->ctx, &rs->rstate);
109

110
	if (rctx->chip_class >= EVERGREEN) {
111
112
113
114
		evergreen_polygon_offset_update(rctx);
	} else {
		r600_polygon_offset_update(rctx);
	}
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
}

/* Destroy a rasterizer CSO, clearing any dangling context references to
 * it first so later binds don't touch freed memory. */
void r600_delete_rs_state(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_rasterizer *rs = (struct r600_pipe_rasterizer *)state;
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;

	if (rctx->rasterizer == rs)
		rctx->rasterizer = NULL;
	if (rctx->states[rs->rstate.id] == &rs->rstate)
		rctx->states[rs->rstate.id] = NULL;
	free(rs);
}

/* Release a sampler view: drop the reference it holds on its texture,
 * then free the view object itself. */
void r600_sampler_view_destroy(struct pipe_context *ctx,
			       struct pipe_sampler_view *state)
{
	struct r600_pipe_sampler_view *view = (struct r600_pipe_sampler_view *)state;

	pipe_resource_reference(&state->texture, NULL);
	FREE(view);
}

/* Generic CSO destructor: unhook the state from the context if it is the
 * one currently tracked, release the buffer reference held by each of
 * its register entries, then free it. */
void r600_delete_state(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_state *rstate = (struct r600_pipe_state *)state;
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	int i;

	if (rctx->states[rstate->id] == rstate)
		rctx->states[rstate->id] = NULL;

	for (i = 0; i < rstate->nregs; i++)
		pipe_resource_reference((struct pipe_resource**)&rstate->regs[i].bo, NULL);

	free(rstate);
}

154
155
156
157
158
159
void r600_bind_vertex_elements(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_vertex_element *v = (struct r600_vertex_element*)state;

	rctx->vertex_elements = v;
160
	if (v) {
161
		u_vbuf_bind_vertex_elements(rctx->vbuf_mgr, state,
162
163
						v->vmgr_elements);

164
165
		rctx->states[v->rstate.id] = &v->rstate;
		r600_context_pipe_state_set(&rctx->ctx, &v->rstate);
166
167
168
	}
}

169
170
171
void r600_delete_vertex_element(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
172
	struct r600_vertex_element *v = (struct r600_vertex_element*)state;
173

174
175
176
	if (rctx->states[v->rstate.id] == &v->rstate) {
		rctx->states[v->rstate.id] = NULL;
	}
177
178
	if (rctx->vertex_elements == state)
		rctx->vertex_elements = NULL;
179

180
	pipe_resource_reference((struct pipe_resource**)&v->fetch_shader, NULL);
181
	u_vbuf_destroy_vertex_elements(rctx->vbuf_mgr, v->vmgr_elements);
182
	FREE(state);
183
184
}

185
186
187
188
189
190

/* Forward the state tracker's index buffer to the vertex-buffer manager;
 * the draw path later reads it back from rctx->vbuf_mgr->index_buffer. */
void r600_set_index_buffer(struct pipe_context *ctx,
			   const struct pipe_index_buffer *ib)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;

	u_vbuf_set_index_buffer(rctx->vbuf_mgr, ib);
}

void r600_set_vertex_buffers(struct pipe_context *ctx, unsigned count,
			     const struct pipe_vertex_buffer *buffers)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
198
	int i;
199

200
	/* Zero states. */
201
	for (i = 0; i < count; i++) {
202
		if (!buffers[i].buffer) {
203
			if (rctx->chip_class >= EVERGREEN) {
204
205
206
207
				evergreen_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i);
			} else {
				r600_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i);
			}
208
209
		}
	}
210
	for (; i < rctx->vbuf_mgr->nr_real_vertex_buffers; i++) {
211
		if (rctx->chip_class >= EVERGREEN) {
212
213
214
215
216
217
			evergreen_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i);
		} else {
			r600_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i);
		}
	}

218
	u_vbuf_set_vertex_buffers(rctx->vbuf_mgr, count, buffers);
219
220
221
222
223
224
}

void *r600_create_vertex_elements(struct pipe_context *ctx,
				  unsigned count,
				  const struct pipe_vertex_element *elements)
{
225
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
226
227
228
229
230
231
232
	struct r600_vertex_element *v = CALLOC_STRUCT(r600_vertex_element);

	assert(count < 32);
	if (!v)
		return NULL;

	v->count = count;
233
	v->vmgr_elements =
234
		u_vbuf_create_vertex_elements(rctx->vbuf_mgr, count,
235
						  elements, v->elements);
236

237
238
239
240
241
	if (r600_vertex_elements_build_fetch_shader(rctx, v)) {
		FREE(v);
		return NULL;
	}

242
243
	return v;
}
244
245
246
247

void *r600_create_shader_state(struct pipe_context *ctx,
			       const struct pipe_shader_state *state)
{
248
	struct r600_pipe_shader *shader = CALLOC_STRUCT(r600_pipe_shader);
249
250
	int r;

251
	shader->tokens = tgsi_dup_tokens(state->tokens);
252
	shader->so = state->stream_output;
253
254

	r =  r600_pipe_shader_create(ctx, shader);
255
256
257
258
259
260
261
262
263
264
265
266
	if (r) {
		return NULL;
	}
	return shader;
}

/* Bind a pixel shader.  Emits its register state and, once both shader
 * stages are bound, rebalances the GPR split between VS and PS. */
void r600_bind_ps_shader(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;

	/* TODO delete old shader */
	rctx->ps_shader = (struct r600_pipe_shader *)state;
	if (state)
		r600_context_pipe_state_set(&rctx->ctx, &rctx->ps_shader->rstate);

	if (rctx->ps_shader && rctx->vs_shader)
		r600_adjust_gprs(rctx);
}

/* Bind a vertex shader.  Emits its register state and, once both shader
 * stages are bound, rebalances the GPR split between VS and PS. */
void r600_bind_vs_shader(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;

	/* TODO delete old shader */
	rctx->vs_shader = (struct r600_pipe_shader *)state;
	if (state)
		r600_context_pipe_state_set(&rctx->ctx, &rctx->vs_shader->rstate);

	if (rctx->ps_shader && rctx->vs_shader)
		r600_adjust_gprs(rctx);
}

void r600_delete_ps_shader(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_pipe_shader *shader = (struct r600_pipe_shader *)state;

	if (rctx->ps_shader == shader) {
		rctx->ps_shader = NULL;
	}

298
	free(shader->tokens);
299
300
301
302
303
304
305
306
307
308
309
310
311
	r600_pipe_shader_destroy(ctx, shader);
	free(shader);
}

void r600_delete_vs_shader(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_pipe_shader *shader = (struct r600_pipe_shader *)state;

	if (rctx->vs_shader == shader) {
		rctx->vs_shader = NULL;
	}

312
	free(shader->tokens);
313
314
315
	r600_pipe_shader_destroy(ctx, shader);
	free(shader);
}
316

317
318
static void r600_update_alpha_ref(struct r600_pipe_context *rctx)
{
319
	unsigned alpha_ref;
320
321
	struct r600_pipe_state rstate;

322
	alpha_ref = rctx->alpha_ref;
323
324
325
	rstate.nregs = 0;
	if (rctx->export_16bpc)
		alpha_ref &= ~0x1FFF;
326
	r600_pipe_state_add_reg(&rstate, R_028438_SX_ALPHA_REF, alpha_ref, 0xFFFFFFFF, NULL, 0);
327
328
329
330
331

	r600_context_pipe_state_set(&rctx->ctx, &rstate);
	rctx->alpha_ref_dirty = false;
}

332
333
334
335
/* Bind a constant buffer for the given shader stage at the given slot.
 *
 * User/constant data may live in a user pointer; r600_upload_const_buffer
 * replaces rbuffer/offset with a GPU-visible upload in that case.  The
 * function then programs both the ALU const-buffer registers (size +
 * cache base, the latter as a 256-byte-aligned GPU address, hence >> 8)
 * and the texture-path buffer resource used by TEX constant fetches.
 */
void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index,
			      struct pipe_resource *buffer)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_resource *rbuffer = r600_resource(buffer);
	struct r600_pipe_resource_state *rstate;
	uint64_t va_offset;
	uint32_t offset;

	/* Note that the state tracker can unbind constant buffers by
	 * passing NULL here.
	 */
	if (buffer == NULL) {
		return;
	}

	/* May replace rbuffer with a fresh upload; offset receives the
	 * start of the data within that buffer. */
	r600_upload_const_buffer(rctx, &rbuffer, &offset);
	va_offset = r600_resource_va(ctx->screen, (void*)rbuffer);
	va_offset += offset;
	va_offset >>= 8;	/* register takes the address in 256B units */

	switch (shader) {
	case PIPE_SHADER_VERTEX:
		rctx->vs_const_buffer.nregs = 0;
		r600_pipe_state_add_reg(&rctx->vs_const_buffer,
					R_028180_ALU_CONST_BUFFER_SIZE_VS_0 + index * 4,
					ALIGN_DIVUP(buffer->width0 >> 4, 16),
					0xFFFFFFFF, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vs_const_buffer,
					R_028980_ALU_CONST_CACHE_VS_0 + index * 4,
					va_offset, 0xFFFFFFFF, rbuffer, RADEON_USAGE_READ);
		r600_context_pipe_state_set(&rctx->ctx, &rctx->vs_const_buffer);

		/* Texture-path view of the same buffer (lazy-initialized). */
		rstate = &rctx->vs_const_buffer_resource[index];
		if (!rstate->id) {
			if (rctx->chip_class >= EVERGREEN) {
				evergreen_pipe_init_buffer_resource(rctx, rstate);
			} else {
				r600_pipe_init_buffer_resource(rctx, rstate);
			}
		}

		if (rctx->chip_class >= EVERGREEN) {
			evergreen_pipe_mod_buffer_resource(ctx, rstate, rbuffer, offset, 16, RADEON_USAGE_READ);
			evergreen_context_pipe_state_set_vs_resource(&rctx->ctx, rstate, index);
		} else {
			r600_pipe_mod_buffer_resource(rstate, rbuffer, offset, 16, RADEON_USAGE_READ);
			r600_context_pipe_state_set_vs_resource(&rctx->ctx, rstate, index);
		}
		break;
	case PIPE_SHADER_FRAGMENT:
		/* NOTE(review): unlike the VS case, the PS register offsets
		 * do not add "index * 4" — looks like only slot 0 is wired
		 * up for the ALU path here; confirm against callers. */
		rctx->ps_const_buffer.nregs = 0;
		r600_pipe_state_add_reg(&rctx->ps_const_buffer,
					R_028140_ALU_CONST_BUFFER_SIZE_PS_0,
					ALIGN_DIVUP(buffer->width0 >> 4, 16),
					0xFFFFFFFF, NULL, 0);
		r600_pipe_state_add_reg(&rctx->ps_const_buffer,
					R_028940_ALU_CONST_CACHE_PS_0,
					va_offset, 0xFFFFFFFF, rbuffer, RADEON_USAGE_READ);
		r600_context_pipe_state_set(&rctx->ctx, &rctx->ps_const_buffer);

		/* Texture-path view of the same buffer (lazy-initialized). */
		rstate = &rctx->ps_const_buffer_resource[index];
		if (!rstate->id) {
			if (rctx->chip_class >= EVERGREEN) {
				evergreen_pipe_init_buffer_resource(rctx, rstate);
			} else {
				r600_pipe_init_buffer_resource(rctx, rstate);
			}
		}
		if (rctx->chip_class >= EVERGREEN) {
			evergreen_pipe_mod_buffer_resource(ctx, rstate, rbuffer, offset, 16, RADEON_USAGE_READ);
			evergreen_context_pipe_state_set_ps_resource(&rctx->ctx, rstate, index);
		} else {
			r600_pipe_mod_buffer_resource(rstate, rbuffer, offset, 16, RADEON_USAGE_READ);
			r600_context_pipe_state_set_ps_resource(&rctx->ctx, rstate, index);
		}
		break;
	default:
		R600_ERR("unsupported %d\n", shader);
		return;
	}

	/* Drop the extra reference if the upload path swapped in a new
	 * buffer above. */
	if (buffer != &rbuffer->b.b.b)
		pipe_resource_reference((struct pipe_resource**)&rbuffer, NULL);
}

418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
/* Create a stream-output target wrapping @buffer, plus a tiny 4-byte
 * "filled size" buffer (zero-initialized) that the hardware uses to
 * track how much has been appended.
 *
 * \return the target, or NULL on allocation failure.  Fix: the original
 *         dereferenced the pipe_buffer_create() result without checking
 *         it, crashing on OOM.
 */
struct pipe_stream_output_target *
r600_create_so_target(struct pipe_context *ctx,
		      struct pipe_resource *buffer,
		      unsigned buffer_offset,
		      unsigned buffer_size)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_so_target *t;
	void *ptr;

	t = CALLOC_STRUCT(r600_so_target);
	if (!t) {
		return NULL;
	}

	t->b.reference.count = 1;
	t->b.context = ctx;
	pipe_resource_reference(&t->b.buffer, buffer);
	t->b.buffer_offset = buffer_offset;
	t->b.buffer_size = buffer_size;

	t->filled_size = (struct r600_resource*)
		pipe_buffer_create(ctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_STATIC, 4);
	if (!t->filled_size) {
		/* Undo the buffer reference taken above before bailing. */
		pipe_resource_reference(&t->b.buffer, NULL);
		FREE(t);
		return NULL;
	}
	ptr = rctx->ws->buffer_map(t->filled_size->buf, rctx->ctx.cs, PIPE_TRANSFER_WRITE);
	memset(ptr, 0, t->filled_size->buf->size);
	rctx->ws->buffer_unmap(t->filled_size->buf);

	return &t->b;
}

/* Release a stream-output target together with its "filled size"
 * tracking buffer. */
void r600_so_target_destroy(struct pipe_context *ctx,
			    struct pipe_stream_output_target *target)
{
	struct r600_so_target *t = (struct r600_so_target*)target;

	pipe_resource_reference(&t->b.buffer, NULL);
	pipe_resource_reference((struct pipe_resource**)&t->filled_size, NULL);
	FREE(t);
}

/* Rebind the set of stream-output targets.  Any running streamout is
 * flushed/stopped first; references are taken on the new targets and
 * dropped on stale slots past the new count. */
void r600_set_so_targets(struct pipe_context *ctx,
			 unsigned num_targets,
			 struct pipe_stream_output_target **targets,
			 unsigned append_bitmask)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	unsigned i = 0;

	/* Stop streamout. */
	if (rctx->ctx.num_so_targets)
		r600_context_streamout_end(&rctx->ctx);

	/* Reference the new targets, then drop the leftover tail. */
	for (; i < num_targets; i++)
		pipe_so_target_reference((struct pipe_stream_output_target**)&rctx->ctx.so_targets[i], targets[i]);
	for (; i < rctx->ctx.num_so_targets; i++)
		pipe_so_target_reference((struct pipe_stream_output_target**)&rctx->ctx.so_targets[i], NULL);

	rctx->ctx.num_so_targets = num_targets;
	rctx->ctx.streamout_start = num_targets != 0;
	rctx->ctx.streamout_append_bitmask = append_bitmask;
}

483
/* (Re)program the vertex-fetch buffer resources from the currently bound
 * vertex buffers.
 *
 * Two modes, chosen by the vertex-element CSO:
 *  - vbuffer_need_offset: one resource per vertex element, each pointing
 *    into its element's vertex buffer at a per-element offset;
 *  - otherwise: one resource per real vertex buffer, bound once.
 */
static void r600_vertex_buffer_update(struct r600_pipe_context *rctx)
{
	struct r600_pipe_resource_state *rstate;
	struct r600_resource *rbuffer;
	struct pipe_vertex_buffer *vertex_buffer;
	unsigned i, count, offset;

	if (rctx->vertex_elements->vbuffer_need_offset) {
		/* one resource per vertex elements */
		count = rctx->vertex_elements->count;
	} else {
		/* bind vertex buffer once */
		count = rctx->vbuf_mgr->nr_real_vertex_buffers;
	}

	for (i = 0 ; i < count; i++) {
		rstate = &rctx->fs_resource[i];

		if (rctx->vertex_elements->vbuffer_need_offset) {
			/* one resource per vertex elements */
			unsigned vbuffer_index;
			vbuffer_index = rctx->vertex_elements->elements[i].vertex_buffer_index;
			vertex_buffer = &rctx->vbuf_mgr->real_vertex_buffer[vbuffer_index];
			rbuffer = (struct r600_resource*)vertex_buffer->buffer;
			offset = rctx->vertex_elements->vbuffer_offset[i];
		} else {
			/* bind vertex buffer once */
			vertex_buffer = &rctx->vbuf_mgr->real_vertex_buffer[i];
			rbuffer = (struct r600_resource*)vertex_buffer->buffer;
			offset = 0;
		}
		/* Skip unbound slots. */
		if (vertex_buffer == NULL || rbuffer == NULL)
			continue;
		offset += vertex_buffer->buffer_offset;

		/* Lazily initialize the resource slot on first use. */
		if (!rstate->id) {
			if (rctx->chip_class >= EVERGREEN) {
				evergreen_pipe_init_buffer_resource(rctx, rstate);
			} else {
				r600_pipe_init_buffer_resource(rctx, rstate);
			}
		}

		if (rctx->chip_class >= EVERGREEN) {
			evergreen_pipe_mod_buffer_resource(&rctx->context, rstate, rbuffer, offset, vertex_buffer->stride, RADEON_USAGE_READ);
			evergreen_context_pipe_state_set_fs_resource(&rctx->ctx, rstate, i);
		} else {
			r600_pipe_mod_buffer_resource(rstate, rbuffer, offset, vertex_buffer->stride, RADEON_USAGE_READ);
			r600_context_pipe_state_set_fs_resource(&rctx->ctx, rstate, i);
		}
	}
}

536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
/* Recompile an existing shader in place (used when a state change
 * affects code generation) and re-emit its register state.
 * Returns 0 on success, the compile error code otherwise. */
static int r600_shader_rebuild(struct pipe_context * ctx, struct r600_pipe_shader * shader)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	int r;

	r600_pipe_shader_destroy(ctx, shader);
	r = r600_pipe_shader_create(ctx, shader);
	if (r)
		return r;

	r600_context_pipe_state_set(&rctx->ctx, &shader->rstate);
	return 0;
}

551
552
/* Resolve state that depends on combinations of bound objects (shaders,
 * rasterizer, framebuffer) just before a draw: clip controls, depth
 * texture flushes, sampler states (pre-evergreen), conditional pixel
 * shader rebuilds, and the alpha reference value. */
static void r600_update_derived_state(struct r600_pipe_context *rctx)
{
	struct pipe_context * ctx = (struct pipe_context*)rctx;
	struct r600_pipe_state rstate;
	unsigned user_clip_plane_enable;
	unsigned clip_dist_enable;

	/* User clip planes are unusable when the VS writes clip distances
	 * itself (or explicitly prohibits UCPs). */
	if (rctx->vs_shader->shader.clip_dist_write || rctx->vs_shader->shader.vs_prohibit_ucps)
		user_clip_plane_enable = 0;
	else
		user_clip_plane_enable = rctx->rasterizer->clip_plane_enable & 0x3F;

	clip_dist_enable = rctx->rasterizer->clip_plane_enable & rctx->vs_shader->shader.clip_dist_write;
	rstate.nregs = 0;

	/* Only emit the clip registers that actually changed. */
	if (user_clip_plane_enable != rctx->user_clip_plane_enable) {
		r600_pipe_state_add_reg(&rstate, R_028810_PA_CL_CLIP_CNTL, user_clip_plane_enable , 0x3F, NULL, 0);
		rctx->user_clip_plane_enable = user_clip_plane_enable;
	}

	if (clip_dist_enable != rctx->clip_dist_enable) {
		r600_pipe_state_add_reg(&rstate, R_02881C_PA_CL_VS_OUT_CNTL, clip_dist_enable, 0xFF, NULL, 0);
		rctx->clip_dist_enable = clip_dist_enable;
	}

	if (rstate.nregs)
		r600_context_pipe_state_set(&rctx->ctx, &rstate);

	/* Don't recurse into flushes while the blitter itself is drawing. */
	if (!rctx->blitter->running) {
		if (rctx->have_depth_fb || rctx->have_depth_texture)
			r600_flush_depth_textures(rctx);
	}

	if (rctx->chip_class < EVERGREEN) {
		r600_update_sampler_states(rctx);
	}

	/* Recompile the PS when two-sided lighting toggled, or (EG+) when
	 * a write-all shader sees a different colorbuffer count. */
	if ((rctx->ps_shader->shader.two_side != rctx->two_side) ||
	    ((rctx->chip_class >= EVERGREEN) && rctx->ps_shader->shader.fs_write_all &&
	     (rctx->ps_shader->shader.nr_cbufs != rctx->nr_cbufs))) {
		r600_shader_rebuild(&rctx->context, rctx->ps_shader);
	}

	if (rctx->alpha_ref_dirty) {
		r600_update_alpha_ref(rctx);
	}

	/* Regenerate the PS hw state when the sprite-coordinate setup no
	 * longer matches the rasterizer's. */
	if (rctx->ps_shader && rctx->sprite_coord_enable &&
		(rctx->ps_shader->sprite_coord_enable != rctx->sprite_coord_enable)) {

		if (rctx->chip_class >= EVERGREEN)
			evergreen_pipe_shader_ps(ctx, rctx->ps_shader);
		else
			r600_pipe_shader_ps(ctx, rctx->ps_shader);

		r600_context_pipe_state_set(&rctx->ctx, &rctx->ps_shader->rstate);
	}
}

611
/* Top-level draw entry point.  Validates the request, resolves derived
 * state, programs vertex/index fetch, fills the VGT register group and
 * kicks the draw packet.
 *
 * The rctx->vgt pipe state is built once (first draw) with
 * r600_pipe_state_add_reg; every subsequent draw only rewrites the
 * values via r600_pipe_state_mod_reg — so the mod_reg calls below MUST
 * stay in exactly the same order as the add_reg calls. */
void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *dinfo)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct pipe_draw_info info = *dinfo;
	struct r600_draw rdraw = {};
	struct pipe_index_buffer ib = {};
	unsigned prim, mask, ls_mask = 0;

	/* Reject empty draws (unless count comes from streamout), indexed
	 * draws without an index buffer, and unsupported primitives. */
	if ((!info.count && (info.indexed || !info.count_from_stream_output)) ||
	    (info.indexed && !rctx->vbuf_mgr->index_buffer.buffer) ||
	    !r600_conv_pipe_prim(info.mode, &prim)) {
		return;
	}

	if (!rctx->ps_shader || !rctx->vs_shader)
		return;

	r600_update_derived_state(rctx);

	u_vbuf_draw_begin(rctx->vbuf_mgr, &info);
	r600_vertex_buffer_update(rctx);

	rdraw.vgt_num_indices = info.count;
	rdraw.vgt_num_instances = info.instance_count;

	if (info.indexed) {
		/* Initialize the index buffer struct. */
		pipe_resource_reference(&ib.buffer, rctx->vbuf_mgr->index_buffer.buffer);
		ib.index_size = rctx->vbuf_mgr->index_buffer.index_size;
		ib.offset = rctx->vbuf_mgr->index_buffer.offset + info.start * ib.index_size;

		/* Translate or upload, if needed. */
		r600_translate_index_buffer(rctx, &ib, info.count);

		if (u_vbuf_resource(ib.buffer)->user_ptr) {
			r600_upload_index_buffer(rctx, &ib, info.count);
		}

		/* Initialize the r600_draw struct with index buffer info. */
		if (ib.index_size == 4) {
			rdraw.vgt_index_type = VGT_INDEX_32 |
				(R600_BIG_ENDIAN ? VGT_DMA_SWAP_32_BIT : 0);
		} else {
			rdraw.vgt_index_type = VGT_INDEX_16 |
				(R600_BIG_ENDIAN ? VGT_DMA_SWAP_16_BIT : 0);
		}
		rdraw.indices = (struct r600_resource*)ib.buffer;
		rdraw.indices_bo_offset = ib.offset;
		rdraw.vgt_draw_initiator = V_0287F0_DI_SRC_SEL_DMA;
	} else {
		/* Non-indexed: start vertex becomes the index bias fed to
		 * VGT_INDX_OFFSET below. */
		info.index_bias = info.start;
		rdraw.vgt_draw_initiator = V_0287F0_DI_SRC_SEL_AUTO_INDEX;
		if (info.count_from_stream_output) {
			rdraw.vgt_draw_initiator |= S_0287F0_USE_OPAQUE(1);

			r600_context_draw_opaque_count(&rctx->ctx, (struct r600_so_target*)info.count_from_stream_output);
		}
	}

	rctx->ctx.vs_so_stride_in_dw = rctx->vs_shader->so.stride;

	/* 4 write-enable bits per bound colorbuffer. */
	mask = (1ULL << ((unsigned)rctx->framebuffer.nr_cbufs * 4)) - 1;

	/* One-time construction of the VGT register group. */
	if (rctx->vgt.id != R600_PIPE_STATE_VGT) {
		rctx->vgt.id = R600_PIPE_STATE_VGT;
		rctx->vgt.nregs = 0;
		r600_pipe_state_add_reg(&rctx->vgt, R_008958_VGT_PRIMITIVE_TYPE, prim, 0xFFFFFFFF, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_028238_CB_TARGET_MASK, rctx->cb_target_mask & mask, 0xFFFFFFFF, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_028400_VGT_MAX_VTX_INDX, ~0, 0xFFFFFFFF, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_028404_VGT_MIN_VTX_INDX, 0, 0xFFFFFFFF, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_028408_VGT_INDX_OFFSET, info.index_bias, 0xFFFFFFFF, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, info.restart_index, 0xFFFFFFFF, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info.primitive_restart, 0xFFFFFFFF, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0, 0xFFFFFFFF, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_03CFF4_SQ_VTX_START_INST_LOC, info.start_instance, 0xFFFFFFFF, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_028A0C_PA_SC_LINE_STIPPLE,
					0,
					S_028A0C_AUTO_RESET_CNTL(3), NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_028814_PA_SU_SC_MODE_CNTL,
					0,
					S_028814_PROVOKING_VTX_LAST(1), NULL, 0);
	}

	/* Per-draw value updates — same order as the add_reg calls above. */
	rctx->vgt.nregs = 0;
	r600_pipe_state_mod_reg(&rctx->vgt, prim);
	r600_pipe_state_mod_reg(&rctx->vgt, rctx->cb_target_mask & mask);
	r600_pipe_state_mod_reg(&rctx->vgt, ~0);
	r600_pipe_state_mod_reg(&rctx->vgt, 0);
	r600_pipe_state_mod_reg(&rctx->vgt, info.index_bias);
	r600_pipe_state_mod_reg(&rctx->vgt, info.restart_index);
	r600_pipe_state_mod_reg(&rctx->vgt, info.primitive_restart);
	r600_pipe_state_mod_reg(&rctx->vgt, 0);
	r600_pipe_state_mod_reg(&rctx->vgt, info.start_instance);

	/* Line-stipple auto-reset differs for line lists vs strips. */
	if (prim == V_008958_DI_PT_LINELIST)
		ls_mask = 1;
	else if (prim == V_008958_DI_PT_LINESTRIP)
		ls_mask = 2;
	r600_pipe_state_mod_reg(&rctx->vgt, S_028A0C_AUTO_RESET_CNTL(ls_mask));

	if (info.mode == PIPE_PRIM_QUADS || info.mode == PIPE_PRIM_QUAD_STRIP || info.mode == PIPE_PRIM_POLYGON) {
		r600_pipe_state_mod_reg(&rctx->vgt, S_028814_PROVOKING_VTX_LAST(1));
	}

	r600_context_pipe_state_set(&rctx->ctx, &rctx->vgt);

	if (rctx->chip_class >= EVERGREEN) {
		evergreen_context_draw(&rctx->ctx, &rdraw);
	} else {
		r600_context_draw(&rctx->ctx, &rdraw);
	}

	/* The depth buffer now contains new data needing a flush before
	 * it can be sampled. */
	if (rctx->framebuffer.zsbuf)
	{
		struct pipe_resource *tex = rctx->framebuffer.zsbuf->texture;
		((struct r600_resource_texture *)tex)->dirty_db = TRUE;
	}

	pipe_resource_reference(&ib.buffer, NULL);
	u_vbuf_draw_end(rctx->vbuf_mgr);
}
732
733
734
735

/* Append one register write to a pipe state, resolving the register's
 * dirty-tracking block from its (range_id, block_id) pair.  The register
 * id is stored relative to the block's start offset, in dwords.
 * A non-NULL @bo must carry a usage flag; it is attached to the entry so
 * relocations can be emitted for it.
 */
void _r600_pipe_state_add_reg(struct r600_context *ctx,
			      struct r600_pipe_state *state,
			      u32 offset, u32 value, u32 mask,
			      u32 range_id, u32 block_id,
			      struct r600_resource *bo,
			      enum radeon_bo_usage usage)
{
	struct r600_range *range;
	struct r600_block *block;

	if (bo) assert(usage);

	/* Fix: check the index BEFORE writing the slot; the original only
	 * asserted after the store, i.e. after the overflow happened. */
	assert(state->nregs < R600_BLOCK_MAX_REG);

	range = &ctx->range[range_id];
	block = range->blocks[block_id];
	state->regs[state->nregs].block = block;
	state->regs[state->nregs].id = (offset - block->start_offset) >> 2;

	state->regs[state->nregs].value = value;
	state->regs[state->nregs].mask = mask;
	state->regs[state->nregs].bo = bo;
	state->regs[state->nregs].bo_usage = usage;

	state->nregs++;
	assert(state->nregs < R600_BLOCK_MAX_REG);
}

/* Append one register write to a pipe state without resolving a dirty
 * tracking block (block is left NULL; id holds the raw register offset).
 * A non-NULL @bo must carry a usage flag.
 */
void r600_pipe_state_add_reg_noblock(struct r600_pipe_state *state,
				     u32 offset, u32 value, u32 mask,
				     struct r600_resource *bo,
				     enum radeon_bo_usage usage)
{
	if (bo) assert(usage);

	/* Fix: check the index BEFORE writing the slot; the original only
	 * asserted after the store, i.e. after the overflow happened. */
	assert(state->nregs < R600_BLOCK_MAX_REG);

	state->regs[state->nregs].id = offset;
	state->regs[state->nregs].block = NULL;
	state->regs[state->nregs].value = value;
	state->regs[state->nregs].mask = mask;
	state->regs[state->nregs].bo = bo;
	state->regs[state->nregs].bo_usage = usage;

	state->nregs++;
	assert(state->nregs < R600_BLOCK_MAX_REG);
}