Commit 811c05df authored by Jason Ekstrand

intel/nir: Take nir_shader*s in brw_nir_link_shaders

Since NIR_PASS no longer swaps out the NIR pointer when NIR_TEST_* is
enabled, we can just take a single pointer and not a pointer to pointer.
Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
parent bb67a99a
......@@ -796,63 +796,61 @@ brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir,
/* Links a producer/consumer shader pair by lowering, optimizing, and
 * dead-stripping the varyings between them.
 *
 * Post-commit form: takes plain nir_shader pointers.  Since NIR_PASS no
 * longer swaps out the NIR pointer when NIR_TEST_* is enabled, there is no
 * need for the old pointer-to-pointer parameters.
 *
 * Both shaders are modified in place.  producer feeds consumer
 * (e.g. VS -> FS); the caller guarantees both are non-NULL.
 */
void
brw_nir_link_shaders(const struct brw_compiler *compiler,
                     nir_shader *producer, nir_shader *consumer)
{
   nir_lower_io_arrays_to_elements(producer, consumer);
   nir_validate_shader(producer, "after nir_lower_io_arrays_to_elements");
   nir_validate_shader(consumer, "after nir_lower_io_arrays_to_elements");

   const bool p_is_scalar = compiler->scalar_stage[producer->info.stage];
   const bool c_is_scalar = compiler->scalar_stage[consumer->info.stage];

   /* Splitting I/O to scalars early only helps when both sides of the
    * interface are scalar backends.
    */
   if (p_is_scalar && c_is_scalar) {
      NIR_PASS_V(producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
      NIR_PASS_V(consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
      brw_nir_optimize(producer, compiler, p_is_scalar, false);
      brw_nir_optimize(consumer, compiler, c_is_scalar, false);
   }

   /* Cross-stage varying optimization only rewrites the consumer, so only
    * the consumer needs re-optimizing when it reports progress.
    */
   if (nir_link_opt_varyings(producer, consumer))
      brw_nir_optimize(consumer, compiler, c_is_scalar, false);

   NIR_PASS_V(producer, nir_remove_dead_variables, nir_var_shader_out);
   NIR_PASS_V(consumer, nir_remove_dead_variables, nir_var_shader_in);

   if (nir_remove_unused_varyings(producer, consumer)) {
      NIR_PASS_V(producer, nir_lower_global_vars_to_local);
      NIR_PASS_V(consumer, nir_lower_global_vars_to_local);

      /* The backend might not be able to handle indirects on
       * temporaries so we need to lower indirects on any of the
       * varyings we have demoted here.
       */
      NIR_PASS_V(producer, nir_lower_indirect_derefs,
                 brw_nir_no_indirect_mask(compiler, producer->info.stage));
      NIR_PASS_V(consumer, nir_lower_indirect_derefs,
                 brw_nir_no_indirect_mask(compiler, consumer->info.stage));

      brw_nir_optimize(producer, compiler, p_is_scalar, false);
      brw_nir_optimize(consumer, compiler, c_is_scalar, false);
   }

   NIR_PASS_V(producer, nir_lower_io_to_vector, nir_var_shader_out);
   NIR_PASS_V(producer, nir_opt_combine_stores, nir_var_shader_out);
   NIR_PASS_V(consumer, nir_lower_io_to_vector, nir_var_shader_in);

   if (producer->info.stage != MESA_SHADER_TESS_CTRL) {
      /* Calling lower_io_to_vector creates output variable writes with
       * write-masks.  On non-TCS outputs, the back-end can't handle it and we
       * need to call nir_lower_io_to_temporaries to get rid of them.  This,
       * in turn, creates temporary variables and extra copy_deref intrinsics
       * that we need to clean up.
       */
      NIR_PASS_V(producer, nir_lower_io_to_temporaries,
                 nir_shader_get_entrypoint(producer), true, false);
      NIR_PASS_V(producer, nir_lower_global_vars_to_local);
      NIR_PASS_V(producer, nir_split_var_copies);
      NIR_PASS_V(producer, nir_lower_var_copies);
   }
}
......
......@@ -98,7 +98,7 @@ void brw_preprocess_nir(const struct brw_compiler *compiler,
/* Link producer/consumer NIR shaders; both are modified in place.
 * Takes plain pointers — NIR_PASS no longer swaps the nir_shader out,
 * so pointer-to-pointer parameters are unnecessary.
 */
void
brw_nir_link_shaders(const struct brw_compiler *compiler,
                     nir_shader *producer, nir_shader *consumer);

bool brw_nir_lower_cs_intrinsics(nir_shader *nir,
                                 unsigned dispatch_width);
......
......@@ -656,7 +656,7 @@ anv_pipeline_link_vs(const struct brw_compiler *compiler,
struct anv_pipeline_stage *next_stage)
{
if (next_stage)
brw_nir_link_shaders(compiler, &vs_stage->nir, &next_stage->nir);
brw_nir_link_shaders(compiler, vs_stage->nir, next_stage->nir);
}
static const unsigned *
......@@ -720,7 +720,7 @@ anv_pipeline_link_tcs(const struct brw_compiler *compiler,
{
assert(tes_stage && tes_stage->stage == MESA_SHADER_TESS_EVAL);
brw_nir_link_shaders(compiler, &tcs_stage->nir, &tes_stage->nir);
brw_nir_link_shaders(compiler, tcs_stage->nir, tes_stage->nir);
nir_lower_patch_vertices(tes_stage->nir,
tcs_stage->nir->info.tess.tcs_vertices_out,
......@@ -764,7 +764,7 @@ anv_pipeline_link_tes(const struct brw_compiler *compiler,
struct anv_pipeline_stage *next_stage)
{
if (next_stage)
brw_nir_link_shaders(compiler, &tes_stage->nir, &next_stage->nir);
brw_nir_link_shaders(compiler, tes_stage->nir, next_stage->nir);
}
static const unsigned *
......@@ -791,7 +791,7 @@ anv_pipeline_link_gs(const struct brw_compiler *compiler,
struct anv_pipeline_stage *next_stage)
{
if (next_stage)
brw_nir_link_shaders(compiler, &gs_stage->nir, &next_stage->nir);
brw_nir_link_shaders(compiler, gs_stage->nir, next_stage->nir);
}
static const unsigned *
......
......@@ -306,8 +306,8 @@ brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
continue;
brw_nir_link_shaders(compiler,
&shProg->_LinkedShaders[i]->Program->nir,
&shProg->_LinkedShaders[next]->Program->nir);
shProg->_LinkedShaders[i]->Program->nir,
shProg->_LinkedShaders[next]->Program->nir);
next = i;
}
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment