X-Git-Url: https://git.libre-soc.org/?p=mesa.git;a=blobdiff_plain;f=src%2Fcompiler%2Fnir%2Fnir_linking_helpers.c;h=74508440eb1a744e9c0f3582be93357469ecfbea;hp=3128ede2f15ba551aacc719f34f4bbd1afe5582f;hb=5746af444606b77e30309d5b85bc116d64df2cf4;hpb=7b01d5c354c60058a7ab4312d6e08f1cd71e5449 diff --git a/src/compiler/nir/nir_linking_helpers.c b/src/compiler/nir/nir_linking_helpers.c index 3128ede2f15..74508440eb1 100644 --- a/src/compiler/nir/nir_linking_helpers.c +++ b/src/compiler/nir/nir_linking_helpers.c @@ -50,7 +50,7 @@ get_variable_io_mask(nir_variable *var, gl_shader_stage stage) assert(var->data.location >= 0); const struct glsl_type *type = var->type; - if (nir_is_per_vertex_io(var, stage)) { + if (nir_is_per_vertex_io(var, stage) || var->data.per_view) { assert(glsl_type_is_array(type)); type = glsl_get_array_element(type); } @@ -59,6 +59,15 @@ get_variable_io_mask(nir_variable *var, gl_shader_stage stage) return ((1ull << slots) - 1) << location; } +static uint8_t +get_num_components(nir_variable *var) +{ + if (glsl_type_is_struct_or_ifc(glsl_without_array(var->type))) + return 4; + + return glsl_get_vector_elements(glsl_without_array(var->type)); +} + static void tcs_add_output_reads(nir_shader *shader, uint64_t *read, uint64_t *patches_read) { @@ -80,12 +89,14 @@ tcs_add_output_reads(nir_shader *shader, uint64_t *read, uint64_t *patches_read) continue; nir_variable *var = nir_deref_instr_get_variable(deref); - if (var->data.patch) { - patches_read[var->data.location_frac] |= - get_variable_io_mask(var, shader->info.stage); - } else { - read[var->data.location_frac] |= - get_variable_io_mask(var, shader->info.stage); + for (unsigned i = 0; i < get_num_components(var); i++) { + if (var->data.patch) { + patches_read[var->data.location_frac + i] |= + get_variable_io_mask(var, shader->info.stage); + } else { + read[var->data.location_frac + i] |= + get_variable_io_mask(var, shader->info.stage); + } } } } @@ -98,8 +109,7 @@ tcs_add_output_reads(nir_shader *shader, uint64_t *read, uint64_t *patches_read) * * Example usage is: * - * progress = nir_remove_unused_io_vars(producer, - * &producer->outputs, + * progress = nir_remove_unused_io_vars(producer, nir_var_shader_out, * read, patches_read) || * progress; * @@ -109,13 +119,18 @@ tcs_add_output_reads(nir_shader *shader, uint64_t *read, uint64_t *patches_read) * variable is used! */ bool -nir_remove_unused_io_vars(nir_shader *shader, struct exec_list *var_list, +nir_remove_unused_io_vars(nir_shader *shader, + nir_variable_mode mode, uint64_t *used_by_other_stage, uint64_t *used_by_other_stage_patches) { bool progress = false; uint64_t *used; + assert(mode == nir_var_shader_in || mode == nir_var_shader_out); + struct exec_list *var_list = + mode == nir_var_shader_in ? 
&shader->inputs : &shader->outputs; + nir_foreach_variable_safe(var, var_list) { if (var->data.patch) used = used_by_other_stage_patches; @@ -161,22 +176,26 @@ nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer) uint64_t patches_read[4] = { 0 }, patches_written[4] = { 0 }; nir_foreach_variable(var, &producer->outputs) { - if (var->data.patch) { - patches_written[var->data.location_frac] |= - get_variable_io_mask(var, producer->info.stage); - } else { - written[var->data.location_frac] |= - get_variable_io_mask(var, producer->info.stage); + for (unsigned i = 0; i < get_num_components(var); i++) { + if (var->data.patch) { + patches_written[var->data.location_frac + i] |= + get_variable_io_mask(var, producer->info.stage); + } else { + written[var->data.location_frac + i] |= + get_variable_io_mask(var, producer->info.stage); + } } } nir_foreach_variable(var, &consumer->inputs) { - if (var->data.patch) { - patches_read[var->data.location_frac] |= - get_variable_io_mask(var, consumer->info.stage); - } else { - read[var->data.location_frac] |= - get_variable_io_mask(var, consumer->info.stage); + for (unsigned i = 0; i < get_num_components(var); i++) { + if (var->data.patch) { + patches_read[var->data.location_frac + i] |= + get_variable_io_mask(var, consumer->info.stage); + } else { + read[var->data.location_frac + i] |= + get_variable_io_mask(var, consumer->info.stage); + } } } @@ -188,10 +207,10 @@ nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer) tcs_add_output_reads(producer, read, patches_read); bool progress = false; - progress = nir_remove_unused_io_vars(producer, &producer->outputs, read, + progress = nir_remove_unused_io_vars(producer, nir_var_shader_out, read, patches_read); - progress = nir_remove_unused_io_vars(consumer, &consumer->inputs, written, + progress = nir_remove_unused_io_vars(consumer, nir_var_shader_in, written, patches_written) || progress; return progress; @@ -226,60 +245,91 @@ get_interp_loc(nir_variable *var) return INTERPOLATE_LOC_CENTER; } +static bool +is_packing_supported_for_type(const struct glsl_type *type) +{ + /* We ignore complex types such as arrays, matrices, structs and bitsizes + * other then 32bit. All other vector types should have been split into + * scalar variables by the lower_io_to_scalar pass. The only exception + * should be OpenGL xfb varyings. + * TODO: add support for more complex types? + */ + return glsl_type_is_scalar(type) && glsl_type_is_32bit(type); +} + +struct assigned_comps +{ + uint8_t comps; + uint8_t interp_type; + uint8_t interp_loc; + bool is_32bit; +}; + +/* Packing arrays and dual slot varyings is difficult so to avoid complex + * algorithms this function just assigns them their existing location for now. + * TODO: allow better packing of complex types. + */ static void -get_slot_component_masks_and_interp_types(struct exec_list *var_list, - uint8_t *comps, - uint8_t *interp_type, - uint8_t *interp_loc, - gl_shader_stage stage, - bool default_to_smooth_interp) +get_unmoveable_components_masks(struct exec_list *var_list, + struct assigned_comps *comps, + gl_shader_stage stage, + bool default_to_smooth_interp) { nir_foreach_variable_safe(var, var_list) { assert(var->data.location >= 0); - /* Only remap things that aren't built-ins. - * TODO: add TES patch support. - */ + /* Only remap things that aren't built-ins. 
*/ if (var->data.location >= VARYING_SLOT_VAR0 && - var->data.location - VARYING_SLOT_VAR0 < 32) { + var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) { const struct glsl_type *type = var->type; - if (nir_is_per_vertex_io(var, stage)) { + if (nir_is_per_vertex_io(var, stage) || var->data.per_view) { assert(glsl_type_is_array(type)); type = glsl_get_array_element(type); } + /* If we can pack this varying then don't mark the components as + * used. + */ + if (is_packing_supported_for_type(type)) + continue; + unsigned location = var->data.location - VARYING_SLOT_VAR0; + unsigned elements = - glsl_get_vector_elements(glsl_without_array(type)); + glsl_type_is_vector_or_scalar(glsl_without_array(type)) ? + glsl_get_vector_elements(glsl_without_array(type)) : 4; bool dual_slot = glsl_type_is_dual_slot(glsl_without_array(type)); unsigned slots = glsl_count_attribute_slots(type, false); + unsigned dmul = glsl_type_is_64bit(glsl_without_array(type)) ? 2 : 1; unsigned comps_slot2 = 0; for (unsigned i = 0; i < slots; i++) { - interp_type[location + i] = - get_interp_type(var, type, default_to_smooth_interp); - interp_loc[location + i] = get_interp_loc(var); - if (dual_slot) { if (i & 1) { - comps[location + i] |= ((1 << comps_slot2) - 1); + comps[location + i].comps |= ((1 << comps_slot2) - 1); } else { unsigned num_comps = 4 - var->data.location_frac; - comps_slot2 = (elements * 2) - num_comps; + comps_slot2 = (elements * dmul) - num_comps; /* Assume ARB_enhanced_layouts packing rules for doubles */ assert(var->data.location_frac == 0 || var->data.location_frac == 2); assert(comps_slot2 <= 4); - comps[location + i] |= + comps[location + i].comps |= ((1 << num_comps) - 1) << var->data.location_frac; } } else { - comps[location + i] |= - ((1 << elements) - 1) << var->data.location_frac; + comps[location + i].comps |= + ((1 << (elements * dmul)) - 1) << var->data.location_frac; } + + comps[location + i].interp_type = + get_interp_type(var, type, default_to_smooth_interp); + comps[location + i].interp_loc = get_interp_loc(var); + comps[location + i].is_32bit = + glsl_type_is_32bit(glsl_without_array(type)); } } } @@ -330,7 +380,7 @@ remap_slots_and_components(struct exec_list *var_list, gl_shader_stage stage, var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) { const struct glsl_type *type = var->type; - if (nir_is_per_vertex_io(var, stage)) { + if (nir_is_per_vertex_io(var, stage) || var->data.per_view) { assert(glsl_type_is_array(type)); type = glsl_get_array_element(type); } @@ -391,127 +441,352 @@ remap_slots_and_components(struct exec_list *var_list, gl_shader_stage stage, *p_out_slots_read = out_slots_read_tmp[1]; } -/* If there are empty components in the slot compact the remaining components - * as close to component 0 as possible. This will make it easier to fill the - * empty components with components from a different slot in a following pass. 
- */ -static void -compact_components(nir_shader *producer, nir_shader *consumer, uint8_t *comps, - uint8_t *interp_type, uint8_t *interp_loc, - bool default_to_smooth_interp) +struct varying_component { + nir_variable *var; + uint8_t interp_type; + uint8_t interp_loc; + bool is_32bit; + bool is_patch; + bool is_intra_stage_only; + bool initialised; +}; + +static int +cmp_varying_component(const void *comp1_v, const void *comp2_v) { - struct exec_list *input_list = &consumer->inputs; - struct exec_list *output_list = &producer->outputs; - struct varying_loc remap[MAX_VARYINGS_INCL_PATCH][4] = {{{0}, {0}}}; + struct varying_component *comp1 = (struct varying_component *) comp1_v; + struct varying_component *comp2 = (struct varying_component *) comp2_v; - /* Create a cursor for each interpolation type */ - unsigned cursor[4] = {0}; + /* We want patches to be order at the end of the array */ + if (comp1->is_patch != comp2->is_patch) + return comp1->is_patch ? 1 : -1; - /* We only need to pass over one stage and we choose the consumer as it seems - * to cause a larger reduction in instruction counts (tested on i965). + /* We want to try to group together TCS outputs that are only read by other + * TCS invocations and not consumed by the follow stage. */ - nir_foreach_variable(var, input_list) { + if (comp1->is_intra_stage_only != comp2->is_intra_stage_only) + return comp1->is_intra_stage_only ? 1 : -1; - /* Only remap things that aren't builtins. - * TODO: add TES patch support. - */ + /* We can only pack varyings with matching interpolation types so group + * them together. + */ + if (comp1->interp_type != comp2->interp_type) + return comp1->interp_type - comp2->interp_type; + + /* Interpolation loc must match also. */ + if (comp1->interp_loc != comp2->interp_loc) + return comp1->interp_loc - comp2->interp_loc; + + /* If everything else matches just use the original location to sort */ + return comp1->var->data.location - comp2->var->data.location; +} + +static void +gather_varying_component_info(nir_shader *producer, nir_shader *consumer, + struct varying_component **varying_comp_info, + unsigned *varying_comp_info_size, + bool default_to_smooth_interp) +{ + unsigned store_varying_info_idx[MAX_VARYINGS_INCL_PATCH][4] = {{0}}; + unsigned num_of_comps_to_pack = 0; + + /* Count the number of varying that can be packed and create a mapping + * of those varyings to the array we will pass to qsort. + */ + nir_foreach_variable(var, &producer->outputs) { + + /* Only remap things that aren't builtins. */ if (var->data.location >= VARYING_SLOT_VAR0 && - var->data.location - VARYING_SLOT_VAR0 < 32) { + var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) { /* We can't repack xfb varyings. */ if (var->data.always_active_io) continue; const struct glsl_type *type = var->type; - if (nir_is_per_vertex_io(var, consumer->info.stage)) { + if (nir_is_per_vertex_io(var, producer->info.stage) || var->data.per_view) { assert(glsl_type_is_array(type)); type = glsl_get_array_element(type); } - /* Skip types that require more complex packing handling. - * TODO: add support for these types. - */ - if (glsl_type_is_array(type) || - glsl_type_is_dual_slot(type) || - glsl_type_is_matrix(type) || - glsl_type_is_struct(type) || - glsl_type_is_64bit(type)) + if (!is_packing_supported_for_type(type)) continue; - /* We ignore complex types above and all other vector types should - * have been split into scalar variables by the lower_io_to_scalar - * pass. The only exception should by OpenGL xfb varyings. 
- */ - if (glsl_get_vector_elements(type) != 1) + unsigned loc = var->data.location - VARYING_SLOT_VAR0; + store_varying_info_idx[loc][var->data.location_frac] = + ++num_of_comps_to_pack; + } + } + + *varying_comp_info_size = num_of_comps_to_pack; + *varying_comp_info = rzalloc_array(NULL, struct varying_component, + num_of_comps_to_pack); + + nir_function_impl *impl = nir_shader_get_entrypoint(consumer); + + /* Walk over the shader and populate the varying component info array */ + nir_foreach_block(block, impl) { + nir_foreach_instr(instr, block) { + if (instr->type != nir_instr_type_intrinsic) continue; - unsigned location = var->data.location - VARYING_SLOT_VAR0; - uint8_t used_comps = comps[location]; + nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr); + if (intr->intrinsic != nir_intrinsic_load_deref && + intr->intrinsic != nir_intrinsic_interp_deref_at_centroid && + intr->intrinsic != nir_intrinsic_interp_deref_at_sample && + intr->intrinsic != nir_intrinsic_interp_deref_at_offset && + intr->intrinsic != nir_intrinsic_interp_deref_at_vertex) + continue; - /* If there are no empty components there is nothing more for us to do. - */ - if (used_comps == 0xf) + nir_deref_instr *deref = nir_src_as_deref(intr->src[0]); + if (deref->mode != nir_var_shader_in) continue; - bool found_new_offset = false; - uint8_t interp = get_interp_type(var, type, default_to_smooth_interp); - for (; cursor[interp] < 32; cursor[interp]++) { - uint8_t cursor_used_comps = comps[cursor[interp]]; + /* We only remap things that aren't builtins. */ + nir_variable *in_var = nir_deref_instr_get_variable(deref); + if (in_var->data.location < VARYING_SLOT_VAR0) + continue; - /* We couldn't find anywhere to pack the varying continue on. */ - if (cursor[interp] == location && - (var->data.location_frac == 0 || - cursor_used_comps & ((1 << (var->data.location_frac)) - 1))) - break; + unsigned location = in_var->data.location - VARYING_SLOT_VAR0; + if (location >= MAX_VARYINGS_INCL_PATCH) + continue; - /* We can only pack varyings with matching interpolation types */ - if (interp_type[cursor[interp]] != interp) + unsigned var_info_idx = + store_varying_info_idx[location][in_var->data.location_frac]; + if (!var_info_idx) + continue; + + struct varying_component *vc_info = + &(*varying_comp_info)[var_info_idx-1]; + + if (!vc_info->initialised) { + const struct glsl_type *type = in_var->type; + if (nir_is_per_vertex_io(in_var, consumer->info.stage) || + in_var->data.per_view) { + assert(glsl_type_is_array(type)); + type = glsl_get_array_element(type); + } + + vc_info->var = in_var; + vc_info->interp_type = + get_interp_type(in_var, type, default_to_smooth_interp); + vc_info->interp_loc = get_interp_loc(in_var); + vc_info->is_32bit = glsl_type_is_32bit(type); + vc_info->is_patch = in_var->data.patch; + vc_info->is_intra_stage_only = false; + vc_info->initialised = true; + } + } + } + + /* Walk over the shader and populate the varying component info array + * for varyings which are read by other TCS instances but are not consumed + * by the TES. + */ + if (producer->info.stage == MESA_SHADER_TESS_CTRL) { + impl = nir_shader_get_entrypoint(producer); + + nir_foreach_block(block, impl) { + nir_foreach_instr(instr, block) { + if (instr->type != nir_instr_type_intrinsic) continue; - /* Interpolation loc must match also. - * TODO: i965 can handle these if they don't match, but the - * radeonsi nir backend handles everything as vec4s and so expects - * this to be the same for all components. 
We could make this - * check driver specfific or drop it if NIR ever become the only - * radeonsi backend. - */ - if (interp_loc[cursor[interp]] != get_interp_loc(var)) + nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr); + if (intr->intrinsic != nir_intrinsic_load_deref) continue; - /* If the slot is empty just skip it for now, compact_var_list() - * can be called after this function to remove empty slots for us. - * TODO: finish implementing compact_var_list() requires array and - * matrix splitting. - */ - if (!cursor_used_comps) + nir_deref_instr *deref = nir_src_as_deref(intr->src[0]); + if (deref->mode != nir_var_shader_out) + continue; + + /* We only remap things that aren't builtins. */ + nir_variable *out_var = nir_deref_instr_get_variable(deref); + if (out_var->data.location < VARYING_SLOT_VAR0) continue; - uint8_t unused_comps = ~cursor_used_comps; + unsigned location = out_var->data.location - VARYING_SLOT_VAR0; + if (location >= MAX_VARYINGS_INCL_PATCH) + continue; - for (unsigned i = 0; i < 4; i++) { - uint8_t new_var_comps = 1 << i; - if (unused_comps & new_var_comps) { - remap[location][var->data.location_frac].component = i; - remap[location][var->data.location_frac].location = - cursor[interp] + VARYING_SLOT_VAR0; + unsigned var_info_idx = + store_varying_info_idx[location][out_var->data.location_frac]; + if (!var_info_idx) { + /* Something went wrong, the shader interfaces didn't match, so + * abandon packing. This can happen for example when the + * inputs are scalars but the outputs are struct members. + */ + *varying_comp_info_size = 0; + break; + } - found_new_offset = true; + struct varying_component *vc_info = + &(*varying_comp_info)[var_info_idx-1]; - /* Turn off the mask for the component we are remapping */ - if (comps[location] & 1 << var->data.location_frac) { - comps[location] ^= 1 << var->data.location_frac; - comps[cursor[interp]] |= new_var_comps; - } - break; + if (!vc_info->initialised) { + const struct glsl_type *type = out_var->type; + if (nir_is_per_vertex_io(out_var, producer->info.stage)) { + assert(glsl_type_is_array(type)); + type = glsl_get_array_element(type); } + + vc_info->var = out_var; + vc_info->interp_type = + get_interp_type(out_var, type, default_to_smooth_interp); + vc_info->interp_loc = get_interp_loc(out_var); + vc_info->is_32bit = glsl_type_is_32bit(type); + vc_info->is_patch = out_var->data.patch; + vc_info->is_intra_stage_only = true; + vc_info->initialised = true; } + } + } + } - if (found_new_offset) - break; + for (unsigned i = 0; i < *varying_comp_info_size; i++ ) { + struct varying_component *vc_info = &(*varying_comp_info)[i]; + if (!vc_info->initialised) { + /* Something went wrong, the shader interfaces didn't match, so + * abandon packing. This can happen for example when the outputs are + * scalars but the inputs are struct members. + */ + *varying_comp_info_size = 0; + break; + } + } +} + +static void +assign_remap_locations(struct varying_loc (*remap)[4], + struct assigned_comps *assigned_comps, + struct varying_component *info, + unsigned *cursor, unsigned *comp, + unsigned max_location) +{ + unsigned tmp_cursor = *cursor; + unsigned tmp_comp = *comp; + + for (; tmp_cursor < max_location; tmp_cursor++) { + + if (assigned_comps[tmp_cursor].comps) { + /* We can only pack varyings with matching interpolation types, + * interpolation loc must match also. 
+ * TODO: i965 can handle interpolation locations that don't match, + * but the radeonsi nir backend handles everything as vec4s and so + * expects this to be the same for all components. We could make this + * check driver specfific or drop it if NIR ever become the only + * radeonsi backend. + */ + if (assigned_comps[tmp_cursor].interp_type != info->interp_type || + assigned_comps[tmp_cursor].interp_loc != info->interp_loc) { + tmp_comp = 0; + continue; + } + + /* We can only pack varyings with matching types, and the current + * algorithm only supports packing 32-bit. + */ + if (!assigned_comps[tmp_cursor].is_32bit) { + tmp_comp = 0; + continue; + } + + while (tmp_comp < 4 && + (assigned_comps[tmp_cursor].comps & (1 << tmp_comp))) { + tmp_comp++; + } + } + + if (tmp_comp == 4) { + tmp_comp = 0; + continue; + } + + unsigned location = info->var->data.location - VARYING_SLOT_VAR0; + + /* Once we have assigned a location mark it as used */ + assigned_comps[tmp_cursor].comps |= (1 << tmp_comp); + assigned_comps[tmp_cursor].interp_type = info->interp_type; + assigned_comps[tmp_cursor].interp_loc = info->interp_loc; + assigned_comps[tmp_cursor].is_32bit = info->is_32bit; + + /* Assign remap location */ + remap[location][info->var->data.location_frac].component = tmp_comp++; + remap[location][info->var->data.location_frac].location = + tmp_cursor + VARYING_SLOT_VAR0; + + break; + } + + *cursor = tmp_cursor; + *comp = tmp_comp; +} + +/* If there are empty components in the slot compact the remaining components + * as close to component 0 as possible. This will make it easier to fill the + * empty components with components from a different slot in a following pass. + */ +static void +compact_components(nir_shader *producer, nir_shader *consumer, + struct assigned_comps *assigned_comps, + bool default_to_smooth_interp) +{ + struct exec_list *input_list = &consumer->inputs; + struct exec_list *output_list = &producer->outputs; + struct varying_loc remap[MAX_VARYINGS_INCL_PATCH][4] = {{{0}, {0}}}; + struct varying_component *varying_comp_info; + unsigned varying_comp_info_size; + + /* Gather varying component info */ + gather_varying_component_info(producer, consumer, &varying_comp_info, + &varying_comp_info_size, + default_to_smooth_interp); + + /* Sort varying components. */ + qsort(varying_comp_info, varying_comp_info_size, + sizeof(struct varying_component), cmp_varying_component); + + unsigned cursor = 0; + unsigned comp = 0; + + /* Set the remap array based on the sorted components */ + for (unsigned i = 0; i < varying_comp_info_size; i++ ) { + struct varying_component *info = &varying_comp_info[i]; + + assert(info->is_patch || cursor < MAX_VARYING); + if (info->is_patch) { + /* The list should be sorted with all non-patch inputs first followed + * by patch inputs. When we hit our first patch input, we need to + * reset the cursor to MAX_VARYING so we put them in the right slot. + */ + if (cursor < MAX_VARYING) { + cursor = MAX_VARYING; + comp = 0; + } + + assign_remap_locations(remap, assigned_comps, info, + &cursor, &comp, MAX_VARYINGS_INCL_PATCH); + } else { + assign_remap_locations(remap, assigned_comps, info, + &cursor, &comp, MAX_VARYING); + + /* Check if we failed to assign a remap location. This can happen if + * for example there are a bunch of unmovable components with + * mismatching interpolation types causing us to skip over locations + * that would have been useful for packing later components. 
+ * The solution is to iterate over the locations again (this should + * happen very rarely in practice). + */ + if (cursor == MAX_VARYING) { + cursor = 0; + comp = 0; + assign_remap_locations(remap, assigned_comps, info, + &cursor, &comp, MAX_VARYING); } } } + ralloc_free(varying_comp_info); + uint64_t zero = 0; uint32_t zero32 = 0; remap_slots_and_components(input_list, consumer->info.stage, remap, @@ -541,20 +816,16 @@ nir_compact_varyings(nir_shader *producer, nir_shader *consumer, assert(producer->info.stage != MESA_SHADER_FRAGMENT); assert(consumer->info.stage != MESA_SHADER_VERTEX); - uint8_t comps[32] = {0}; - uint8_t interp_type[32] = {0}; - uint8_t interp_loc[32] = {0}; + struct assigned_comps assigned_comps[MAX_VARYINGS_INCL_PATCH] = {{0}}; - get_slot_component_masks_and_interp_types(&producer->outputs, comps, - interp_type, interp_loc, - producer->info.stage, - default_to_smooth_interp); - get_slot_component_masks_and_interp_types(&consumer->inputs, comps, - interp_type, interp_loc, - consumer->info.stage, - default_to_smooth_interp); + get_unmoveable_components_masks(&producer->outputs, assigned_comps, + producer->info.stage, + default_to_smooth_interp); + get_unmoveable_components_masks(&consumer->inputs, assigned_comps, + consumer->info.stage, + default_to_smooth_interp); - compact_components(producer, consumer, comps, interp_type, interp_loc, + compact_components(producer, consumer, assigned_comps, default_to_smooth_interp); } @@ -618,7 +889,7 @@ can_replace_varying(nir_variable *out_var) if (glsl_type_is_array(out_var->type) || glsl_type_is_dual_slot(out_var->type) || glsl_type_is_matrix(out_var->type) || - glsl_type_is_struct(out_var->type)) + glsl_type_is_struct_or_ifc(out_var->type)) return false; /* Limit this pass to scalars for now to keep things simple. Most varyings @@ -789,3 +1060,247 @@ nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer) return progress; } + +/* TODO any better helper somewhere to sort a list? 
*/ + +static void +insert_sorted(struct exec_list *var_list, nir_variable *new_var) +{ + nir_foreach_variable(var, var_list) { + if (var->data.location > new_var->data.location) { + exec_node_insert_node_before(&var->node, &new_var->node); + return; + } + } + exec_list_push_tail(var_list, &new_var->node); +} + +static void +sort_varyings(struct exec_list *var_list) +{ + struct exec_list new_list; + exec_list_make_empty(&new_list); + nir_foreach_variable_safe(var, var_list) { + exec_node_remove(&var->node); + insert_sorted(&new_list, var); + } + exec_list_move_nodes_to(&new_list, var_list); +} + +void +nir_assign_io_var_locations(struct exec_list *var_list, unsigned *size, + gl_shader_stage stage) +{ + unsigned location = 0; + unsigned assigned_locations[VARYING_SLOT_TESS_MAX]; + uint64_t processed_locs[2] = {0}; + + sort_varyings(var_list); + + int UNUSED last_loc = 0; + bool last_partial = false; + nir_foreach_variable(var, var_list) { + const struct glsl_type *type = var->type; + if (nir_is_per_vertex_io(var, stage) || var->data.per_view) { + assert(glsl_type_is_array(type)); + type = glsl_get_array_element(type); + } + + int base; + if (var->data.mode == nir_var_shader_in && stage == MESA_SHADER_VERTEX) + base = VERT_ATTRIB_GENERIC0; + else if (var->data.mode == nir_var_shader_out && + stage == MESA_SHADER_FRAGMENT) + base = FRAG_RESULT_DATA0; + else + base = VARYING_SLOT_VAR0; + + unsigned var_size; + if (var->data.compact) { + /* If we are inside a partial compact, + * don't allow another compact to be in this slot + * if it starts at component 0. + */ + if (last_partial && var->data.location_frac == 0) { + location++; + } + + /* compact variables must be arrays of scalars */ + assert(glsl_type_is_array(type)); + assert(glsl_type_is_scalar(glsl_get_array_element(type))); + unsigned start = 4 * location + var->data.location_frac; + unsigned end = start + glsl_get_length(type); + var_size = end / 4 - location; + last_partial = end % 4 != 0; + } else { + /* Compact variables bypass the normal varying compacting pass, + * which means they cannot be in the same vec4 slot as a normal + * variable. If part of the current slot is taken up by a compact + * variable, we need to go to the next one. + */ + if (last_partial) { + location++; + last_partial = false; + } + var_size = glsl_count_attribute_slots(type, false); + } + + /* Builtins don't allow component packing so we only need to worry about + * user defined varyings sharing the same location. + */ + bool processed = false; + if (var->data.location >= base) { + unsigned glsl_location = var->data.location - base; + + for (unsigned i = 0; i < var_size; i++) { + if (processed_locs[var->data.index] & + ((uint64_t)1 << (glsl_location + i))) + processed = true; + else + processed_locs[var->data.index] |= + ((uint64_t)1 << (glsl_location + i)); + } + } + + /* Because component packing allows varyings to share the same location + * we may have already have processed this location. + */ + if (processed) { + unsigned driver_location = assigned_locations[var->data.location]; + var->data.driver_location = driver_location; + + /* An array may be packed such that is crosses multiple other arrays + * or variables, we need to make sure we have allocated the elements + * consecutively if the previously proccessed var was shorter than + * the current array we are processing. + * + * NOTE: The code below assumes the var list is ordered in ascending + * location order. 
+ */ + assert(last_loc <= var->data.location); + last_loc = var->data.location; + unsigned last_slot_location = driver_location + var_size; + if (last_slot_location > location) { + unsigned num_unallocated_slots = last_slot_location - location; + unsigned first_unallocated_slot = var_size - num_unallocated_slots; + for (unsigned i = first_unallocated_slot; i < var_size; i++) { + assigned_locations[var->data.location + i] = location; + location++; + } + } + continue; + } + + for (unsigned i = 0; i < var_size; i++) { + assigned_locations[var->data.location + i] = location + i; + } + + var->data.driver_location = location; + location += var_size; + } + + if (last_partial) + location++; + + *size = location; +} + +static uint64_t +get_linked_variable_location(unsigned location, bool patch) +{ + if (!patch) + return location; + + /* Reserve locations 0...3 for special patch variables + * like tess factors and bounding boxes, and the generic patch + * variables will come after them. + */ + if (location >= VARYING_SLOT_PATCH0) + return location - VARYING_SLOT_PATCH0 + 4; + else if (location >= VARYING_SLOT_TESS_LEVEL_OUTER && + location <= VARYING_SLOT_BOUNDING_BOX1) + return location - VARYING_SLOT_TESS_LEVEL_OUTER; + else + unreachable("Unsupported variable in get_linked_variable_location."); +} + +static uint64_t +get_linked_variable_io_mask(nir_variable *variable, gl_shader_stage stage) +{ + const struct glsl_type *type = variable->type; + + if (nir_is_per_vertex_io(variable, stage)) { + assert(glsl_type_is_array(type)); + type = glsl_get_array_element(type); + } + + unsigned slots = glsl_count_attribute_slots(type, false); + if (variable->data.compact) { + unsigned component_count = variable->data.location_frac + glsl_get_length(type); + slots = DIV_ROUND_UP(component_count, 4); + } + + uint64_t mask = u_bit_consecutive64(0, slots); + return mask; +} + +nir_linked_io_var_info +nir_assign_linked_io_var_locations(nir_shader *producer, nir_shader *consumer) +{ + assert(producer); + assert(consumer); + + uint64_t producer_output_mask = 0; + uint64_t producer_patch_output_mask = 0; + + nir_foreach_variable(variable, &producer->outputs) { + uint64_t mask = get_linked_variable_io_mask(variable, producer->info.stage); + uint64_t loc = get_linked_variable_location(variable->data.location, variable->data.patch); + + if (variable->data.patch) + producer_patch_output_mask |= mask << loc; + else + producer_output_mask |= mask << loc; + } + + uint64_t consumer_input_mask = 0; + uint64_t consumer_patch_input_mask = 0; + + nir_foreach_variable(variable, &consumer->inputs) { + uint64_t mask = get_linked_variable_io_mask(variable, consumer->info.stage); + uint64_t loc = get_linked_variable_location(variable->data.location, variable->data.patch); + + if (variable->data.patch) + consumer_patch_input_mask |= mask << loc; + else + consumer_input_mask |= mask << loc; + } + + uint64_t io_mask = producer_output_mask | consumer_input_mask; + uint64_t patch_io_mask = producer_patch_output_mask | consumer_patch_input_mask; + + nir_foreach_variable(variable, &producer->outputs) { + uint64_t loc = get_linked_variable_location(variable->data.location, variable->data.patch); + + if (variable->data.patch) + variable->data.driver_location = util_bitcount64(patch_io_mask & u_bit_consecutive64(0, loc)) * 4; + else + variable->data.driver_location = util_bitcount64(io_mask & u_bit_consecutive64(0, loc)) * 4; + } + + nir_foreach_variable(variable, &consumer->inputs) { + uint64_t loc = 
get_linked_variable_location(variable->data.location, variable->data.patch); + + if (variable->data.patch) + variable->data.driver_location = util_bitcount64(patch_io_mask & u_bit_consecutive64(0, loc)) * 4; + else + variable->data.driver_location = util_bitcount64(io_mask & u_bit_consecutive64(0, loc)) * 4; + } + + nir_linked_io_var_info result = { + .num_linked_io_vars = util_bitcount64(io_mask), + .num_linked_patch_io_vars = util_bitcount64(patch_io_mask), + }; + + return result; +}
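
/*
 * A minimal driver-side usage sketch of the linking helpers this patch
 * touches, for one producer/consumer shader pair.  Only the entry points
 * visible in the diff above (nir_remove_unused_varyings,
 * nir_link_opt_varyings, nir_compact_varyings,
 * nir_assign_linked_io_var_locations) are real NIR API; the wrapper name
 * link_stage_pair, the assumption that varyings were already split to
 * scalars, and the exact pass ordering are illustrative assumptions, not
 * part of this patch.
 */
#include "nir.h"

static void
link_stage_pair(nir_shader *producer, nir_shader *consumer,
                bool default_to_smooth_interp)
{
   /* Replace consumer inputs with constants or uniform loads where the
    * producer only ever writes such a value across the interface. */
   bool progress = nir_link_opt_varyings(producer, consumer);

   /* Remove producer outputs the consumer never reads and consumer inputs
    * the producer never writes. */
   progress |= nir_remove_unused_varyings(producer, consumer);

   /* Drivers normally re-run their cleanup passes (DCE and friends) here
    * when progress was made so the dead variables really disappear. */
   (void)progress;

   /* Pack scalar 32-bit varyings into components left unused by the
    * unmoveable slots (built-ins, xfb, arrays, 64-bit types, ...). */
   nir_compact_varyings(producer, consumer, default_to_smooth_interp);

   /* Assign matching driver_location values on both sides of the
    * interface and report how many vec4 slots were linked. */
   nir_linked_io_var_info io =
      nir_assign_linked_io_var_locations(producer, consumer);
   (void)io.num_linked_io_vars;
   (void)io.num_linked_patch_io_vars;
}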