}
static enum mali_format
-pan_format_from_glsl(const struct glsl_type *type, unsigned frac)
+pan_format_from_glsl(const struct glsl_type *type, unsigned precision, unsigned frac)
{
const struct glsl_type *column = glsl_without_array_or_matrix(type);
enum glsl_base_type glsl_base = glsl_get_base_type(column);
unsigned base = nir_alu_type_get_base_type(t);
unsigned size = nir_alu_type_get_type_size(t);
+ /* Demote to fp16 where possible. int16 varyings are TODO, as the hw
+ * will saturate instead of wrap, which is not conformant, so we would
+ * need to insert i2i16/u2u16 instructions before the st_vary_32i/32u
+ * to get the intended behaviour */
+
+ bool is_16 = (precision == GLSL_PRECISION_MEDIUM)
+ || (precision == GLSL_PRECISION_LOW);
+
+ if (is_16 && base == nir_type_float)
+ size = 16;
+ else
+ size = 32;
+
return pan_format_from_nir_base(base) |
pan_format_from_nir_size(base, size) |
MALI_NR_CHANNELS(chan);
case nir_type_uint16:
return BIFROST_BLEND_U16;
default:
- DBG("Unsupported blend shader type for NIR alu type %d", nir_type);
- assert(0);
+ unreachable("Unsupported blend shader type for NIR alu type");
return 0;
}
}
.alpha_ref = state->alpha_state.ref_value
};
+ memcpy(program.rt_formats, state->rt_formats, sizeof(program.rt_formats));
+
if (dev->quirks & IS_BIFROST) {
bifrost_compile_shader_nir(s, &program, dev->gpu_id);
} else {
midgard_compile_shader_nir(s, &program, false, 0, dev->gpu_id,
- pan_debug & PAN_DBG_PRECOMPILE);
+ dev->debug & PAN_DBG_PRECOMPILE, false);
}
/* Prepare the compiled binary for upload */
* that's how I'd do it. */
if (size) {
- state->bo = pan_bo_create(dev, size, PAN_BO_EXECUTE);
+ state->bo = panfrost_bo_create(dev, size, PAN_BO_EXECUTE);
memcpy(state->bo->cpu, dst, size);
}
if (s->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL))
state->writes_stencil = true;
+ uint64_t outputs_read = s->info.outputs_read;
+ if (outputs_read & BITFIELD64_BIT(FRAG_RESULT_COLOR))
+ outputs_read |= BITFIELD64_BIT(FRAG_RESULT_DATA0);
+
+ state->outputs_read = outputs_read >> FRAG_RESULT_DATA0;
+
/* List of reasons we need to execute frag shaders when things
* are masked off */
/* Record the varying mapping for the command stream's bookkeeping */
- struct exec_list *l_varyings =
- stage == MESA_SHADER_VERTEX ? &s->outputs : &s->inputs;
+ nir_variable_mode varying_mode =
+ stage == MESA_SHADER_VERTEX ? nir_var_shader_out : nir_var_shader_in;
- nir_foreach_variable(var, l_varyings) {
+ nir_foreach_variable_with_modes(var, s, varying_mode) {
unsigned loc = var->data.driver_location;
unsigned sz = glsl_count_attribute_slots(var->type, FALSE);
for (int c = 0; c < sz; ++c) {
state->varyings_loc[loc + c] = var->data.location + c;
- state->varyings[loc + c] = pan_format_from_glsl(var->type, var->data.location_frac);
+ state->varyings[loc + c] = pan_format_from_glsl(var->type,
+ var->data.precision, var->data.location_frac);
}
}
}