#include "compiler/shader_enums.h"
#include "compiler/nir/nir.h"
#include "util/bitscan.h"
+#include "util/disk_cache.h"
-#include "ir3.h"
+#include "ir3_compiler.h"
struct glsl_type;
IR3_DP_CS_COUNT = 8, /* must be aligned to vec4 */
/* vertex shader driver params: */
- IR3_DP_VTXID_BASE = 0,
- IR3_DP_VTXCNT_MAX = 1,
+ IR3_DP_DRAWID = 0,
+ IR3_DP_VTXID_BASE = 1,
IR3_DP_INSTID_BASE = 2,
+ IR3_DP_VTXCNT_MAX = 3,
/* user-clip-plane components, up to 8x vec4's: */
IR3_DP_UCP0_X = 4,
/* .... */
#define IR3_MAX_SO_OUTPUTS 64
#define IR3_MAX_UBO_PUSH_RANGES 32
+/* Barycentric interpolation (i,j) coordinate variants.  Mirrors the
+ * SYSTEM_VALUE_BARYCENTRIC_* enum but starting from 0, so the values
+ * can double as compact indices.  Keep the order in sync with the
+ * SYSTEM_VALUE_ enum, and keep IJ_COUNT last.
+ */
+enum ir3_bary {
+	IJ_PERSP_PIXEL,
+	IJ_PERSP_SAMPLE,
+	IJ_PERSP_CENTROID,
+	IJ_PERSP_SIZE,
+	IJ_LINEAR_PIXEL,
+	IJ_LINEAR_CENTROID,
+	IJ_LINEAR_SAMPLE,
+	IJ_COUNT,   /* sentinel: number of barycentric variants */
+};
+
/**
* Description of a lowered UBO.
*/
uint32_t off[IR3_MAX_SHADER_IMAGES];
} image_dims;
- unsigned immediate_idx;
unsigned immediates_count;
unsigned immediates_size;
- struct {
- uint32_t val[4];
- } *immediates;
+ uint32_t *immediates;
/* State of ubo access lowered to push consts: */
struct ir3_ubo_analysis_state ubo_state;
unsigned tessellation : 2;
unsigned has_gs : 1;
+
+ /* Whether this variant sticks to the "safe" maximum constlen,
+ * which guarantees that the combined stages will never go over
+ * the limit:
+ */
+ unsigned safe_constlen : 1;
+
+ /* Whether gl_Layer must be forced to 0 because it isn't written. */
+ unsigned layer_zero : 1;
};
uint32_t global;
};
if (last_key->rasterflat != key->rasterflat)
return true;
+ if (last_key->layer_zero != key->layer_zero)
+ return true;
+
if (last_key->ucp_enables != key->ucp_enables)
return true;
+ if (last_key->safe_constlen != key->safe_constlen)
+ return true;
+
return false;
}
if (last_key->ucp_enables != key->ucp_enables)
return true;
+ if (last_key->safe_constlen != key->safe_constlen)
+ return true;
+
return false;
}
/* Represents half register in regid */
#define HALF_REG_ID 0x100
+/**
+ * Shader variant which contains the actual hw shader instructions,
+ * and necessary info for shader state setup.
+ */
struct ir3_shader_variant {
struct fd_bo *bo;
struct ir3_shader_variant *nonbinning;
// };
- struct ir3_info info;
- struct ir3 *ir;
+ struct ir3 *ir; /* freed after assembling machine instructions */
+
+ /* shader variants form a linked list: */
+ struct ir3_shader_variant *next;
+
+ /* replicated here to avoid passing extra ptrs everywhere: */
+ gl_shader_stage type;
+ struct ir3_shader *shader;
+
+ /*
+ * Below here is serialized when written to disk cache:
+ */
/* The actual binary shader instructions, size given by info.sizedwords: */
uint32_t *bin;
+ struct ir3_const_state *const_state;
+
+ /*
+ * The following macros are used by the shader disk cache save/
+ * restore paths to serialize/deserialize the variant. Any
+ * pointers that require special handling in store_variant()
+ * and retrieve_variant() should go above here.
+ */
+#define VARIANT_CACHE_START offsetof(struct ir3_shader_variant, info)
+#define VARIANT_CACHE_PTR(v) (((char *)v) + VARIANT_CACHE_START)
+#define VARIANT_CACHE_SIZE (sizeof(struct ir3_shader_variant) - VARIANT_CACHE_START)
+
+ struct ir3_info info;
+
/* Levels of nesting of flow control:
*/
unsigned branchstack;
*/
unsigned constlen;
- struct ir3_const_state *const_state;
-
/* About Linkage:
* + Let the frag shader determine the position/compmask for the
* varyings, since it is the place where we know if the varying
uint8_t regid;
bool half : 1;
} outputs[32 + 2]; /* +POSITION +PSIZE */
- bool writes_pos, writes_smask, writes_psize;
+ bool writes_pos, writes_smask, writes_psize, writes_stencilref;
/* Size in dwords of all outputs for VS, size of entire patch for HS. */
uint32_t output_size;
unsigned orig_idx[16];
} astc_srgb;
- /* shader variants form a linked list: */
- struct ir3_shader_variant *next;
-
- /* replicated here to avoid passing extra ptrs everywhere: */
- gl_shader_stage type;
- struct ir3_shader *shader;
-
/* texture sampler pre-dispatches */
uint32_t num_sampler_prefetch;
struct ir3_sampler_prefetch sampler_prefetch[IR3_MAX_SAMPLER_PREFETCH];
}
}
+/* Returns whether a separate binning-pass VS variant exists for this
+ * key.  Tess currently gets no binning variant at all, and with GS
+ * there is no cross-stage VS+GS optimization so the full VS+GS runs
+ * in the binning pass -- in both cases there is no binning VS.
+ */
+static inline bool
+ir3_has_binning_vs(const struct ir3_shader_key *key)
+{
+	return !(key->tessellation || key->has_gs);
+}
+/**
+ * Represents a shader at the API level, before state-specific variants are
+ * generated.
+ */
struct ir3_shader {
gl_shader_stage type;
unsigned num_reserved_user_consts;
+ bool nir_finalized;
struct nir_shader *nir;
struct ir3_stream_output_info stream_output;
struct ir3_shader_variant *variants;
mtx_t variants_lock;
+ cache_key cache_key; /* shader disk-cache key */
+
/* Bitmask of bits of the shader key used by this shader. Used to avoid
* recompiles for GL NOS that doesn't actually apply to the shader.
*/
return v->const_state;
}
+/* Given a variant, calculate the maximum constlen it can have.
+ *
+ * Compute has its own dedicated limit and is unaffected by the
+ * "safe" clamp; for the remaining stages, a variant compiled with
+ * key.safe_constlen uses the conservative limit that guarantees
+ * all combined stages fit, otherwise the per-stage maximum applies.
+ */
+
+static inline unsigned
+ir3_max_const(const struct ir3_shader_variant *v)
+{
+	const struct ir3_compiler *compiler = v->shader->compiler;
+
+	if (v->shader->type == MESA_SHADER_COMPUTE)
+		return compiler->max_const_compute;
+
+	if (v->key.safe_constlen)
+		return compiler->max_const_safe;
+
+	return (v->shader->type == MESA_SHADER_FRAGMENT) ?
+		compiler->max_const_frag : compiler->max_const_geom;
+}
+
+
void * ir3_shader_assemble(struct ir3_shader_variant *v);
struct ir3_shader_variant * ir3_shader_get_variant(struct ir3_shader *shader,
const struct ir3_shader_key *key, bool binning_pass, bool *created);
struct ir3_shader * ir3_shader_from_nir(struct ir3_compiler *compiler, nir_shader *nir,
unsigned reserved_user_consts, struct ir3_stream_output_info *stream_output);
+uint32_t ir3_trim_constlen(struct ir3_shader_variant **variants,
+ const struct ir3_compiler *compiler);
void ir3_shader_destroy(struct ir3_shader *shader);
void ir3_shader_disasm(struct ir3_shader_variant *so, uint32_t *bin, FILE *out);
uint64_t ir3_shader_outputs(const struct ir3_shader *so);
/* location for fixed-function gl_PrimitiveID passthrough */
uint8_t primid_loc;
+
+ /* location for fixed-function gl_ViewIndex passthrough */
+ uint8_t viewid_loc;
};
static inline void
int j = -1, k;
l->primid_loc = 0xff;
+ l->viewid_loc = 0xff;
while (l->cnt < ARRAY_SIZE(l->var)) {
j = ir3_next_varying(fs, j);
l->primid_loc = fs->inputs[j].inloc;
}
+ if (fs->inputs[j].slot == VARYING_SLOT_VIEW_INDEX) {
+ assert(k < 0);
+ l->viewid_loc = fs->inputs[j].inloc;
+ }
+
ir3_link_add(l, k >= 0 ? vs->outputs[k].regid : default_regid,
fs->inputs[j].compmask, fs->inputs[j].inloc);
}