unsigned nr_ps_max_color_exports;
};
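+/* State that selects a hw shader variant; kept small and compared as a
+ * whole by the memcmp()-based variant lookup below. */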
+struct r600_shader_key {
+ unsigned color_two_side:1;
+ unsigned alpha_to_one:1;
+ unsigned dual_src_blend:1;
+ unsigned nr_cbufs:4;
+};
+
struct r600_pipe_shader {
struct r600_pipe_shader_selector *selector;
struct r600_pipe_shader *next_variant;
unsigned flatshade;
unsigned pa_cl_vs_out_cntl;
unsigned nr_ps_color_outputs;
- unsigned key;
+ struct r600_shader_key key;
unsigned db_shader_control;
unsigned ps_depth_export;
};
void r600_init_context_resource_functions(struct r600_context *r600);
/* r600_shader.c */
-int r600_pipe_shader_create(struct pipe_context *ctx, struct r600_pipe_shader *shader);
+int r600_pipe_shader_create(struct pipe_context *ctx,
+ struct r600_pipe_shader *shader,
+ struct r600_shader_key key);
#ifdef HAVE_OPENCL
int r600_compute_shader_create(struct pipe_context * ctx,
LLVMModuleRef mod, struct r600_bytecode * bytecode);
return 0;
}
-static int r600_shader_from_tgsi(struct r600_context * rctx, struct r600_pipe_shader *pipeshader);
+static int r600_shader_from_tgsi(struct r600_screen *rscreen,
+ struct r600_pipe_shader *pipeshader,
+ struct r600_shader_key key);
-int r600_pipe_shader_create(struct pipe_context *ctx, struct r600_pipe_shader *shader)
+int r600_pipe_shader_create(struct pipe_context *ctx,
+ struct r600_pipe_shader *shader,
+ struct r600_shader_key key)
{
static int dump_shaders = -1;
struct r600_context *rctx = (struct r600_context *)ctx;
}
}
}
- r = r600_shader_from_tgsi(rctx, shader);
+ r = r600_shader_from_tgsi(rctx->screen, shader, key);
if (r) {
R600_ERR("translation from TGSI failed !\n");
return r;
return 0;
}
-static int r600_shader_from_tgsi(struct r600_context * rctx, struct r600_pipe_shader *pipeshader)
+static int r600_shader_from_tgsi(struct r600_screen *rscreen,
+ struct r600_pipe_shader *pipeshader,
+ struct r600_shader_key key)
{
struct r600_shader *shader = &pipeshader->shader;
struct tgsi_token *tokens = pipeshader->selector->tokens;
ctx.shader = shader;
ctx.native_integers = true;
- r600_bytecode_init(ctx.bc, rctx->chip_class, rctx->family);
+ r600_bytecode_init(ctx.bc, rscreen->chip_class, rscreen->family);
ctx.tokens = tokens;
tgsi_scan_shader(tokens, &ctx.info);
tgsi_parse_init(&ctx.parse, tokens);
shader->nr_ps_color_exports = 0;
shader->nr_ps_max_color_exports = 0;
- shader->two_side = (ctx.type == TGSI_PROCESSOR_FRAGMENT) && rctx->two_side;
+ shader->two_side = key.color_two_side;
/* register allocations */
/* Values [0,127] correspond to GPR[0..127].
}
}
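+ /* color0 writes all CBs (FS_COLOR0_WRITES_ALL_CBUFS), so up to 8 color exports may be needed */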
- if (shader->fs_write_all && rctx->chip_class >= EVERGREEN)
+ if (shader->fs_write_all && rscreen->chip_class >= EVERGREEN)
shader->nr_ps_max_color_exports = 8;
if (ctx.fragcoord_input >= 0) {
case TGSI_PROCESSOR_FRAGMENT:
if (shader->output[i].name == TGSI_SEMANTIC_COLOR) {
/* never export more colors than the number of CBs */
- if (next_pixel_base && next_pixel_base >= (rctx->nr_cbufs + rctx->dual_src_blend * 1)) {
+ if (next_pixel_base && next_pixel_base >= key.nr_cbufs + key.dual_src_blend) {
/* skip export */
j--;
continue;
}
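+ /* swizzle 5 selects the constant 1.0 (alpha-to-one), 3 keeps the shader's alpha channel */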
- output[j].swizzle_w = rctx->alpha_to_one && rctx->multisample_enable && !rctx->cb0_is_integer ? 5 : 3;
+ output[j].swizzle_w = key.alpha_to_one ? 5 : 3;
output[j].array_base = next_pixel_base++;
output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
shader->nr_ps_color_exports++;
- if (shader->fs_write_all && (rctx->chip_class >= EVERGREEN)) {
- for (k = 1; k < rctx->nr_cbufs; k++) {
+ if (shader->fs_write_all && (rscreen->chip_class >= EVERGREEN)) {
+ for (k = 1; k < key.nr_cbufs; k++) {
j++;
memset(&output[j], 0, sizeof(struct r600_bytecode_output));
output[j].gpr = shader->output[i].gpr;
output[j].swizzle_x = 0;
output[j].swizzle_y = 1;
output[j].swizzle_z = 2;
- output[j].swizzle_w = rctx->alpha_to_one && rctx->multisample_enable && !rctx->cb0_is_integer ? 5 : 3;
+ output[j].swizzle_w = key.alpha_to_one ? 5 : 3;
output[j].burst_count = 1;
output[j].barrier = 1;
output[j].array_base = next_pixel_base++;
}
/* Compute the key for the hw shader variant */
-static INLINE unsigned r600_shader_selector_key(struct pipe_context * ctx,
+static INLINE struct r600_shader_key r600_shader_selector_key(struct pipe_context * ctx,
struct r600_pipe_shader_selector * sel)
{
struct r600_context *rctx = (struct r600_context *)ctx;
- unsigned key;
+ struct r600_shader_key key;
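+ /* zero everything, including padding, so variants can be compared with memcmp() */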
+ memset(&key, 0, sizeof(key));
if (sel->type == PIPE_SHADER_FRAGMENT) {
- key = rctx->two_side |
- ((rctx->alpha_to_one && rctx->multisample_enable && !rctx->cb0_is_integer) << 1) |
- (MIN2(sel->nr_ps_max_color_exports, rctx->nr_cbufs + rctx->dual_src_blend) << 2);
- } else
- key = 0;
-
+ key.color_two_side = rctx->two_side;
+ key.alpha_to_one = rctx->alpha_to_one && rctx->multisample_enable && !rctx->cb0_is_integer;
+ key.dual_src_blend = rctx->dual_src_blend;
+ key.nr_cbufs = rctx->nr_cbufs;
+ }
return key;
}
struct r600_pipe_shader_selector* sel,
unsigned *dirty)
{
- unsigned key;
+ struct r600_shader_key key;
struct r600_context *rctx = (struct r600_context *)ctx;
struct r600_pipe_shader * shader = NULL;
int r;
* This path is also used for most shaders that don't need multiple
* variants, it will cost just a computation of the key and this
* test. */
- if (likely(sel->current && sel->current->key == key)) {
+ if (likely(sel->current && memcmp(&sel->current->key, &key, sizeof(key)) == 0)) {
return 0;
}
if (sel->num_shaders > 1) {
struct r600_pipe_shader *p = sel->current, *c = p->next_variant;
- while (c && c->key != key) {
+ while (c && memcmp(&c->key, &key, sizeof(key)) != 0) {
p = c;
c = c->next_variant;
}
shader = CALLOC(1, sizeof(struct r600_pipe_shader));
shader->selector = sel;
- r = r600_pipe_shader_create(ctx, shader);
+ r = r600_pipe_shader_create(ctx, shader, key);
if (unlikely(r)) {
- R600_ERR("Failed to build shader variant (type=%u, key=%u) %d\n",
- sel->type, key, r);
+ R600_ERR("Failed to build shader variant (type=%u) %d\n",
+ sel->type, r);
sel->current = NULL;
return r;
}