};
-static const struct shader_asm_info shaders_asm[] = {
- /* fills */
+/* paint types */
+static const struct shader_asm_info shaders_paint_asm[] = {
{VEGA_SOLID_FILL_SHADER, solid_fill,
VG_FALSE, 0, 1, 0, 0, 0, 0},
{VEGA_LINEAR_GRADIENT_SHADER, linear_grad,
 VG_TRUE, 0, 5, 0, 1, 0, 5},
{VEGA_RADIAL_GRADIENT_SHADER, radial_grad,
VG_TRUE, 0, 5, 0, 1, 0, 6},
{VEGA_PATTERN_SHADER, pattern,
- VG_TRUE, 1, 4, 0, 1, 0, 5},
+ VG_TRUE, 1, 4, 0, 1, 0, 5}
+};
- /* image draw modes */
+/* image draw modes */
+static const struct shader_asm_info shaders_image_asm[] = {
{VEGA_IMAGE_NORMAL_SHADER, image_normal,
VG_TRUE, 0, 0, 3, 1, 0, 0},
{VEGA_IMAGE_MULTIPLY_SHADER, image_multiply,
VG_TRUE, 0, 0, 3, 1, 0, 2},
{VEGA_IMAGE_STENCIL_SHADER, image_stencil,
- VG_TRUE, 0, 0, 3, 1, 0, 2},
+ VG_TRUE, 0, 0, 3, 1, 0, 2}
+};
+/* mask */
+static const struct shader_asm_info shaders_mask_asm[] = {
{VEGA_MASK_SHADER, mask,
- VG_TRUE, 0, 0, 1, 1, 0, 2},
+ VG_TRUE, 0, 0, 1, 1, 0, 2}
+};
- /* extra blend modes */
+/* extra blend modes */
+static const struct shader_asm_info shaders_blend_asm[] = {
{VEGA_BLEND_MULTIPLY_SHADER, blend_multiply,
VG_TRUE, 1, 1, 2, 1, 0, 5},
{VEGA_BLEND_SCREEN_SHADER, blend_screen,
VG_TRUE, 1, 1, 2, 1, 0, 6},
 {VEGA_BLEND_DARKEN_SHADER, blend_darken,
 VG_TRUE, 1, 1, 2, 1, 0, 6},
{VEGA_BLEND_LIGHTEN_SHADER, blend_lighten,
VG_TRUE, 1, 1, 2, 1, 0, 6},
+};
- /* premultiply */
+/* premultiply */
+static const struct shader_asm_info shaders_premultiply_asm[] = {
{VEGA_PREMULTIPLY_SHADER, premultiply,
VG_FALSE, 0, 0, 0, 0, 0, 1},
{VEGA_UNPREMULTIPLY_SHADER, unpremultiply,
VG_FALSE, 0, 0, 0, 0, 0, 1},
+};
- /* color transform to black and white */
+/* color transform to black and white */
+static const struct shader_asm_info shaders_bw_asm[] = {
{VEGA_BW_SHADER, color_bw,
VG_FALSE, 1, 1, 0, 0, 0, 3},
};
+
#endif
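A note on the split tables above: the dispatch code later in this patch indexes each table positionally, so row N of a stage's table must hold the shader whose per-stage value is N+1 (see the enum changes below). A minimal illustration of that invariant, assuming the tables and enum values introduced by this patch:

   /* Row order must track the 1-based stage values, e.g.: */
   assert(shaders_paint_asm[0].id == VEGA_SOLID_FILL_SHADER);   /* value 1 */
   assert(shaders_paint_asm[3].id == VEGA_PATTERN_SHADER);      /* value 4 */
   assert(shaders_blend_asm[2].id == VEGA_BLEND_DARKEN_SHADER); /* value 3 */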
/* Essentially we construct an uber-shader based on the state
* of the pipeline. The stages are:
- * 1) Fill (mandatory, solid color/gradient/pattern/image draw)
- * 2) Image composition (image mode multiply and stencil)
+ * 1) Paint generation (color/gradient/pattern)
+ * 2) Image composition (normal/multiply/stencil)
* 3) Mask
* 4) Extended blend (multiply/screen/darken/lighten)
* 5) Premultiply/Unpremultiply
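With ids now packed as per-stage bitfields (see the SHADERS_*_SHIFT and SHADERS_GET_*_SHADER macros this patch adds to the header below), a combined key is still built by OR-ing together one value per stage, but a stage is recovered with its GET macro rather than by testing individual bits. A minimal sketch (using <assert.h>), illustrative only:

   /* A three-stage key and its decomposition. */
   int id = VEGA_RADIAL_GRADIENT_SHADER | /* paint: value 3 in bits 0-2 */
            VEGA_IMAGE_MULTIPLY_SHADER  | /* image: value 2 in bits 3-4 */
            VEGA_MASK_SHADER;             /* mask:  value 1 in bit  5   */

   assert(SHADERS_GET_PAINT_SHADER(id) == VEGA_RADIAL_GRADIENT_SHADER);
   assert(SHADERS_GET_IMAGE_SHADER(id) == VEGA_IMAGE_MULTIPLY_SHADER);
   assert(SHADERS_GET_BLEND_SHADER(id) == 0); /* stage absent */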
return tokens;
}
-#define ALL_FILLS (VEGA_SOLID_FILL_SHADER | \
- VEGA_LINEAR_GRADIENT_SHADER | \
- VEGA_RADIAL_GRADIENT_SHADER | \
- VEGA_PATTERN_SHADER | \
- VEGA_IMAGE_NORMAL_SHADER)
-
-
/*
static const char max_shader_preamble[] =
"FRAG\n"
int id,
struct pipe_shader_state *shader)
{
- int idx = 0;
+ int idx = 0, sh;
const struct shader_asm_info * shaders[SHADER_STAGES];
- /* the shader has to have a fill */
- debug_assert(id & ALL_FILLS);
-
/* first stage */
- if (id & VEGA_SOLID_FILL_SHADER) {
- debug_assert(idx == 0);
- shaders[idx] = &shaders_asm[0];
- debug_assert(shaders_asm[0].id == VEGA_SOLID_FILL_SHADER);
- ++idx;
- }
- if ((id & VEGA_LINEAR_GRADIENT_SHADER)) {
- debug_assert(idx == 0);
- shaders[idx] = &shaders_asm[1];
- debug_assert(shaders_asm[1].id == VEGA_LINEAR_GRADIENT_SHADER);
- ++idx;
- }
- if ((id & VEGA_RADIAL_GRADIENT_SHADER)) {
- debug_assert(idx == 0);
- shaders[idx] = &shaders_asm[2];
- debug_assert(shaders_asm[2].id == VEGA_RADIAL_GRADIENT_SHADER);
- ++idx;
- }
- if ((id & VEGA_PATTERN_SHADER)) {
- debug_assert(idx == 0);
- debug_assert(shaders_asm[3].id == VEGA_PATTERN_SHADER);
- shaders[idx] = &shaders_asm[3];
- ++idx;
- }
- if ((id & VEGA_IMAGE_NORMAL_SHADER)) {
- debug_assert(idx == 0);
- debug_assert(shaders_asm[4].id == VEGA_IMAGE_NORMAL_SHADER);
- shaders[idx] = &shaders_asm[4];
- ++idx;
+ sh = SHADERS_GET_PAINT_SHADER(id);
+ switch (sh) {
+ case VEGA_SOLID_FILL_SHADER:
+ case VEGA_LINEAR_GRADIENT_SHADER:
+ case VEGA_RADIAL_GRADIENT_SHADER:
+ case VEGA_PATTERN_SHADER:
+ shaders[idx] = &shaders_paint_asm[(sh >> SHADERS_PAINT_SHIFT) - 1];
+ assert(shaders[idx]->id == sh);
+ idx++;
+ break;
+ default:
+ break;
}
/* second stage */
- if ((id & VEGA_IMAGE_MULTIPLY_SHADER)) {
- debug_assert(shaders_asm[5].id == VEGA_IMAGE_MULTIPLY_SHADER);
- shaders[idx] = &shaders_asm[5];
- ++idx;
- } else if ((id & VEGA_IMAGE_STENCIL_SHADER)) {
- debug_assert(shaders_asm[6].id == VEGA_IMAGE_STENCIL_SHADER);
- shaders[idx] = &shaders_asm[6];
- ++idx;
+ sh = SHADERS_GET_IMAGE_SHADER(id);
+ switch (sh) {
+ case VEGA_IMAGE_NORMAL_SHADER:
+ case VEGA_IMAGE_MULTIPLY_SHADER:
+ case VEGA_IMAGE_STENCIL_SHADER:
+ shaders[idx] = &shaders_image_asm[(sh >> SHADERS_IMAGE_SHIFT) - 1];
+ assert(shaders[idx]->id == sh);
+ idx++;
+ break;
+ default:
+ break;
}
+ /* sanity check */
+ assert(idx == ((!sh || sh == VEGA_IMAGE_NORMAL_SHADER) ? 1 : 2));
+
/* third stage */
- if ((id & VEGA_MASK_SHADER)) {
- debug_assert(idx == 1);
- debug_assert(shaders_asm[7].id == VEGA_MASK_SHADER);
- shaders[idx] = &shaders_asm[7];
- ++idx;
+ sh = SHADERS_GET_MASK_SHADER(id);
+ switch (sh) {
+ case VEGA_MASK_SHADER:
+ shaders[idx] = &shaders_mask_asm[(sh >> SHADERS_MASK_SHIFT) - 1];
+ assert(shaders[idx]->id == sh);
+ idx++;
+ break;
+ default:
+ break;
}
/* fourth stage */
- if ((id & VEGA_BLEND_MULTIPLY_SHADER)) {
- debug_assert(shaders_asm[8].id == VEGA_BLEND_MULTIPLY_SHADER);
- shaders[idx] = &shaders_asm[8];
- ++idx;
- } else if ((id & VEGA_BLEND_SCREEN_SHADER)) {
- debug_assert(shaders_asm[9].id == VEGA_BLEND_SCREEN_SHADER);
- shaders[idx] = &shaders_asm[9];
- ++idx;
- } else if ((id & VEGA_BLEND_DARKEN_SHADER)) {
- debug_assert(shaders_asm[10].id == VEGA_BLEND_DARKEN_SHADER);
- shaders[idx] = &shaders_asm[10];
- ++idx;
- } else if ((id & VEGA_BLEND_LIGHTEN_SHADER)) {
- debug_assert(shaders_asm[11].id == VEGA_BLEND_LIGHTEN_SHADER);
- shaders[idx] = &shaders_asm[11];
- ++idx;
+ sh = SHADERS_GET_BLEND_SHADER(id);
+ switch (sh) {
+ case VEGA_BLEND_MULTIPLY_SHADER:
+ case VEGA_BLEND_SCREEN_SHADER:
+ case VEGA_BLEND_DARKEN_SHADER:
+ case VEGA_BLEND_LIGHTEN_SHADER:
+ shaders[idx] = &shaders_blend_asm[(sh >> SHADERS_BLEND_SHIFT) - 1];
+ assert(shaders[idx]->id == sh);
+ idx++;
+ break;
+ default:
+ break;
}
/* fifth stage */
- if ((id & VEGA_PREMULTIPLY_SHADER)) {
- debug_assert(shaders_asm[12].id == VEGA_PREMULTIPLY_SHADER);
- shaders[idx] = &shaders_asm[12];
- ++idx;
- } else if ((id & VEGA_UNPREMULTIPLY_SHADER)) {
- debug_assert(shaders_asm[13].id == VEGA_UNPREMULTIPLY_SHADER);
- shaders[idx] = &shaders_asm[13];
- ++idx;
+ sh = SHADERS_GET_PREMULTIPLY_SHADER(id);
+ switch (sh) {
+ case VEGA_PREMULTIPLY_SHADER:
+ case VEGA_UNPREMULTIPLY_SHADER:
+ shaders[idx] = &shaders_premultiply_asm[
+ (sh >> SHADERS_PREMULTIPLY_SHIFT) - 1];
+ assert(shaders[idx]->id == sh);
+ idx++;
+ break;
+ default:
+ break;
}
/* sixth stage */
- if ((id & VEGA_BW_SHADER)) {
- debug_assert(shaders_asm[14].id == VEGA_BW_SHADER);
- shaders[idx] = &shaders_asm[14];
- ++idx;
+ sh = SHADERS_GET_BW_SHADER(id);
+ switch (sh) {
+ case VEGA_BW_SHADER:
+ shaders[idx] = &shaders_bw_asm[(sh >> SHADERS_BW_SHIFT) - 1];
+ assert(shaders[idx]->id == sh);
+ idx++;
+ break;
+ default:
+ break;
}
return combine_shaders(shaders, idx, pipe, shader);
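The recurring `(sh >> SHADERS_<STAGE>_SHIFT) - 1` expression converts a stage's in-place field value back into a 0-based row of that stage's table. For instance, assuming the header definitions below:

   /* Mapping a blend-stage value to its table row (illustrative). */
   int sh  = VEGA_BLEND_DARKEN_SHADER;        /* == 3 << SHADERS_BLEND_SHIFT */
   int row = (sh >> SHADERS_BLEND_SHIFT) - 1; /* == 2 -> shaders_blend_asm[2] */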
struct tgsi_token;
struct shaders_cache;
+#define _SHADERS_PAINT_BITS 3
+#define _SHADERS_IMAGE_BITS 2
+#define _SHADERS_MASK_BITS 1
+#define _SHADERS_BLEND_BITS 3
+#define _SHADERS_PREMULTIPLY_BITS 2
+#define _SHADERS_BW_BITS 1
+
+#define SHADERS_PAINT_SHIFT (0)
+#define SHADERS_IMAGE_SHIFT (SHADERS_PAINT_SHIFT + _SHADERS_PAINT_BITS)
+#define SHADERS_MASK_SHIFT (SHADERS_IMAGE_SHIFT + _SHADERS_IMAGE_BITS)
+#define SHADERS_BLEND_SHIFT (SHADERS_MASK_SHIFT + _SHADERS_MASK_BITS)
+#define SHADERS_PREMULTIPLY_SHIFT (SHADERS_BLEND_SHIFT + _SHADERS_BLEND_BITS)
+#define SHADERS_BW_SHIFT (SHADERS_PREMULTIPLY_SHIFT + _SHADERS_PREMULTIPLY_BITS)
+
+#define _SHADERS_GET_STAGE(stage, id) \
+ ((id) & (((1 << _SHADERS_ ## stage ## _BITS) - 1) << SHADERS_ ## stage ## _SHIFT))
+
+#define SHADERS_GET_PAINT_SHADER(id) _SHADERS_GET_STAGE(PAINT, id)
+#define SHADERS_GET_IMAGE_SHADER(id) _SHADERS_GET_STAGE(IMAGE, id)
+#define SHADERS_GET_MASK_SHADER(id) _SHADERS_GET_STAGE(MASK, id)
+#define SHADERS_GET_BLEND_SHADER(id) _SHADERS_GET_STAGE(BLEND, id)
+#define SHADERS_GET_PREMULTIPLY_SHADER(id) _SHADERS_GET_STAGE(PREMULTIPLY, id)
+#define SHADERS_GET_BW_SHADER(id) _SHADERS_GET_STAGE(BW, id)
+
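Taken together, the widths and shifts above pack the whole key into 12 bits; a sketch of the resulting layout, derived from these defines:

   /*  bit 11 | bits 9-10   | bits 6-8 | bit 5 | bits 3-4 | bits 0-2
    *  bw     | premultiply | blend    | mask  | image    | paint    */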
enum VegaShaderType {
- VEGA_SOLID_FILL_SHADER = 1 << 0,
- VEGA_LINEAR_GRADIENT_SHADER = 1 << 1,
- VEGA_RADIAL_GRADIENT_SHADER = 1 << 2,
- VEGA_PATTERN_SHADER = 1 << 3,
- VEGA_IMAGE_NORMAL_SHADER = 1 << 4,
- VEGA_IMAGE_MULTIPLY_SHADER = 1 << 5,
- VEGA_IMAGE_STENCIL_SHADER = 1 << 6,
+ VEGA_SOLID_FILL_SHADER = 1 << SHADERS_PAINT_SHIFT,
+ VEGA_LINEAR_GRADIENT_SHADER = 2 << SHADERS_PAINT_SHIFT,
+ VEGA_RADIAL_GRADIENT_SHADER = 3 << SHADERS_PAINT_SHIFT,
+ VEGA_PATTERN_SHADER = 4 << SHADERS_PAINT_SHIFT,
+
+ VEGA_IMAGE_NORMAL_SHADER = 1 << SHADERS_IMAGE_SHIFT,
+ VEGA_IMAGE_MULTIPLY_SHADER = 2 << SHADERS_IMAGE_SHIFT,
+ VEGA_IMAGE_STENCIL_SHADER = 3 << SHADERS_IMAGE_SHIFT,
- VEGA_MASK_SHADER = 1 << 7,
+ VEGA_MASK_SHADER = 1 << SHADERS_MASK_SHIFT,
- VEGA_BLEND_MULTIPLY_SHADER = 1 << 8,
- VEGA_BLEND_SCREEN_SHADER = 1 << 9,
- VEGA_BLEND_DARKEN_SHADER = 1 << 10,
- VEGA_BLEND_LIGHTEN_SHADER = 1 << 11,
+ VEGA_BLEND_MULTIPLY_SHADER = 1 << SHADERS_BLEND_SHIFT,
+ VEGA_BLEND_SCREEN_SHADER = 2 << SHADERS_BLEND_SHIFT,
+ VEGA_BLEND_DARKEN_SHADER = 3 << SHADERS_BLEND_SHIFT,
+ VEGA_BLEND_LIGHTEN_SHADER = 4 << SHADERS_BLEND_SHIFT,
- VEGA_PREMULTIPLY_SHADER = 1 << 12,
- VEGA_UNPREMULTIPLY_SHADER = 1 << 13,
+ VEGA_PREMULTIPLY_SHADER = 1 << SHADERS_PREMULTIPLY_SHIFT,
+ VEGA_UNPREMULTIPLY_SHADER = 2 << SHADERS_PREMULTIPLY_SHIFT,
- VEGA_BW_SHADER = 1 << 14
+ VEGA_BW_SHADER = 1 << SHADERS_BW_SHIFT
};
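Note that with this encoding the enumerators are no longer independent single bits, so the old `id & VEGA_FOO_SHADER` style of test can alias across values in the same field (e.g. a solid-fill key ANDed with VEGA_RADIAL_GRADIENT_SHADER is nonzero, since 1 & 3 == 1). Callers should compare the extracted field instead, along these lines:

   /* Compare the decoded field; don't mask the raw id (illustrative). */
   if (SHADERS_GET_PAINT_SHADER(id) == VEGA_RADIAL_GRADIENT_SHADER) {
      /* radial-gradient-specific handling */
   }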
struct vg_shader {