/**************************************************************************
- *
+ *
* Copyright 2009-2010 VMware, Inc.
* All Rights Reserved.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
+ *
**************************************************************************/
#include "util/u_memory.h"
#include "util/u_math.h"
#include "util/u_bitmask.h"
+#include "GL/gl.h"
+#include "compiler/shader_info.h"
union tgsi_any_token {
struct tgsi_header header;
struct ureg_program
{
- unsigned processor;
+ enum pipe_shader_type processor;
bool supports_any_inout_decl_range;
int next_shader_processor;
unsigned first;
unsigned last;
unsigned array_id;
+ boolean invariant;
} output[UREG_MAX_OUTPUT];
unsigned nr_outputs, nr_output_regs;
struct {
unsigned index;
enum tgsi_texture_type target;
- unsigned format;
+ enum pipe_format format;
boolean wr;
boolean raw;
} image[PIPE_MAX_SHADER_IMAGES];
}
-struct ureg_src
+struct ureg_src
ureg_DECL_vs_input( struct ureg_program *ureg,
unsigned index )
{
unsigned array_size)
{
return ureg_DECL_fs_input_cyl_centroid_layout(ureg,
- semantic_name, semantic_index, 0, 0, 0,
+ semantic_name, semantic_index,
+ TGSI_INTERPOLATE_CONSTANT, 0, TGSI_INTERPOLATE_LOC_CENTER,
index, usage_mask, array_id, array_size);
}
unsigned array_size)
{
return ureg_DECL_fs_input_cyl_centroid(ureg, semantic_name, semantic_index,
- 0, 0, 0, array_id, array_size);
+ TGSI_INTERPOLATE_CONSTANT, 0,
+ TGSI_INTERPOLATE_LOC_CENTER,
+ array_id, array_size);
}
unsigned index,
unsigned usage_mask,
unsigned array_id,
- unsigned array_size)
+ unsigned array_size,
+ boolean invariant)
{
unsigned i;
ureg->output[i].first = index;
ureg->output[i].last = index + array_size - 1;
ureg->output[i].array_id = array_id;
+ ureg->output[i].invariant = invariant;
ureg->nr_output_regs = MAX2(ureg->nr_output_regs, index + array_size);
ureg->nr_outputs++;
}
unsigned array_size)
{
return ureg_DECL_output_layout(ureg, name, index, 0,
- ureg->nr_output_regs, usage_mask, array_id, array_size);
+ ureg->nr_output_regs, usage_mask, array_id,
+ array_size, FALSE);
}
-struct ureg_dst
+struct ureg_dst
ureg_DECL_output(struct ureg_program *ureg,
- unsigned name,
+ enum tgsi_semantic name,
unsigned index)
{
return ureg_DECL_output_masked(ureg, name, index, TGSI_WRITEMASK_XYZW,
unsigned i;
for (i = 0; i < ureg->nr_samplers; i++)
- if (ureg->sampler[i].Index == nr)
+ if (ureg->sampler[i].Index == (int)nr)
return ureg->sampler[i];
-
+
if (i < PIPE_MAX_SAMPLERS) {
ureg->sampler[i] = ureg_src_register( TGSI_FILE_SAMPLER, nr );
ureg->nr_samplers++;
ureg_DECL_image(struct ureg_program *ureg,
unsigned index,
enum tgsi_texture_type target,
- unsigned format,
+ enum pipe_format format,
boolean wr,
boolean raw)
{
static int
match_or_expand_immediate64( const unsigned *v,
- int type,
unsigned nr,
unsigned *v2,
unsigned *pnr2,
if (type == TGSI_IMM_FLOAT64 ||
type == TGSI_IMM_UINT64 ||
type == TGSI_IMM_INT64)
- return match_or_expand_immediate64(v, type, nr, v2, pnr2, swizzle);
+ return match_or_expand_immediate64(v, nr, v2, pnr2, swizzle);
*swizzle = 0;
assert(src.File != TGSI_FILE_NULL);
assert(src.File < TGSI_FILE_COUNT);
-
+
out[n].value = 0;
out[n].src.File = src.File;
out[n].src.SwizzleX = src.SwizzleX;
}
-void
+void
ureg_emit_dst( struct ureg_program *ureg,
struct ureg_dst dst )
{
out[n].dst.Indirect = dst.Indirect;
out[n].dst.Index = dst.Index;
n++;
-
+
if (dst.Indirect) {
out[n].value = 0;
out[n].ind.File = dst.IndirectFile;
}
-static void validate( unsigned opcode,
+static void validate( enum tgsi_opcode opcode,
unsigned nr_dst,
unsigned nr_src )
{
-#ifdef DEBUG
+#ifndef NDEBUG
const struct tgsi_opcode_info *info = tgsi_get_opcode_info( opcode );
assert(info);
if (info) {
struct ureg_emit_insn_result
ureg_emit_insn(struct ureg_program *ureg,
- unsigned opcode,
+ enum tgsi_opcode opcode,
boolean saturate,
unsigned precise,
unsigned num_dst,
struct ureg_emit_insn_result result;
validate( opcode, num_dst, num_src );
-
+
out = get_tokens( ureg, DOMAIN_INSN, count );
out[0].insn = tgsi_default_instruction();
out[0].insn.Opcode = opcode;
out[0].value = 0;
out[0].insn_texture_offset = *offset;
-
}
void
ureg_emit_memory(struct ureg_program *ureg,
unsigned extended_token,
unsigned qualifier,
- unsigned texture,
- unsigned format)
+ enum tgsi_texture_type texture,
+ enum pipe_format format)
{
union tgsi_any_token *out, *insn;
void
ureg_insn(struct ureg_program *ureg,
- unsigned opcode,
+ enum tgsi_opcode opcode,
const struct ureg_dst *dst,
unsigned nr_dst,
const struct ureg_src *src,
void
ureg_tex_insn(struct ureg_program *ureg,
- unsigned opcode,
+ enum tgsi_opcode opcode,
const struct ureg_dst *dst,
unsigned nr_dst,
enum tgsi_texture_type target,
void
ureg_memory_insn(struct ureg_program *ureg,
- unsigned opcode,
+ enum tgsi_opcode opcode,
const struct ureg_dst *dst,
unsigned nr_dst,
const struct ureg_src *src,
unsigned nr_src,
unsigned qualifier,
- unsigned texture,
- unsigned format)
+ enum tgsi_texture_type texture,
+ enum pipe_format format)
{
struct ureg_emit_insn_result insn;
unsigned i;
unsigned semantic_index,
unsigned streams,
unsigned usage_mask,
- unsigned array_id)
+ unsigned array_id,
+ boolean invariant)
{
union tgsi_any_token *out = get_tokens(ureg, DOMAIN_DECL, array_id ? 4 : 3);
out[0].decl.UsageMask = usage_mask;
out[0].decl.Semantic = 1;
out[0].decl.Array = array_id != 0;
+ out[0].decl.Invariant = invariant;
out[1].value = 0;
out[1].decl_range.First = first;
emit_decl_image(struct ureg_program *ureg,
unsigned index,
enum tgsi_texture_type target,
- unsigned format,
+ enum pipe_format format,
boolean wr,
boolean raw)
{
unsigned i,j;
for (i = 0; i < ARRAY_SIZE(ureg->properties); i++)
- if (ureg->properties[i] != ~0)
+ if (ureg->properties[i] != ~0u)
emit_property(ureg, i, ureg->properties[i]);
if (ureg->processor == PIPE_SHADER_VERTEX) {
ureg->input[i].semantic_index,
0,
TGSI_WRITEMASK_XYZW,
- ureg->input[i].array_id);
+ ureg->input[i].array_id,
+ FALSE);
}
}
else {
ureg->input[i].semantic_index +
(j - ureg->input[i].first),
0,
- TGSI_WRITEMASK_XYZW, 0);
+ TGSI_WRITEMASK_XYZW, 0, FALSE);
}
}
}
ureg->system_value[i].semantic_name,
ureg->system_value[i].semantic_index,
0,
- TGSI_WRITEMASK_XYZW, 0);
+ TGSI_WRITEMASK_XYZW, 0, FALSE);
}
if (ureg->supports_any_inout_decl_range) {
ureg->output[i].semantic_index,
ureg->output[i].streams,
ureg->output[i].usage_mask,
- ureg->output[i].array_id);
+ ureg->output[i].array_id,
+ ureg->output[i].invariant);
}
}
else {
ureg->output[i].semantic_index +
(j - ureg->output[i].first),
ureg->output[i].streams,
- ureg->output[i].usage_mask, 0);
+ ureg->output[i].usage_mask,
+ 0,
+ ureg->output[i].invariant);
}
}
}
for (i = 0; i < ureg->nr_samplers; i++) {
- emit_decl_range( ureg,
+ emit_decl_range( ureg,
TGSI_FILE_SAMPLER,
ureg->sampler[i].Index, 1 );
}
static void copy_instructions( struct ureg_program *ureg )
{
unsigned nr_tokens = ureg->domain[DOMAIN_INSN].count;
- union tgsi_any_token *out = get_tokens( ureg,
- DOMAIN_DECL,
+ union tgsi_any_token *out = get_tokens( ureg,
+ DOMAIN_DECL,
nr_tokens );
- memcpy(out,
- ureg->domain[DOMAIN_INSN].tokens,
+ memcpy(out,
+ ureg->domain[DOMAIN_INSN].tokens,
nr_tokens * sizeof out[0] );
}
PIPE_SHADER_FRAGMENT :
ureg->next_shader_processor);
break;
+ default:
+ ; /* nothing */
}
emit_header( ureg );
emit_decls( ureg );
copy_instructions( ureg );
fixup_header_size( ureg );
-
+
if (ureg->domain[0].tokens == error_tokens ||
ureg->domain[1].tokens == error_tokens) {
debug_printf("%s: error in generated shader\n", __FUNCTION__);
tokens = &ureg->domain[DOMAIN_DECL].tokens[0].token;
if (0) {
- debug_printf("%s: emitted shader %d tokens:\n", __FUNCTION__,
+ debug_printf("%s: emitted shader %d tokens:\n", __FUNCTION__,
ureg->domain[DOMAIN_DECL].count);
tgsi_dump( tokens, 0 );
}
#if DEBUG
- if (tokens && !tgsi_sanity_check(tokens)) {
+ /* tgsi_sanity doesn't seem to return if there are too many constants. */
+ bool too_many_constants = false;
+ for (unsigned i = 0; i < ARRAY_SIZE(ureg->const_decls); i++) {
+ for (unsigned j = 0; j < ureg->const_decls[i].nr_constant_ranges; j++) {
+ if (ureg->const_decls[i].constant_range[j].last > 4096) {
+ too_many_constants = true;
+ break;
+ }
+ }
+ }
+
+ if (tokens && !too_many_constants && !tgsi_sanity_check(tokens)) {
debug_printf("tgsi_ureg.c, sanity check failed on generated tokens:\n");
tgsi_dump(tokens, 0);
assert(0);
}
#endif
-
+
return tokens;
}
struct pipe_context *pipe,
const struct pipe_stream_output_info *so )
{
- struct pipe_shader_state state;
+ struct pipe_shader_state state = {0};
pipe_shader_state_from_tgsi(&state, ureg_finalize(ureg));
if(!state.tokens)
tokens = &ureg->domain[DOMAIN_DECL].tokens[0].token;
- if (nr_tokens)
+ if (nr_tokens)
*nr_tokens = ureg->domain[DOMAIN_DECL].count;
ureg->domain[DOMAIN_DECL].tokens = 0;
struct ureg_program *
-ureg_create(unsigned processor)
+ureg_create(enum pipe_shader_type processor)
{
return ureg_create_with_screen(processor, NULL);
}
struct ureg_program *
-ureg_create_with_screen(unsigned processor, struct pipe_screen *screen)
+ureg_create_with_screen(enum pipe_shader_type processor,
+ struct pipe_screen *screen)
{
- int i;
+ uint i;
struct ureg_program *ureg = CALLOC_STRUCT( ureg_program );
if (!ureg)
goto no_ureg;
return ureg->nr_outputs;
}
+/* Translate the clip/cull distance array sizes from shader_info into the
+ * corresponding TGSI shader properties.  A size of zero means the property
+ * is omitted entirely rather than emitted as 0.
+ */
+static void
+ureg_setup_clipdist_info(struct ureg_program *ureg,
+ const struct shader_info *info)
+{
+ if (info->clip_distance_array_size)
+ ureg_property(ureg, TGSI_PROPERTY_NUM_CLIPDIST_ENABLED,
+ info->clip_distance_array_size);
+ if (info->cull_distance_array_size)
+ ureg_property(ureg, TGSI_PROPERTY_NUM_CULLDIST_ENABLED,
+ info->cull_distance_array_size);
+}
+
+/* Emit the tessellation-control-shader output patch size (vertices per
+ * patch) as a TGSI property.
+ */
+static void
+ureg_setup_tess_ctrl_shader(struct ureg_program *ureg,
+ const struct shader_info *info)
+{
+ ureg_property(ureg, TGSI_PROPERTY_TCS_VERTICES_OUT,
+ info->tess.tcs_vertices_out);
+}
+
+/* Emit the tessellation-evaluation-shader layout qualifiers (primitive
+ * mode, spacing, winding, point mode) as TGSI properties.
+ */
+static void
+ureg_setup_tess_eval_shader(struct ureg_program *ureg,
+ const struct shader_info *info)
+{
+ /* The isolines domain is reported to TGSI as the primitive it actually
+ * produces (lines); every other domain maps through unchanged.
+ */
+ if (info->tess.primitive_mode == GL_ISOLINES)
+ ureg_property(ureg, TGSI_PROPERTY_TES_PRIM_MODE, GL_LINES);
+ else
+ ureg_property(ureg, TGSI_PROPERTY_TES_PRIM_MODE,
+ info->tess.primitive_mode);
+
+ /* The mesa TESS_SPACING_* and gallium PIPE_TESS_SPACING_* enums use
+ * different orderings; (x + 1) % 3 converts between them.  The static
+ * asserts pin that relationship so an enum change breaks the build
+ * instead of silently emitting wrong spacing.
+ */
+ STATIC_ASSERT((TESS_SPACING_EQUAL + 1) % 3 == PIPE_TESS_SPACING_EQUAL);
+ STATIC_ASSERT((TESS_SPACING_FRACTIONAL_ODD + 1) % 3 ==
+ PIPE_TESS_SPACING_FRACTIONAL_ODD);
+ STATIC_ASSERT((TESS_SPACING_FRACTIONAL_EVEN + 1) % 3 ==
+ PIPE_TESS_SPACING_FRACTIONAL_EVEN);
+
+ ureg_property(ureg, TGSI_PROPERTY_TES_SPACING,
+ (info->tess.spacing + 1) % 3);
+
+ /* shader_info stores CCW winding; the TGSI property is "vertex order
+ * is CW", hence the negation.
+ */
+ ureg_property(ureg, TGSI_PROPERTY_TES_VERTEX_ORDER_CW,
+ !info->tess.ccw);
+ ureg_property(ureg, TGSI_PROPERTY_TES_POINT_MODE,
+ info->tess.point_mode);
+}
+
+/* Emit the geometry-shader layout qualifiers (input/output primitive,
+ * maximum emitted vertices, invocation count) as TGSI properties.
+ */
+static void
+ureg_setup_geometry_shader(struct ureg_program *ureg,
+ const struct shader_info *info)
+{
+ ureg_property(ureg, TGSI_PROPERTY_GS_INPUT_PRIM,
+ info->gs.input_primitive);
+ ureg_property(ureg, TGSI_PROPERTY_GS_OUTPUT_PRIM,
+ info->gs.output_primitive);
+ ureg_property(ureg, TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES,
+ info->gs.vertices_out);
+ ureg_property(ureg, TGSI_PROPERTY_GS_INVOCATIONS,
+ info->gs.invocations);
+}
+
+/* Emit the fragment-shader depth/stencil-related layout qualifiers as
+ * TGSI properties.
+ */
+static void
+ureg_setup_fragment_shader(struct ureg_program *ureg,
+ const struct shader_info *info)
+{
+ /* post_depth_coverage implies early depth/stencil, so either flag
+ * turns the EARLY_DEPTH_STENCIL property on.
+ */
+ if (info->fs.early_fragment_tests || info->fs.post_depth_coverage) {
+ ureg_property(ureg, TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL, 1);
+
+ if (info->fs.post_depth_coverage)
+ ureg_property(ureg, TGSI_PROPERTY_FS_POST_DEPTH_COVERAGE, 1);
+ }
+
+ /* Map the mesa FRAG_DEPTH_LAYOUT_* qualifiers onto their TGSI
+ * counterparts; LAYOUT_NONE emits no property at all.
+ */
+ if (info->fs.depth_layout != FRAG_DEPTH_LAYOUT_NONE) {
+ switch (info->fs.depth_layout) {
+ case FRAG_DEPTH_LAYOUT_ANY:
+ ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
+ TGSI_FS_DEPTH_LAYOUT_ANY);
+ break;
+ case FRAG_DEPTH_LAYOUT_GREATER:
+ ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
+ TGSI_FS_DEPTH_LAYOUT_GREATER);
+ break;
+ case FRAG_DEPTH_LAYOUT_LESS:
+ ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
+ TGSI_FS_DEPTH_LAYOUT_LESS);
+ break;
+ case FRAG_DEPTH_LAYOUT_UNCHANGED:
+ ureg_property(ureg, TGSI_PROPERTY_FS_DEPTH_LAYOUT,
+ TGSI_FS_DEPTH_LAYOUT_UNCHANGED);
+ break;
+ default:
+ assert(0);
+ }
+ }
+}
+
+/* Emit the compute-shader fixed workgroup dimensions as TGSI properties
+ * and declare shared memory if the shader uses any.
+ */
+static void
+ureg_setup_compute_shader(struct ureg_program *ureg,
+ const struct shader_info *info)
+{
+ ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH,
+ info->cs.local_size[0]);
+ ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT,
+ info->cs.local_size[1]);
+ ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH,
+ info->cs.local_size[2]);
+
+ /* Only declare a shared-memory resource when the shader actually
+ * allocates shared storage.
+ */
+ if (info->cs.shared_size)
+ ureg_DECL_memory(ureg, TGSI_MEMORY_TYPE_SHARED);
+}
+
+/* Populate stage-independent and stage-specific TGSI properties on \p ureg
+ * from a gallium/mesa shader_info.  Dispatches to the per-stage setup
+ * helpers above; stages with no TGSI properties fall through to the
+ * default case and emit nothing.
+ */
+void
+ureg_setup_shader_info(struct ureg_program *ureg,
+ const struct shader_info *info)
+{
+ if (info->layer_viewport_relative)
+ ureg_property(ureg, TGSI_PROPERTY_LAYER_VIEWPORT_RELATIVE, 1);
+
+ switch (info->stage) {
+ case MESA_SHADER_VERTEX:
+ ureg_setup_clipdist_info(ureg, info);
+ break;
+ case MESA_SHADER_TESS_CTRL:
+ ureg_setup_tess_ctrl_shader(ureg, info);
+ break;
+ case MESA_SHADER_TESS_EVAL:
+ ureg_setup_tess_eval_shader(ureg, info);
+ ureg_setup_clipdist_info(ureg, info);
+ /* TES additionally records the following pipeline stage so the
+ * linker/backends know what consumes its outputs.
+ */
+ ureg_set_next_shader_processor(ureg, pipe_shader_type_from_mesa(info->next_stage));
+ break;
+ case MESA_SHADER_GEOMETRY:
+ ureg_setup_geometry_shader(ureg, info);
+ ureg_setup_clipdist_info(ureg, info);
+ break;
+ case MESA_SHADER_FRAGMENT:
+ ureg_setup_fragment_shader(ureg, info);
+ break;
+ case MESA_SHADER_COMPUTE:
+ ureg_setup_compute_shader(ureg, info);
+ break;
+ default:
+ break;
+ }
+}
+
void ureg_destroy( struct ureg_program *ureg )
{
unsigned i;
for (i = 0; i < ARRAY_SIZE(ureg->domain); i++) {
- if (ureg->domain[i].tokens &&
+ if (ureg->domain[i].tokens &&
ureg->domain[i].tokens != error_tokens)
FREE(ureg->domain[i].tokens);
}