fi
AC_CHECK_HEADER([xlocale.h], [DEFINES="$DEFINES -DHAVE_XLOCALE_H"])
+ AC_CHECK_HEADER([sys/sysctl.h], [DEFINES="$DEFINES -DHAVE_SYS_SYSCTL_H"])
AC_CHECK_FUNC([strtof], [DEFINES="$DEFINES -DHAVE_STRTOF"])
dnl Check to see if dlopen is in default libraries (like Solaris, which
[enable_opengl="$enableval"],
[enable_opengl=yes])
AC_ARG_ENABLE([gles1],
- [AS_HELP_STRING([--enable-gles1],
- [enable support for OpenGL ES 1.x API @<:@default=disabled@:>@])],
+ [AS_HELP_STRING([--disable-gles1],
+ [disable support for OpenGL ES 1.x API @<:@default=enabled@:>@])],
[enable_gles1="$enableval"],
- [enable_gles1=no])
+ [enable_gles1=yes])
AC_ARG_ENABLE([gles2],
- [AS_HELP_STRING([--enable-gles2],
- [enable support for OpenGL ES 2.x API @<:@default=disabled@:>@])],
+ [AS_HELP_STRING([--disable-gles2],
+ [disable support for OpenGL ES 2.x API @<:@default=enabled@:>@])],
[enable_gles2="$enableval"],
- [enable_gles2=no])
+ [enable_gles2=yes])
AC_ARG_ENABLE([dri],
[AS_HELP_STRING([--enable-dri],
;;
esac
- # Building Xlib-GLX requires shared glapi to be disabled.
- if test "x$enable_xlib_glx" = xyes; then
- AC_MSG_NOTICE([Shared GLAPI should not used with Xlib-GLX, disabling])
- enable_shared_glapi=no
- fi
-
AM_CONDITIONAL(HAVE_SHARED_GLAPI, test "x$enable_shared_glapi" = xyes)
# Build the pipe-drivers as separate libraries/modules.
[driglx_direct="$enableval"],
[driglx_direct="yes"])
+# Check for libcaca
+PKG_CHECK_EXISTS([caca], [have_libcaca=yes], [have_libcaca=no])
+AM_CONDITIONAL([HAVE_LIBCACA], [test x$have_libcaca = xyes])
+
dnl
dnl libGL configuration per driver
dnl
fi
if test "x$enable_dri" = xyes; then
- GBM_BACKEND_DIRS="$GBM_BACKEND_DIRS dri"
if test "x$enable_shared_glapi" = xno; then
AC_MSG_ERROR([gbm_dri requires --enable-shared-glapi])
fi
AC_SUBST([GBM_PC_REQ_PRIV])
AC_SUBST([GBM_PC_LIB_PRIV])
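+dnl The Vulkan driver is currently built unconditionally; no configure
+dnl switch exists for it yet.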
+AM_CONDITIONAL(HAVE_VULKAN, true)
+
dnl
dnl EGL configuration
dnl
if test "$enable_static" != yes; then
if test "x$enable_dri" = xyes; then
- HAVE_EGL_DRIVER_DRI2=1
- fi
+ HAVE_EGL_DRIVER_DRI2=1
+ if test "x$enable_shared_glapi" = xno; then
+ AC_MSG_ERROR([egl_dri2 requires --enable-shared-glapi])
+ fi
+ else
+ # Avoid building an "empty" libEGL. Drop/update this
+ # when other backends (haiku?) come along.
+ AC_MSG_ERROR([egl requires --enable-dri])
+ fi
fi
fi
AC_MSG_ERROR([EGL platform drm requires libdrm >= $LIBDRM_REQUIRED])
;;
+ surfaceless)
+ test "x$have_libdrm" != xyes &&
+ AC_MSG_ERROR([EGL platform surfaceless requires libdrm >= $LIBDRM_REQUIRED])
+ ;;
+
android|gdi|null)
;;
AM_CONDITIONAL(HAVE_EGL_PLATFORM_X11, echo "$egl_platforms" | grep -q 'x11')
AM_CONDITIONAL(HAVE_EGL_PLATFORM_WAYLAND, echo "$egl_platforms" | grep -q 'wayland')
AM_CONDITIONAL(HAVE_EGL_PLATFORM_DRM, echo "$egl_platforms" | grep -q 'drm')
+ AM_CONDITIONAL(HAVE_EGL_PLATFORM_SURFACELESS, echo "$egl_platforms" | grep -q 'surfaceless')
AM_CONDITIONAL(HAVE_EGL_PLATFORM_NULL, echo "$egl_platforms" | grep -q 'null')
AM_CONDITIONAL(HAVE_EGL_DRIVER_DRI2, test "x$HAVE_EGL_DRIVER_DRI2" != "x")
AC_MSG_ERROR([LLVM $LLVM_REQUIRED_VERSION_MAJOR.$LLVM_REQUIRED_VERSION_MINOR or newer is required])
fi
- LLVM_COMPONENTS="engine bitwriter"
- if $LLVM_CONFIG --components | grep -qw 'mcjit'; then
- LLVM_COMPONENTS="${LLVM_COMPONENTS} mcjit"
- fi
+ LLVM_COMPONENTS="engine bitwriter mcjit mcdisassembler"
if test "x$enable_opencl" = xyes; then
llvm_check_version_for "3" "5" "0" "opencl"
LLVM_COMPONENTS="${LLVM_COMPONENTS} all-targets ipo linker instrumentation"
LLVM_COMPONENTS="${LLVM_COMPONENTS} irreader option objcarcopts profiledata"
fi
- DEFINES="${DEFINES} -DHAVE_LLVM=0x0$LLVM_VERSION_INT -DLLVM_VERSION_PATCH=$LLVM_VERSION_PATCH"
+ DEFINES="${DEFINES} -DHAVE_LLVM=0x0$LLVM_VERSION_INT -DMESA_LLVM_VERSION_PATCH=$LLVM_VERSION_PATCH"
MESA_LLVM=1
dnl Check for Clang internal headers
}
radeon_llvm_check() {
+ if test ${LLVM_VERSION_INT} -lt 307; then
+ amdgpu_llvm_target_name='r600'
+ else
+ amdgpu_llvm_target_name='amdgpu'
+ fi
if test "x$enable_gallium_llvm" != "xyes"; then
AC_MSG_ERROR([--enable-gallium-llvm is required when building $1])
fi
llvm_check_version_for "3" "4" "2" $1
- if test true && $LLVM_CONFIG --targets-built | grep -qvw 'R600' ; then
- AC_MSG_ERROR([LLVM R600 Target not enabled. You can enable it when building the LLVM
- sources with the --enable-experimental-targets=R600
- configure flag])
+ if test true && $LLVM_CONFIG --targets-built | grep -iqvw $amdgpu_llvm_target_name ; then
+ AC_MSG_ERROR([LLVM $amdgpu_llvm_target_name not enabled in your LLVM build.])
fi
- LLVM_COMPONENTS="${LLVM_COMPONENTS} r600 bitreader ipo"
+ LLVM_COMPONENTS="${LLVM_COMPONENTS} $amdgpu_llvm_target_name bitreader ipo"
NEED_RADEON_LLVM=yes
if test "x$have_libelf" != xyes; then
AC_MSG_ERROR([$1 requires libelf when using llvm])
AC_SUBST([XA_TINY], $XA_TINY)
AC_SUBST([XA_VERSION], "$XA_MAJOR.$XA_MINOR.$XA_TINY")
+PKG_CHECK_MODULES(VALGRIND, [valgrind],
+ [have_valgrind=yes], [have_valgrind=no])
+if test "x$have_valgrind" = "xyes"; then
+ AC_DEFINE([HAVE_VALGRIND], 1,
+ [Use valgrind intrinsics to suppress false warnings])
+fi
+
dnl Restore LDFLAGS and CPPFLAGS
LDFLAGS="$_SAVE_LDFLAGS"
CPPFLAGS="$_SAVE_CPPFLAGS"
src/gallium/drivers/svga/Makefile
src/gallium/drivers/trace/Makefile
src/gallium/drivers/vc4/Makefile
- src/gallium/drivers/vc4/kernel/Makefile
src/gallium/state_trackers/clover/Makefile
src/gallium/state_trackers/dri/Makefile
src/gallium/state_trackers/glx/xlib/Makefile
src/mesa/drivers/osmesa/osmesa.pc
src/mesa/drivers/x11/Makefile
src/mesa/main/tests/Makefile
+ src/vulkan/Makefile
src/util/Makefile
src/util/tests/hash_table/Makefile])
AM_CFLAGS = $(VISIBILITY_CFLAGS)
AM_CXXFLAGS = $(VISIBILITY_CXXFLAGS)
+if HAVE_VULKAN
+SUBDIRS += vulkan
+endif
+
AM_CPPFLAGS = \
-I$(top_srcdir)/include/ \
-I$(top_srcdir)/src/mapi/ \
libglsl_util_la_SOURCES = \
mesa/main/imports.c \
mesa/program/prog_hash_table.c \
- mesa/program/symbol_table.c
+ mesa/program/symbol_table.c \
+ mesa/program/dummy_errors.c
tests/sampler-types-test \
tests/uniform-initializer-test
-noinst_PROGRAMS = glsl_compiler
+noinst_PROGRAMS = glsl_compiler spirv2nir
tests_blob_test_SOURCES = \
tests/blob_test.c
tests/builtin_variable_test.cpp \
tests/invalidate_locations_test.cpp \
tests/general_ir_test.cpp \
- tests/varyings_test.cpp \
- tests/common.c
+ tests/varyings_test.cpp
tests_general_ir_test_CFLAGS = \
$(PTHREAD_CFLAGS)
tests_general_ir_test_LDADD = \
tests/copy_constant_to_storage_tests.cpp \
tests/set_uniform_initializer_tests.cpp \
tests/uniform_initializer_utils.cpp \
- tests/uniform_initializer_utils.h \
- tests/common.c
+ tests/uniform_initializer_utils.h
tests_uniform_initializer_test_CFLAGS = \
$(PTHREAD_CFLAGS)
tests_uniform_initializer_test_LDADD = \
$(PTHREAD_LIBS)
tests_sampler_types_test_SOURCES = \
- tests/sampler_types_test.cpp \
- tests/common.c
+ tests/sampler_types_test.cpp
tests_sampler_types_test_CFLAGS = \
$(PTHREAD_CFLAGS)
tests_sampler_types_test_LDADD = \
$(LIBGLCPP_FILES)
glcpp_glcpp_SOURCES = \
- glcpp/glcpp.c \
- tests/common.c
+ glcpp/glcpp.c
glcpp_glcpp_LDADD = \
libglcpp.la \
$(top_builddir)/src/libglsl_util.la \
$(top_builddir)/src/libglsl_util.la \
$(PTHREAD_LIBS)
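+# spirv2nir is a standalone (noinst) harness around the SPIR-V -> NIR
+# translation path in nir/spirv2nir.c.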
+spirv2nir_SOURCES = \
+ standalone_scaffolding.cpp \
+ standalone_scaffolding.h \
+ nir/spirv2nir.c
+
+spirv2nir_LDADD = \
+ libglsl.la \
+ $(top_builddir)/src/libglsl_util.la \
+ $(PTHREAD_LIBS)
+
glsl_test_SOURCES = \
standalone_scaffolding.cpp \
- tests/common.c \
test.cpp \
test_optpass.cpp \
test_optpass.h
$(RM) glcpp/tests/subtest*/*.out
nir/nir_builder_opcodes.h: nir/nir_opcodes.py nir/nir_builder_opcodes_h.py
- $(MKDIR_P) nir; \
- $(PYTHON2) $(PYTHON_FLAGS) $(srcdir)/nir/nir_builder_opcodes_h.py > $@
+ $(AM_V_at)$(MKDIR_P) nir
+ $(AM_V_GEN)$(PYTHON2) $(PYTHON_FLAGS) $(srcdir)/nir/nir_builder_opcodes_h.py > $@
nir/nir_constant_expressions.c: nir/nir_opcodes.py nir/nir_constant_expressions.py nir/nir_constant_expressions.h
- $(MKDIR_P) nir; \
- $(PYTHON2) $(PYTHON_FLAGS) $(srcdir)/nir/nir_constant_expressions.py > $@
+ $(AM_V_at)$(MKDIR_P) nir
+ $(AM_V_GEN)$(PYTHON2) $(PYTHON_FLAGS) $(srcdir)/nir/nir_constant_expressions.py > $@
nir/nir_opcodes.h: nir/nir_opcodes.py nir/nir_opcodes_h.py
- $(MKDIR_P) nir; \
- $(PYTHON2) $(PYTHON_FLAGS) $(srcdir)/nir/nir_opcodes_h.py > $@
+ $(AM_V_at)$(MKDIR_P) nir
+ $(AM_V_GEN)$(PYTHON2) $(PYTHON_FLAGS) $(srcdir)/nir/nir_opcodes_h.py > $@
nir/nir_opcodes.c: nir/nir_opcodes.py nir/nir_opcodes_c.py
- $(MKDIR_P) nir; \
- $(PYTHON2) $(PYTHON_FLAGS) $(srcdir)/nir/nir_opcodes_c.py > $@
+ $(AM_V_at)$(MKDIR_P) nir
+ $(AM_V_GEN)$(PYTHON2) $(PYTHON_FLAGS) $(srcdir)/nir/nir_opcodes_c.py > $@
nir/nir_opt_algebraic.c: nir/nir_opt_algebraic.py nir/nir_algebraic.py
- $(MKDIR_P) nir; \
- $(PYTHON2) $(PYTHON_FLAGS) $(srcdir)/nir/nir_opt_algebraic.py > $@
+ $(AM_V_at)$(MKDIR_P) nir
+ $(AM_V_GEN)$(PYTHON2) $(PYTHON_FLAGS) $(srcdir)/nir/nir_opt_algebraic.py > $@
* is handled by ir_dereference::is_lvalue.
*/
if (lhs_type->is_unsized_array() && rhs->type->is_array()
- && (lhs_type->element_type() == rhs->type->element_type())) {
+ && (lhs_type->fields.array == rhs->type->fields.array)) {
if (is_initializer) {
return rhs;
} else {
var->data.max_array_access);
}
- var->type = glsl_type::get_array_instance(lhs->type->element_type(),
+ var->type = glsl_type::get_array_instance(lhs->type->fields.array,
rhs->type->array_size());
d->type = var->type;
}
case GLSL_TYPE_SAMPLER:
case GLSL_TYPE_IMAGE:
case GLSL_TYPE_INTERFACE:
+ case GLSL_TYPE_FUNCTION:
case GLSL_TYPE_ATOMIC_UINT:
/* I assume a comparison of a struct containing a sampler just
* ignores the sampler present in the type.
* with an array of size N, all elements of the array from binding
* through binding + N - 1 must be within this range."
*/
- unsigned limit = ctx->Const.Program[state->stage].MaxTextureImageUnits;
+ unsigned limit = ctx->Const.MaxCombinedTextureImageUnits;
if (max_index >= limit) {
_mesa_glsl_error(loc, state, "layout(binding = %d) for %d samplers "
struct _mesa_glsl_parse_state *state,
YYLTYPE *loc)
{
- const glsl_type *base_type =
- (var->type->is_array() ? var->type->element_type() : var->type);
+ const glsl_type *base_type = var->type->without_array();
if (base_type->is_image()) {
if (var->data.mode != ir_var_uniform &&
state->fs_redeclares_gl_fragcoord_with_no_layout_qualifiers;
}
- if (qual->flags.q.explicit_location) {
+ if (qual->flags.q.vk_set) {
+ if (!qual->flags.q.explicit_binding)
+ _mesa_glsl_error(loc, state,
+ "Vulkan descriptor set layout requires both set "
+ "and binding qualifiers");
+
+ var->data.vk_set = true;
+ var->data.set = qual->set;
+ var->data.binding = qual->binding;
+ } else if (qual->flags.q.explicit_location) {
validate_explicit_location(qual, var, state, loc);
} else if (qual->flags.q.explicit_index) {
_mesa_glsl_error(loc, state, "explicit index requires explicit location");
* GL_ARB_conservative_depth
* GL_ARB_gpu_shader5
* GL_ARB_separate_shader_objects
- * GL_ARB_tesselation_shader
+ * GL_ARB_tessellation_shader
* GL_ARB_transform_feedback3
* GL_ARB_uniform_buffer_object
*
* type and specify a size."
*/
if (earlier->type->is_unsized_array() && var->type->is_array()
- && (var->type->element_type() == earlier->type->element_type())) {
+ && (var->type->fields.array == earlier->type->fields.array)) {
/* FINISHME: This doesn't match the qualifiers on the two
* FINISHME: declarations. It's not 100% clear whether this is
* FINISHME: required or not.
}
handle_geometry_shader_input_decl(state, loc, var);
+ } else if (state->stage == MESA_SHADER_FRAGMENT) {
+ /* From section 4.3.4 (Input Variables) of the GLSL ES 3.10 spec:
+ *
+ * It is a compile-time error to declare a fragment shader
+ * input with, or that contains, any of the following types:
+ *
+ * * A boolean type
+ * * An opaque type
+ * * An array of arrays
+ * * An array of structures
+ * * A structure containing an array
+ * * A structure containing a structure
+ */
+ if (state->es_shader) {
+ const glsl_type *check_type = var->type->without_array();
+ if (check_type->is_boolean() ||
+ check_type->contains_opaque()) {
+ _mesa_glsl_error(&loc, state,
+ "fragment shader input cannot have type %s",
+ check_type->name);
+ }
+ if (var->type->is_array() &&
+ var->type->fields.array->is_array()) {
+ _mesa_glsl_error(&loc, state,
+                             "fragment shader input "
+                             "cannot have an array of arrays");
+ }
+ if (var->type->is_array() &&
+ var->type->fields.array->is_record()) {
+ _mesa_glsl_error(&loc, state,
+ "fragment shader input "
+ "cannot have an array of structs");
+ }
+ if (var->type->is_record()) {
+ for (unsigned i = 0; i < var->type->length; i++) {
+ if (var->type->fields.structure[i].type->is_array() ||
+ var->type->fields.structure[i].type->is_record())
+ _mesa_glsl_error(&loc, state,
+                                "fragment shader input cannot have "
+ "a struct that contains an "
+ "array or struct");
+ }
+ }
+ }
}
} else if (var->data.mode == ir_var_shader_out) {
const glsl_type *check_type = var->type->without_array();
if (check_type->is_record() || check_type->is_matrix())
_mesa_glsl_error(&loc, state,
"fragment shader output "
- "cannot have struct or array type");
+ "cannot have struct or matrix type");
switch (check_type->base_type) {
case GLSL_TYPE_UINT:
case GLSL_TYPE_INT:
"type %s", check_type->name);
}
}
+
+ /* From section 4.3.6 (Output Variables) of the GLSL ES 3.10 spec:
+ *
+ * It is a compile-time error to declare a vertex shader output
+ * with, or that contains, any of the following types:
+ *
+ * * A boolean type
+ * * An opaque type
+ * * An array of arrays
+ * * An array of structures
+ * * A structure containing an array
+ * * A structure containing a structure
+ *
+ * It is a compile-time error to declare a fragment shader output
+ * with, or that contains, any of the following types:
+ *
+ * * A boolean type
+ * * An opaque type
+ * * A matrix
+ * * A structure
+    *     * An array of arrays
+ */
+ if (state->es_shader) {
+ if (var->type->is_array() &&
+ var->type->fields.array->is_array()) {
+ _mesa_glsl_error(&loc, state,
+ "%s shader output "
+ "cannot have an array of arrays",
+ _mesa_shader_stage_to_string(state->stage));
+ }
+ if (state->stage == MESA_SHADER_VERTEX) {
+ if (var->type->is_array() &&
+ var->type->fields.array->is_record()) {
+ _mesa_glsl_error(&loc, state,
+ "vertex shader output "
+ "cannot have an array of structs");
+ }
+ if (var->type->is_record()) {
+ for (unsigned i = 0; i < var->type->length; i++) {
+ if (var->type->fields.structure[i].type->is_array() ||
+ var->type->fields.structure[i].type->is_record())
+ _mesa_glsl_error(&loc, state,
+ "vertex shader output cannot have a "
+ "struct that contains an "
+ "array or struct");
+ }
+ }
+ }
+ }
}
/* Integer fragment inputs must be qualified with 'flat'. In GLSL ES,
const glsl_type *block_array_type =
process_array_type(&loc, block_type, this->array_specifier, state);
+ /* From section 4.3.9 (Interface Blocks) of the GLSL ES 3.10 spec:
+ *
+ * * Arrays of arrays of blocks are not allowed
+ */
+ if (state->es_shader && block_array_type->is_array() &&
+ block_array_type->fields.array->is_array()) {
+ _mesa_glsl_error(&loc, state,
+                       "arrays of arrays of interface blocks are "
+ "not allowed");
+ }
+
var = new(state) ir_variable(block_array_type,
this->instance_name,
var_mode);
var->data.explicit_binding = this->layout.flags.q.explicit_binding;
var->data.binding = this->layout.binding;
+ var->data.vk_set = this->layout.flags.q.vk_set;
+ var->data.set = this->layout.set;
+ var->data.binding = this->layout.binding;
+
state->symbols->add_variable(var);
instructions->push_tail(var);
}
var->data.explicit_binding = this->layout.flags.q.explicit_binding;
var->data.binding = this->layout.binding;
+ var->data.vk_set = this->layout.flags.q.vk_set;
+ var->data.set = this->layout.set;
+ var->data.binding = this->layout.binding;
+
state->symbols->add_variable(var);
instructions->push_tail(var);
}
this->extensions = &ctx->Extensions;
+ this->ARB_compute_shader_enable = true;
+
this->Const.MaxLights = ctx->Const.MaxLights;
this->Const.MaxClipPlanes = ctx->Const.MaxClipPlanes;
this->Const.MaxTextureUnits = ctx->Const.MaxTextureUnits;
/* If the aggregate is an array, recursively set its elements' types. */
if (type->is_array()) {
- /* Each array element has the type type->element_type().
+ /* Each array element has the type type->fields.array.
*
* E.g., if <type> if struct S[2] we want to set each element's type to
* struct S.
link);
if (expr->oper == ast_aggregate)
- _mesa_ast_set_aggregate_type(type->element_type(), expr);
+ _mesa_ast_set_aggregate_type(type->fields.array, expr);
}
/* If the aggregate is a struct, recursively set its fields' types. */
hash_table *glsl_type::array_types = NULL;
hash_table *glsl_type::record_types = NULL;
hash_table *glsl_type::interface_types = NULL;
+hash_table *glsl_type::function_types = NULL;
void *glsl_type::mem_ctx = NULL;
void
mtx_unlock(&glsl_type::mutex);
}
+glsl_type::glsl_type(const glsl_type *return_type,
+ const glsl_function_param *params, unsigned num_params) :
+ gl_type(0),
+ base_type(GLSL_TYPE_FUNCTION),
+ sampler_dimensionality(0), sampler_shadow(0), sampler_array(0),
+ sampler_type(0), interface_packing(0),
+ vector_elements(0), matrix_columns(0),
+ length(num_params)
+{
+ unsigned int i;
+
+ mtx_lock(&glsl_type::mutex);
+
+ init_ralloc_type_ctx();
+
+ this->fields.parameters = rzalloc_array(this->mem_ctx,
+ glsl_function_param, num_params + 1);
+
+ /* We store the return type as the first parameter */
+ this->fields.parameters[0].type = return_type;
+ this->fields.parameters[0].in = false;
+ this->fields.parameters[0].out = true;
+
+ /* We store the i'th parameter in slot i+1 */
+ for (i = 0; i < length; i++) {
+ this->fields.parameters[i + 1].type = params[i].type;
+ this->fields.parameters[i + 1].in = params[i].in;
+ this->fields.parameters[i + 1].out = params[i].out;
+ }
+
+ mtx_unlock(&glsl_type::mutex);
+}
+
bool
glsl_type::contains_sampler() const
case GLSL_TYPE_ATOMIC_UINT:
return true;
case GLSL_TYPE_ARRAY:
- return element_type()->contains_opaque();
+ return fields.array->contains_opaque();
case GLSL_TYPE_STRUCT:
for (unsigned int i = 0; i < length; i++) {
if (fields.structure[i].type->contains_opaque())
}
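+/* Key equality for the function-type hash table: parameter counts must
+ * match and the parameter arrays must be bitwise identical. Note the
+ * length + 1 entries, since slot 0 holds the return type.
+ */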
+static int
+function_key_compare(const void *a, const void *b)
+{
+ const glsl_type *const key1 = (glsl_type *) a;
+ const glsl_type *const key2 = (glsl_type *) b;
+
+ if (key1->length != key2->length)
+ return 1;
+
+ return memcmp(key1->fields.parameters, key2->fields.parameters,
+ (key1->length + 1) * sizeof(*key1->fields.parameters));
+}
+
+
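+/* Hash a function type by printing its length and parameter type
+ * pointers into a small string buffer and string-hashing the result.
+ */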
+static unsigned
+function_key_hash(const void *a)
+{
+ const glsl_type *const key = (glsl_type *) a;
+ char hash_key[128];
+ unsigned size = 0;
+
+ size = snprintf(hash_key, sizeof(hash_key), "%08x", key->length);
+
+ for (unsigned i = 0; i < key->length; i++) {
+ if (size >= sizeof(hash_key))
+ break;
+
+ size += snprintf(& hash_key[size], sizeof(hash_key) - size,
+                       "%p", (void *) key->fields.parameters[i].type);
+ }
+
+ return hash_table_string_hash(& hash_key);
+}
+
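+/* Return the interned function type for this signature, creating and
+ * memoizing it on first use so that type equality remains a pointer
+ * comparison.
+ */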
+const glsl_type *
+glsl_type::get_function_instance(const glsl_type *return_type,
+ const glsl_function_param *params,
+ unsigned num_params)
+{
+ const glsl_type key(return_type, params, num_params);
+
+ mtx_lock(&glsl_type::mutex);
+
+ if (function_types == NULL) {
+ function_types = hash_table_ctor(64, function_key_hash,
+ function_key_compare);
+ }
+
+ const glsl_type *t = (glsl_type *) hash_table_find(function_types, &key);
+ if (t == NULL) {
+ mtx_unlock(&glsl_type::mutex);
+ t = new glsl_type(return_type, params, num_params);
+ mtx_lock(&glsl_type::mutex);
+
+ hash_table_insert(function_types, (void *) t, t);
+ }
+
+ assert(t->base_type == GLSL_TYPE_FUNCTION);
+ assert(t->length == num_params);
+
+ mtx_unlock(&glsl_type::mutex);
+
+ return t;
+}
+
+
const glsl_type *
glsl_type::get_mul_type(const glsl_type *type_a, const glsl_type *type_b)
{
case GLSL_TYPE_IMAGE:
return 1;
+ case GLSL_TYPE_FUNCTION:
case GLSL_TYPE_SAMPLER:
case GLSL_TYPE_ATOMIC_UINT:
case GLSL_TYPE_VOID:
case GLSL_TYPE_ARRAY:
return this->length * this->fields.array->count_attribute_slots();
+ case GLSL_TYPE_FUNCTION:
case GLSL_TYPE_SAMPLER:
case GLSL_TYPE_IMAGE:
case GLSL_TYPE_ATOMIC_UINT:
GLSL_TYPE_IMAGE,
GLSL_TYPE_ATOMIC_UINT,
GLSL_TYPE_STRUCT,
+ GLSL_TYPE_FUNCTION,
GLSL_TYPE_INTERFACE,
GLSL_TYPE_ARRAY,
GLSL_TYPE_VOID,
*/
union {
const struct glsl_type *array; /**< Type of array elements. */
- const struct glsl_type *parameters; /**< Parameters to function. */
+ struct glsl_function_param *parameters; /**< Parameters to function. */
struct glsl_struct_field *structure; /**< List of struct fields. */
} fields;
*/
const glsl_type *get_scalar_type() const;
- /**
- * Query the type of elements in an array
- *
- * \return
- * Pointer to the type of elements in the array for array types, or \c NULL
- * for non-array types.
- */
- const glsl_type *element_type() const
- {
- return is_array() ? fields.array : NULL;
- }
-
/**
* Get the instance of a built-in scalar, vector, or matrix type
*/
enum glsl_interface_packing packing,
const char *block_name);
+ /**
+ * Get the instance of a function type
+ */
+ static const glsl_type *get_function_instance(const struct glsl_type *return_type,
+ const glsl_function_param *parameters,
+ unsigned num_params);
+
/**
* Get the type resulting from a multiplication of \p type_a * \p type_b
*/
if (base_type == GLSL_TYPE_ATOMIC_UINT)
return ATOMIC_COUNTER_SIZE;
else if (is_array())
- return length * element_type()->atomic_size();
+ return length * fields.array->atomic_size();
else
return 0;
}
glsl_type(const glsl_struct_field *fields, unsigned num_fields,
enum glsl_interface_packing packing, const char *name);
+   /** Constructor for function types */
+ glsl_type(const glsl_type *return_type,
+ const glsl_function_param *params, unsigned num_params);
+
/** Constructor for array types */
glsl_type(const glsl_type *array, unsigned length);
/** Hash table containing the known interface types. */
static struct hash_table *interface_types;
+ /** Hash table containing the known function types. */
+ static struct hash_table *function_types;
+
static int record_key_compare(const void *a, const void *b);
static unsigned record_key_hash(const void *key);
/*@}*/
};
+#undef DECL_TYPE
+#undef STRUCT_TYPE
+#endif /* __cplusplus */
+
struct glsl_struct_field {
const struct glsl_type *type;
const char *name;
int stream;
};
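+/**
+ * Description of one parameter of a GLSL_TYPE_FUNCTION type, including
+ * its in/out direction.
+ */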
+struct glsl_function_param {
+ const struct glsl_type *type;
+
+ bool in;
+ bool out;
+};
+
static inline unsigned int
glsl_align(unsigned int a, unsigned int align)
{
return (a + align - 1) / align * align;
}
-#undef DECL_TYPE
-#undef STRUCT_TYPE
-#endif /* __cplusplus */
-
#endif /* GLSL_TYPES_H */
ir_type_discard,
ir_type_emit_vertex,
ir_type_end_primitive,
+ ir_type_barrier,
ir_type_max, /**< maximum ir_type enum number, for validation */
ir_type_unset = ir_type_max
};
unsigned explicit_location:1;
unsigned explicit_index:1;
+ /**
+    * Do we have a Vulkan (set, binding) qualifier for this variable?
+ */
+ unsigned vk_set:1;
+
/**
* Was an initial binding explicitly set in the shader?
*
* \note
* The GLSL spec only allows the values 0 or 1 for the index in \b dual
* source blending.
+ *
+ * This is now also used for the Vulkan descriptor set index.
*/
- unsigned index:1;
+ int16_t index;
/**
* \brief Layout qualifier for gl_FragDepth.
*/
int16_t binding;
+ /**
+ * Vulkan descriptor set for the resource.
+ */
+ int16_t set;
+
/**
* Storage location of the base of this variable
*
ir_rvalue *stream;
};
+ /**
+ * IR instruction for tessellation control and compute shader barrier.
+ */
+ class ir_barrier : public ir_instruction {
+ public:
+ ir_barrier()
+ : ir_instruction(ir_type_barrier)
+ {
+ }
+
+ virtual void accept(ir_visitor *v)
+ {
+ v->visit(this);
+ }
+
+ virtual ir_barrier *clone(void *mem_ctx, struct hash_table *) const
+ {
+ return new(mem_ctx) ir_barrier();
+ }
+
+ virtual ir_visitor_status accept(ir_hierarchical_visitor *);
+ };
+
/*@}*/
/**
case GLSL_TYPE_IMAGE:
case GLSL_TYPE_ATOMIC_UINT:
case GLSL_TYPE_INTERFACE:
+ case GLSL_TYPE_FUNCTION:
case GLSL_TYPE_VOID:
case GLSL_TYPE_ERROR:
/* All other types should have already been filtered by other
set_sampler_binding(gl_shader_program *prog, const char *name, int binding)
{
struct gl_uniform_storage *const storage =
- get_storage(prog->UniformStorage, prog->NumUserUniformStorage, name);
+ get_storage(prog->UniformStorage, prog->NumUniformStorage, name);
if (storage == NULL) {
assert(storage != NULL);
struct gl_uniform_storage *const storage =
get_storage(prog->UniformStorage,
- prog->NumUserUniformStorage,
+ prog->NumUniformStorage,
name);
if (storage == NULL) {
assert(storage != NULL);
virtual void visit(ir_dereference_variable *);
virtual void visit(ir_dereference_record *);
virtual void visit(ir_dereference_array *);
+ virtual void visit(ir_barrier *);
void create_function(ir_function *ir);
}
var->data.index = ir->data.index;
+ var->data.descriptor_set = ir->data.set;
var->data.binding = ir->data.binding;
/* XXX Get rid of buffer_index */
var->data.atomic.buffer_index = ir->data.binding;
op = nir_intrinsic_atomic_counter_inc_var;
} else if (strcmp(ir->callee_name(), "__intrinsic_atomic_predecrement") == 0) {
op = nir_intrinsic_atomic_counter_dec_var;
+ } else if (strcmp(ir->callee_name(), "__intrinsic_image_load") == 0) {
+ op = nir_intrinsic_image_load;
+ } else if (strcmp(ir->callee_name(), "__intrinsic_image_store") == 0) {
+ op = nir_intrinsic_image_store;
+ } else if (strcmp(ir->callee_name(), "__intrinsic_image_atomic_add") == 0) {
+ op = nir_intrinsic_image_atomic_add;
+ } else if (strcmp(ir->callee_name(), "__intrinsic_image_atomic_min") == 0) {
+ op = nir_intrinsic_image_atomic_min;
+ } else if (strcmp(ir->callee_name(), "__intrinsic_image_atomic_max") == 0) {
+ op = nir_intrinsic_image_atomic_max;
+ } else if (strcmp(ir->callee_name(), "__intrinsic_image_atomic_and") == 0) {
+ op = nir_intrinsic_image_atomic_and;
+ } else if (strcmp(ir->callee_name(), "__intrinsic_image_atomic_or") == 0) {
+ op = nir_intrinsic_image_atomic_or;
+ } else if (strcmp(ir->callee_name(), "__intrinsic_image_atomic_xor") == 0) {
+ op = nir_intrinsic_image_atomic_xor;
+ } else if (strcmp(ir->callee_name(), "__intrinsic_image_atomic_exchange") == 0) {
+ op = nir_intrinsic_image_atomic_exchange;
+ } else if (strcmp(ir->callee_name(), "__intrinsic_image_atomic_comp_swap") == 0) {
+ op = nir_intrinsic_image_atomic_comp_swap;
+ } else if (strcmp(ir->callee_name(), "__intrinsic_memory_barrier") == 0) {
+ op = nir_intrinsic_memory_barrier;
} else {
unreachable("not reached");
}
nir_intrinsic_instr *instr = nir_intrinsic_instr_create(shader, op);
- ir_dereference *param =
- (ir_dereference *) ir->actual_parameters.get_head();
- instr->variables[0] = evaluate_deref(&instr->instr, param);
- nir_ssa_dest_init(&instr->instr, &instr->dest, 1, NULL);
+
+ switch (op) {
+ case nir_intrinsic_atomic_counter_read_var:
+ case nir_intrinsic_atomic_counter_inc_var:
+ case nir_intrinsic_atomic_counter_dec_var: {
+ ir_dereference *param =
+ (ir_dereference *) ir->actual_parameters.get_head();
+ instr->variables[0] = evaluate_deref(&instr->instr, param);
+ nir_ssa_dest_init(&instr->instr, &instr->dest, 1, NULL);
+ break;
+ }
+ case nir_intrinsic_image_load:
+ case nir_intrinsic_image_store:
+ case nir_intrinsic_image_atomic_add:
+ case nir_intrinsic_image_atomic_min:
+ case nir_intrinsic_image_atomic_max:
+ case nir_intrinsic_image_atomic_and:
+ case nir_intrinsic_image_atomic_or:
+ case nir_intrinsic_image_atomic_xor:
+ case nir_intrinsic_image_atomic_exchange:
+ case nir_intrinsic_image_atomic_comp_swap: {
+ nir_ssa_undef_instr *instr_undef =
+ nir_ssa_undef_instr_create(shader, 1);
+ nir_instr_insert_after_cf_list(this->cf_node_list,
+ &instr_undef->instr);
+
+ /* Set the image variable dereference. */
+ exec_node *param = ir->actual_parameters.get_head();
+ ir_dereference *image = (ir_dereference *)param;
+ const glsl_type *type =
+ image->variable_referenced()->type->without_array();
+
+ instr->variables[0] = evaluate_deref(&instr->instr, image);
+ param = param->get_next();
+
+ /* Set the address argument, extending the coordinate vector to four
+ * components.
+ */
+ const nir_src src_addr = evaluate_rvalue((ir_dereference *)param);
+ nir_alu_instr *instr_addr = nir_alu_instr_create(shader, nir_op_vec4);
+ nir_ssa_dest_init(&instr_addr->instr, &instr_addr->dest.dest, 4, NULL);
+
+ for (int i = 0; i < 4; i++) {
+ if (i < type->coordinate_components()) {
+ instr_addr->src[i].src = src_addr;
+ instr_addr->src[i].swizzle[0] = i;
+ } else {
+ instr_addr->src[i].src = nir_src_for_ssa(&instr_undef->def);
+ }
+ }
+
+ nir_instr_insert_after_cf_list(cf_node_list, &instr_addr->instr);
+ instr->src[0] = nir_src_for_ssa(&instr_addr->dest.dest.ssa);
+ param = param->get_next();
+
+ /* Set the sample argument, which is undefined for single-sample
+ * images.
+ */
+ if (type->sampler_dimensionality == GLSL_SAMPLER_DIM_MS) {
+ instr->src[1] = evaluate_rvalue((ir_dereference *)param);
+ param = param->get_next();
+ } else {
+ instr->src[1] = nir_src_for_ssa(&instr_undef->def);
+ }
+
+ /* Set the intrinsic parameters. */
+ if (!param->is_tail_sentinel()) {
+ instr->src[2] = evaluate_rvalue((ir_dereference *)param);
+ param = param->get_next();
+ }
+
+ if (!param->is_tail_sentinel()) {
+ instr->src[3] = evaluate_rvalue((ir_dereference *)param);
+ param = param->get_next();
+ }
+
+ /* Set the intrinsic destination. */
+ if (ir->return_deref)
+ nir_ssa_dest_init(&instr->instr, &instr->dest,
+ ir->return_deref->type->vector_elements, NULL);
+ break;
+ }
+ case nir_intrinsic_memory_barrier:
+ break;
+ default:
+ unreachable("not reached");
+ }
nir_instr_insert_after_cf_list(this->cf_node_list, &instr->instr);
- nir_intrinsic_instr *store_instr =
- nir_intrinsic_instr_create(shader, nir_intrinsic_store_var);
- store_instr->num_components = 1;
+ if (ir->return_deref) {
+ nir_intrinsic_instr *store_instr =
+ nir_intrinsic_instr_create(shader, nir_intrinsic_store_var);
+ store_instr->num_components = ir->return_deref->type->vector_elements;
- store_instr->variables[0] = evaluate_deref(&store_instr->instr, ir->return_deref);
- store_instr->src[0].is_ssa = true;
- store_instr->src[0].ssa = &instr->dest.ssa;
+ store_instr->variables[0] =
+ evaluate_deref(&store_instr->instr, ir->return_deref);
+ store_instr->src[0] = nir_src_for_ssa(&instr->dest.ssa);
- nir_instr_insert_after_cf_list(this->cf_node_list, &store_instr->instr);
+ nir_instr_insert_after_cf_list(this->cf_node_list,
+ &store_instr->instr);
+ }
return;
}
}
nir_dest *dest = get_instr_dest(this->result);
-
assert(dest->is_ssa);
- nir_src src = NIR_SRC_INIT;
- src.is_ssa = true;
- src.ssa = &dest->ssa;
- return src;
+ return nir_src_for_ssa(&dest->ssa);
}
nir_alu_instr *
ralloc_steal(this->deref_tail, deref);
this->deref_tail = &deref->deref;
}
+
+ void
+ nir_visitor::visit(ir_barrier *ir)
+ {
+ nir_intrinsic_instr *instr =
+ nir_intrinsic_instr_create(this->shader, nir_intrinsic_barrier);
+ nir_instr_insert_after_cf_list(this->cf_node_list, &instr->instr);
+ }
return 0;
case GLSL_TYPE_IMAGE:
return 0;
+ case GLSL_TYPE_FUNCTION:
case GLSL_TYPE_VOID:
case GLSL_TYPE_ERROR:
case GLSL_TYPE_DOUBLE:
offset += intrin->variables[0]->var->data.driver_location;
load->const_index[0] = offset;
- load->const_index[1] = 1;
if (has_indirect)
load->src[0] = indirect;
offset += intrin->variables[0]->var->data.driver_location;
store->const_index[0] = offset;
- store->const_index[1] = 1;
nir_src_copy(&store->src[0], &intrin->src[0], state->mem_ctx);
#include "program/program.h"
}
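+/* Grow the texture instruction's source array by one and append the
+ * given indirect as a nir_tex_src_sampler_offset source.
+ */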
+static void
+add_indirect_to_tex(nir_tex_instr *instr, nir_src indirect)
+{
+ /* First, we have to resize the array of texture sources */
+ nir_tex_src *new_srcs = rzalloc_array(instr, nir_tex_src,
+ instr->num_srcs + 1);
+
+ for (unsigned i = 0; i < instr->num_srcs; i++) {
+ new_srcs[i].src_type = instr->src[i].src_type;
+ nir_instr_move_src(&instr->instr, &new_srcs[i].src, &instr->src[i].src);
+ }
+
+ ralloc_free(instr->src);
+ instr->src = new_srcs;
+
+ /* Now we can go ahead and move the source over to being a
+ * first-class texture source.
+ */
+ instr->src[instr->num_srcs].src_type = nir_tex_src_sampler_offset;
+ instr->num_srcs++;
+ nir_instr_rewrite_src(&instr->instr, &instr->src[instr->num_srcs - 1].src,
+ indirect);
+}
+
static unsigned
get_sampler_index(const struct gl_shader_program *shader_program,
gl_shader_stage stage, const char *name)
case nir_deref_type_array: {
nir_deref_array *deref_array = nir_deref_as_array(deref->child);
+ assert(deref_array->deref_array_type != nir_deref_array_type_wildcard);
+
+ if (deref_array->deref.child) {
+ ralloc_asprintf_append(&name, "[%u]",
+ deref_array->deref_array_type == nir_deref_array_type_direct ?
+ deref_array->base_offset : 0);
+ } else {
+ assert(deref->child->type->base_type == GLSL_TYPE_SAMPLER);
+ instr->sampler_index = deref_array->base_offset;
+ }
+
/* XXX: We're assuming here that the indirect is the last array
* thing we have. This should be ok for now as we don't support
* arrays_of_arrays yet.
*/
-
- instr->sampler_index *= glsl_get_length(deref->type);
- switch (deref_array->deref_array_type) {
- case nir_deref_array_type_direct:
- instr->sampler_index += deref_array->base_offset;
- if (deref_array->deref.child)
- ralloc_asprintf_append(&name, "[%u]", deref_array->base_offset);
- break;
- case nir_deref_array_type_indirect: {
- add_indirect_to_tex(instr, deref_array->indirect);
- nir_instr_rewrite_src(&instr->instr, &deref_array->indirect,
- NIR_SRC_INIT);
+ if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
+ /* First, we have to resize the array of texture sources */
+ nir_tex_src *new_srcs = rzalloc_array(instr, nir_tex_src,
+ instr->num_srcs + 1);
+
+ for (unsigned i = 0; i < instr->num_srcs; i++) {
+ new_srcs[i].src_type = instr->src[i].src_type;
+ nir_instr_move_src(&instr->instr, &new_srcs[i].src,
+ &instr->src[i].src);
+ }
+
+ ralloc_free(instr->src);
+ instr->src = new_srcs;
+
+ /* Now we can go ahead and move the source over to being a
+ * first-class texture source.
+ */
+ instr->src[instr->num_srcs].src_type = nir_tex_src_sampler_offset;
+ instr->num_srcs++;
+ nir_instr_move_src(&instr->instr,
+ &instr->src[instr->num_srcs - 1].src,
+ &deref_array->indirect);
instr->sampler_array_size = glsl_get_length(deref->type);
-
- if (deref_array->deref.child)
- ralloc_strcat(&name, "[0]");
- break;
- }
-
- case nir_deref_array_type_wildcard:
- unreachable("Cannot copy samplers");
- default:
- unreachable("Invalid deref array type");
}
break;
}
lower_impl(overload->impl, shader_program, stage);
}
}
+
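+/* Per-block lowering for Vulkan: copy the (descriptor set, binding) pair
+ * from the sampler variable onto the texture instruction, fold in any
+ * array offset, then drop the variable dereference.
+ */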
+static bool
+lower_samplers_for_vk_block(nir_block *block, void *data)
+{
+ nir_foreach_instr(block, instr) {
+ if (instr->type != nir_instr_type_tex)
+ continue;
+
+ nir_tex_instr *tex = nir_instr_as_tex(instr);
+
+ assert(tex->sampler);
+
+ tex->sampler_set = tex->sampler->var->data.descriptor_set;
+ tex->sampler_index = tex->sampler->var->data.binding;
+
+ if (tex->sampler->deref.child) {
+ assert(tex->sampler->deref.child->deref_type == nir_deref_type_array);
+ nir_deref_array *arr = nir_deref_as_array(tex->sampler->deref.child);
+
+         /* Only one-level arrays are allowed in Vulkan */
+ assert(arr->deref.child == NULL);
+
+ tex->sampler_index += arr->base_offset;
+ if (arr->deref_array_type == nir_deref_array_type_indirect) {
+ add_indirect_to_tex(tex, arr->indirect);
+ nir_instr_rewrite_src(instr, &arr->indirect, NIR_SRC_INIT);
+
+ tex->sampler_array_size = glsl_get_length(tex->sampler->deref.type);
+ }
+ }
+
+ tex->sampler = NULL;
+ }
+
+ return true;
+}
+
+extern "C" void
+nir_lower_samplers_for_vk(nir_shader *shader)
+{
+ nir_foreach_overload(shader, overload) {
+ if (overload->impl) {
+ nir_foreach_block(overload->impl, lower_samplers_for_vk_block, NULL);
+ }
+ }
+}
#include <string.h>
#include "util/ralloc.h"
+extern "C" void
+_mesa_error_no_memory(const char *caller)
+{
+   fprintf(stderr, "Mesa error: out of memory in %s\n", caller);
+}
+
void
_mesa_warning(struct gl_context *ctx, const char *fmt, ...)
{
{
unsigned i;
- shProg->NumUserUniformStorage = 0;
+ shProg->NumUniformStorage = 0;
shProg->UniformStorage = NULL;
shProg->NumUniformRemapTable = 0;
shProg->UniformRemapTable = NULL;
AM_CXXFLAGS = $(AM_CFLAGS)
-noinst_LTLIBRARIES = libi965_dri.la
+noinst_LTLIBRARIES = libi965_dri.la libi965_compiler.la
libi965_dri_la_SOURCES = $(i965_FILES)
libi965_dri_la_LIBADD = $(INTEL_LIBS)
TEST_LIBS = \
libi965_dri.la \
../common/libdricommon.la \
+ ../common/libxmlconfig.la \
../common/libmegadriver_stub.la \
../../../libmesa.la \
$(DRI_LIB_DEPS) \
$(CLOCK_LIB) \
../common/libdri_test_stubs.la
+libi965_compiler_la_SOURCES = $(i965_FILES)
+libi965_compiler_la_LIBADD = $(INTEL_LIBS) \
+ ../common/libdricommon.la \
+ ../common/libmegadriver_stub.la \
+ ../../../libmesa.la \
+ $(DRI_LIB_DEPS) \
+ $(CLOCK_LIB) \
+ ../common/libdri_test_stubs.la -lm
+
TESTS = \
test_fs_cmod_propagation \
test_fs_saturate_propagation \
#include "brw_context.h"
#include "brw_defines.h"
+ #include "brw_shader.h"
#include "brw_draw.h"
#include "brw_state.h"
#include "tnl/t_pipeline.h"
#include "util/ralloc.h"
- #include "glsl/nir/nir.h"
-
/***************************************
* Mesa's Driver Functions
***************************************/
else
gen4_init_queryobj_functions(functions);
brw_init_compute_functions(functions);
+ if (brw->gen >= 7)
+ brw_init_conditional_render_functions(functions);
functions->QuerySamplesForFormat = brw_query_samples_for_format;
functions->GetSamplePosition = gen6_get_sample_position;
}
-static void
+void
brw_initialize_context_constants(struct brw_context *brw)
{
struct gl_context *ctx = &brw->ctx;
int max_samples;
const int *msaa_modes = intel_supported_msaa_modes(brw->intelScreen);
const int clamp_max_samples =
- driQueryOptioni(&brw->optionCache, "clamp_max_samples");
+ brw->optionCache.info != NULL ?
+ driQueryOptioni(&brw->optionCache, "clamp_max_samples") : -1;
if (clamp_max_samples < 0) {
max_samples = msaa_modes[0];
ctx->Const.MinLineWidth = 1.0;
ctx->Const.MinLineWidthAA = 1.0;
- if (brw->gen >= 9 || brw->is_cherryview) {
- ctx->Const.MaxLineWidth = 40.0;
- ctx->Const.MaxLineWidthAA = 40.0;
- ctx->Const.LineWidthGranularity = 0.125;
- } else if (brw->gen >= 6) {
+ if (brw->gen >= 6) {
ctx->Const.MaxLineWidth = 7.375;
ctx->Const.MaxLineWidthAA = 7.375;
ctx->Const.LineWidthGranularity = 0.125;
ctx->Const.LineWidthGranularity = 0.5;
}
+ /* For non-antialiased lines, we have to round the line width to the
+ * nearest whole number. Make sure that we don't advertise a line
+ * width that, when rounded, will be beyond the actual hardware
+ * maximum.
+ */
+ assert(roundf(ctx->Const.MaxLineWidth) <= ctx->Const.MaxLineWidth);
+
ctx->Const.MinPointSize = 1.0;
ctx->Const.MinPointSizeAA = 1.0;
ctx->Const.MaxPointSize = 255.0;
*/
ctx->Const.UniformBufferOffsetAlignment = 16;
ctx->Const.TextureBufferOffsetAlignment = 16;
+ ctx->Const.MaxTextureBufferSize = 128 * 1024 * 1024;
if (brw->gen >= 6) {
ctx->Const.MaxVarying = 32;
ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;
}
- static const nir_shader_compiler_options nir_options = {
- .native_integers = true,
- /* In order to help allow for better CSE at the NIR level we tell NIR
- * to split all ffma instructions during opt_algebraic and we then
- * re-combine them as a later step.
- */
- .lower_ffma = true,
- .lower_sub = true,
- };
-
/* We want the GLSL compiler to emit code that uses condition codes */
for (int i = 0; i < MESA_SHADER_STAGES; i++) {
- ctx->Const.ShaderCompilerOptions[i].MaxIfDepth = brw->gen < 6 ? 16 : UINT_MAX;
- ctx->Const.ShaderCompilerOptions[i].EmitCondCodes = true;
- ctx->Const.ShaderCompilerOptions[i].EmitNoNoise = true;
- ctx->Const.ShaderCompilerOptions[i].EmitNoMainReturn = true;
- ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectInput = true;
- ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectOutput =
- (i == MESA_SHADER_FRAGMENT);
- ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectTemp =
- (i == MESA_SHADER_FRAGMENT);
- ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectUniform = false;
- ctx->Const.ShaderCompilerOptions[i].LowerClipDistance = true;
+ ctx->Const.ShaderCompilerOptions[i] =
+ brw->intelScreen->compiler->glsl_compiler_options[i];
}
- ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].OptimizeForAOS = true;
- ctx->Const.ShaderCompilerOptions[MESA_SHADER_GEOMETRY].OptimizeForAOS = true;
-
- if (brw->scalar_vs) {
- /* If we're using the scalar backend for vertex shaders, we need to
- * configure these accordingly.
- */
- ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].EmitNoIndirectOutput = true;
- ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].EmitNoIndirectTemp = true;
- ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].OptimizeForAOS = false;
-
- if (brw_env_var_as_boolean("INTEL_USE_NIR", true))
- ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].NirOptions = &nir_options;
- }
-
- if (brw_env_var_as_boolean("INTEL_USE_NIR", true))
- ctx->Const.ShaderCompilerOptions[MESA_SHADER_FRAGMENT].NirOptions = &nir_options;
-
- ctx->Const.ShaderCompilerOptions[MESA_SHADER_COMPUTE].NirOptions = &nir_options;
-
/* ARB_viewport_array */
if (brw->gen >= 6 && ctx->API == API_OPENGL_CORE) {
ctx->Const.MaxViewports = GEN6_NUM_VIEWPORTS;
/* ARB_gpu_shader5 */
if (brw->gen >= 7)
ctx->Const.MaxVertexStreams = MIN2(4, MAX_VERTEX_STREAMS);
+
+ /* ARB_framebuffer_no_attachments */
+ ctx->Const.MaxFramebufferWidth = ctx->Const.MaxViewportWidth;
+ ctx->Const.MaxFramebufferHeight = ctx->Const.MaxViewportHeight;
+ ctx->Const.MaxFramebufferLayers = ctx->Const.MaxArrayTextureLayers;
+ ctx->Const.MaxFramebufferSamples = max_samples;
}
static void
_mesa_meta_init(ctx);
brw_process_driconf_options(brw);
- brw_process_intel_debug_variable(brw);
- if (brw->gen >= 8 && !(INTEL_DEBUG & DEBUG_VEC4VS))
- brw->scalar_vs = true;
+ if (INTEL_DEBUG & DEBUG_PERF)
+ brw->perf_debug = true;
brw_initialize_context_constants(brw);
intel_batchbuffer_init(brw);
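+/* Note: hardware context setup below is stubbed out, presumably so the
+ * standalone libi965_compiler build can create a brw_context without a
+ * winsys.
+ */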
+#if 0
if (brw->gen >= 6) {
/* Create a new hardware context. Using a hardware context means that
* our GPU state will be saved/restored on context switch, allowing us
}
brw_init_state(brw);
+#endif
intelInitExtensions(ctx);
brw->gs.enabled = false;
brw->sf.viewport_transform_enable = true;
+ brw->predicate.state = BRW_PREDICATE_STATE_RENDER;
+
ctx->VertexProgram._MaintainTnlProgram = true;
ctx->FragmentProgram._MaintainTexEnvProgram = true;
_mesa_compute_version(ctx);
+#if 0
_mesa_initialize_dispatch_tables(ctx);
_mesa_initialize_vbo_vtxfmt(ctx);
+#endif
if (ctx->Extensions.AMD_performance_monitor) {
brw_init_performance_monitors(brw);
/** @} */
} binding_table;
+ uint32_t *map_entries;
+ struct {
+ uint32_t index_count;
+ uint32_t *index;
+ } bind_map[8]; /* MAX_SETS from vulkan/private.h */
+
GLuint nr_params; /**< number of float params/constants */
GLuint nr_pull_params;
unsigned svbi_postincrement_value;
};
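+ /* Thread dispatch modes for the VUE stages (VS/GS); the enum values
+  * match the bits programmed into the hardware dispatch-mode field.
+  */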
+ enum shader_dispatch_mode {
+ DISPATCH_MODE_4X1_SINGLE = 0,
+ DISPATCH_MODE_4X2_DUAL_INSTANCE = 1,
+ DISPATCH_MODE_4X2_DUAL_OBJECT = 2,
+ DISPATCH_MODE_SIMD8 = 3,
+ };
/* Note: brw_vue_prog_data_compare() must be updated when adding fields to
* this struct!
*/
GLuint urb_entry_size;
- bool simd8;
+ enum shader_dispatch_mode dispatch_mode;
};
int invocations;
- /**
- * Dispatch mode, can be any of:
- * GEN7_GS_DISPATCH_MODE_DUAL_OBJECT
- * GEN7_GS_DISPATCH_MODE_DUAL_INSTANCE
- * GEN7_GS_DISPATCH_MODE_SINGLE
- */
- int dispatch_mode;
-
/**
* Gen6 transform feedback enabled flag.
*/
enum shader_time_shader_type {
ST_NONE,
ST_VS,
- ST_VS_WRITTEN,
- ST_VS_RESET,
ST_GS,
- ST_GS_WRITTEN,
- ST_GS_RESET,
ST_FS8,
- ST_FS8_WRITTEN,
- ST_FS8_RESET,
ST_FS16,
- ST_FS16_WRITTEN,
- ST_FS16_RESET,
ST_CS,
- ST_CS_WRITTEN,
- ST_CS_RESET,
};
struct brw_vertex_buffer {
uint32_t sampler_offset;
};
+ enum brw_predicate_state {
+ /* The first two states are used if we can determine whether to draw
+ * without having to look at the values in the query object buffer. This
+ * will happen if there is no conditional render in progress, if the query
+ * object is already completed or if something else has already added
+ * samples to the preliminary result such as via a BLT command.
+ */
+ BRW_PREDICATE_STATE_RENDER,
+ BRW_PREDICATE_STATE_DONT_RENDER,
+ /* In this case whether to draw or not depends on the result of an
+ * MI_PREDICATE command so the predicate enable bit needs to be checked.
+ */
+ BRW_PREDICATE_STATE_USE_BIT
+ };
+
+ struct shader_times;
/**
* brw_context is derived from gl_context.
bool has_pln;
bool no_simd8;
bool use_rep_send;
- bool scalar_vs;
/**
* Some versions of Gen hardware don't do centroid interpolation correctly
bool begin_emitted;
} query;
+ struct {
+ enum brw_predicate_state state;
+ bool supported;
+ } predicate;
+
struct {
/** A map from pipeline statistics counter IDs to MMIO addresses. */
const int *statistics_registers;
uint32_t offset;
uint32_t size;
enum aub_state_struct_type type;
+ int index;
} *state_batch_list;
int state_batch_count;
const char **names;
int *ids;
enum shader_time_shader_type *types;
- uint64_t *cumulative;
+ struct shader_times *cumulative;
int num_entries;
int max_entries;
double report_time;
void brw_store_register_mem64(struct brw_context *brw,
drm_intel_bo *bo, uint32_t reg, int idx);
+ /** brw_conditional_render.c */
+ void brw_init_conditional_render_functions(struct dd_function_table *functions);
+ bool brw_check_conditional_render(struct brw_context *brw);
+
/** intel_batchbuffer.c */
void brw_load_register_mem(struct brw_context *brw,
uint32_t reg,
drm_intel_bo *bo,
uint32_t read_domains, uint32_t write_domain,
uint32_t offset);
+ void brw_load_register_mem64(struct brw_context *brw,
+ uint32_t reg,
+ drm_intel_bo *bo,
+ uint32_t read_domains, uint32_t write_domain,
+ uint32_t offset);
/*======================================================================
* brw_state_dump.c
struct brw_stage_state *stage_state,
enum aub_state_struct_type type);
+struct intel_screen *intel_screen_create(int fd);
+void intel_screen_destroy(struct intel_screen *screen);
+
+struct brw_context *intel_context_create(struct intel_screen *screen);
+void intel_context_destroy(struct brw_context *brw);
+
+void
+brw_initialize_context_constants(struct brw_context *brw);
+
+ bool
+ gen9_use_linear_1d_layout(const struct brw_context *brw,
+ const struct intel_mipmap_tree *mt);
+
#ifdef __cplusplus
}
#endif
}
-static const unsigned *
+const unsigned *
brw_cs_emit(struct brw_context *brw,
void *mem_ctx,
const struct brw_cs_prog_key *key,
cfg_t *cfg = NULL;
const char *fail_msg = NULL;
+ int st_index = -1;
+ if (INTEL_DEBUG & DEBUG_SHADER_TIME)
+ st_index = brw_get_shader_time_index(brw, prog, &cp->Base, ST_CS);
+
/* Now the main event: Visit the shader IR and generate our CS IR for it.
*/
- fs_visitor v8(brw, mem_ctx, key, prog_data, prog, cp, 8);
+ fs_visitor v8(brw->intelScreen->compiler, brw,
+ mem_ctx, MESA_SHADER_COMPUTE, key, &prog_data->base, prog,
+ &cp->Base, 8, st_index);
if (!v8.run_cs()) {
fail_msg = v8.fail_msg;
} else if (local_workgroup_size <= 8 * brw->max_cs_threads) {
prog_data->simd_size = 8;
}
- fs_visitor v16(brw, mem_ctx, key, prog_data, prog, cp, 16);
+ fs_visitor v16(brw->intelScreen->compiler, brw,
+ mem_ctx, MESA_SHADER_COMPUTE, key, &prog_data->base, prog,
+ &cp->Base, 16, st_index);
if (likely(!(INTEL_DEBUG & DEBUG_NO16)) &&
!fail_msg && !v8.simd16_unsupported &&
local_workgroup_size <= 16 * brw->max_cs_threads) {
return NULL;
}
- fs_generator g(brw, mem_ctx, (void*) key, &prog_data->base, &cp->Base,
+ fs_generator g(brw->intelScreen->compiler, brw,
+ mem_ctx, (void*) key, &prog_data->base, &cp->Base,
v8.promoted_constants, v8.runtime_check_aads_emit, "CS");
if (INTEL_DEBUG & DEBUG_CS) {
char *name = ralloc_asprintf(mem_ctx, "%s compute shader %d",
extern "C"
const struct brw_tracked_state brw_cs_state = {
- .dirty = {
- .mesa = 0,
- .brw = BRW_NEW_CS_PROG_DATA,
+   /* designated initializers aren't valid in C++, so they are
+    * kept as comments for documentation purposes */
+ /* .dirty = */{
+ /* .mesa = */ 0,
+ /* .brw = */ BRW_NEW_CS_PROG_DATA,
},
- .emit = brw_upload_cs_state
+ /* .emit = */ brw_upload_cs_state
};
fieldval & field ## _MASK; \
})
+ #define GET_BITS(data, high, low) (((data) & INTEL_MASK((high), (low))) >> (low))
#define GET_FIELD(word, field) (((word) & field ## _MASK) >> field ## _SHIFT)
#ifndef BRW_DEFINES_H
# define GEN4_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL (0 << 15)
# define GEN4_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM (1 << 15)
# define GEN7_3DPRIM_INDIRECT_PARAMETER_ENABLE (1 << 10)
+ # define GEN7_3DPRIM_PREDICATE_ENABLE (1 << 8)
/* DW1 */
# define GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL (0 << 8)
# define GEN7_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM (1 << 8)
+#ifndef _3DPRIM_POINTLIST /* FIXME: Avoid clashing with defines from bdw_pack.h */
#define _3DPRIM_POINTLIST 0x01
#define _3DPRIM_LINELIST 0x02
#define _3DPRIM_LINESTRIP 0x03
#define _3DPRIM_LINESTRIP_BF 0x13
#define _3DPRIM_LINESTRIP_CONT_BF 0x14
#define _3DPRIM_TRIFAN_NOSTIPPLE 0x15
+#endif
/* We use this offset to be able to pass native primitive types in struct
* _mesa_prim::mode. Native primitive types are BRW_PRIM_OFFSET +
#define GEN7_SURFACE_ARYSPC_FULL (0 << 10)
#define GEN7_SURFACE_ARYSPC_LOD0 (1 << 10)
- /* Surface state DW0 */
+ /* Surface state DW1 */
#define GEN8_SURFACE_MOCS_SHIFT 24
#define GEN8_SURFACE_MOCS_MASK INTEL_MASK(30, 24)
+ #define GEN8_SURFACE_QPITCH_SHIFT 0
+ #define GEN8_SURFACE_QPITCH_MASK INTEL_MASK(14, 0)
/* Surface state DW2 */
#define BRW_SURFACE_HEIGHT_SHIFT 19
#define GEN7_SURFACE_MOCS_SHIFT 16
#define GEN7_SURFACE_MOCS_MASK INTEL_MASK(19, 16)
+ #define GEN9_SURFACE_TRMODE_SHIFT 18
+ #define GEN9_SURFACE_TRMODE_MASK INTEL_MASK(19, 18)
+ #define GEN9_SURFACE_TRMODE_NONE 0
+ #define GEN9_SURFACE_TRMODE_TILEYF 1
+ #define GEN9_SURFACE_TRMODE_TILEYS 2
+
+ #define GEN9_SURFACE_MIP_TAIL_START_LOD_SHIFT 8
+ #define GEN9_SURFACE_MIP_TAIL_START_LOD_MASK INTEL_MASK(11, 8)
+
/* Surface state DW6 */
#define GEN7_SURFACE_MCS_ENABLE (1 << 0)
#define GEN7_SURFACE_MCS_PITCH_SHIFT 3
#define GEN8_SURFACE_AUX_MODE_HIZ 3
/* Surface state DW7 */
+ #define GEN9_SURFACE_RT_COMPRESSION_SHIFT 30
+ #define GEN9_SURFACE_RT_COMPRESSION_MASK INTEL_MASK(30, 30)
#define GEN7_SURFACE_CLEAR_COLOR_SHIFT 28
#define GEN7_SURFACE_SCS_R_SHIFT 25
#define GEN7_SURFACE_SCS_R_MASK INTEL_MASK(27, 25)
* Terminate the compute shader.
*/
CS_OPCODE_CS_TERMINATE,
+
+ /**
+ * GLSL barrier()
+ */
+ SHADER_OPCODE_BARRIER,
};
enum brw_urb_write_flags {
#define BRW_SCRATCH_SPACE_SIZE_1M 10
#define BRW_SCRATCH_SPACE_SIZE_2M 11
+ #define BRW_MESSAGE_GATEWAY_SFID_OPEN_GATEWAY 0
+ #define BRW_MESSAGE_GATEWAY_SFID_CLOSE_GATEWAY 1
+ #define BRW_MESSAGE_GATEWAY_SFID_FORWARD_MSG 2
+ #define BRW_MESSAGE_GATEWAY_SFID_GET_TIMESTAMP 3
+ #define BRW_MESSAGE_GATEWAY_SFID_BARRIER_MSG 4
+ #define BRW_MESSAGE_GATEWAY_SFID_UPDATE_GATEWAY_STATE 5
+ #define BRW_MESSAGE_GATEWAY_SFID_MMIO_READ_WRITE 6
+
#define CMD_URB_FENCE 0x6000
#define CMD_CS_URB_STATE 0x6001
# define GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_SID 1
# define GEN7_GS_CONTROL_DATA_HEADER_SIZE_SHIFT 20
# define GEN7_GS_INSTANCE_CONTROL_SHIFT 15
- # define GEN7_GS_DISPATCH_MODE_SINGLE (0 << 11)
- # define GEN7_GS_DISPATCH_MODE_DUAL_INSTANCE (1 << 11)
- # define GEN7_GS_DISPATCH_MODE_DUAL_OBJECT (2 << 11)
+ # define GEN7_GS_DISPATCH_MODE_SHIFT 11
+ # define GEN7_GS_DISPATCH_MODE_MASK INTEL_MASK(12, 11)
# define GEN6_GS_STATISTICS_ENABLE (1 << 10)
# define GEN6_GS_SO_STATISTICS_ENABLE (1 << 9)
# define GEN6_GS_RENDERING_ENABLE (1 << 8)
* cache settings. We still use only either write-back or write-through; and
* rely on the documented default values.
*/
- #define SKL_MOCS_WB 9
- #define SKL_MOCS_WT 5
+ #define SKL_MOCS_WB (0b001001 << 1)
+ #define SKL_MOCS_WT (0b000101 << 1)
#define MEDIA_VFE_STATE 0x7000
/* GEN7 DW2, GEN8+ DW3 */
#include "glsl/glsl_types.h"
#include "program/sampler.h"
+ using namespace brw;
+
void
fs_inst::init(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
const fs_reg *src, unsigned sources)
}
}
- #define ALU1(op) \
- fs_inst * \
- fs_visitor::op(const fs_reg &dst, const fs_reg &src0) \
- { \
- return new(mem_ctx) fs_inst(BRW_OPCODE_##op, dst, src0); \
- }
-
- #define ALU2(op) \
- fs_inst * \
- fs_visitor::op(const fs_reg &dst, const fs_reg &src0, \
- const fs_reg &src1) \
- { \
- return new(mem_ctx) fs_inst(BRW_OPCODE_##op, dst, src0, src1); \
- }
-
- #define ALU2_ACC(op) \
- fs_inst * \
- fs_visitor::op(const fs_reg &dst, const fs_reg &src0, \
- const fs_reg &src1) \
- { \
- fs_inst *inst = new(mem_ctx) fs_inst(BRW_OPCODE_##op, dst, src0, src1);\
- inst->writes_accumulator = true; \
- return inst; \
- }
-
- #define ALU3(op) \
- fs_inst * \
- fs_visitor::op(const fs_reg &dst, const fs_reg &src0, \
- const fs_reg &src1, const fs_reg &src2) \
- { \
- return new(mem_ctx) fs_inst(BRW_OPCODE_##op, dst, src0, src1, src2);\
- }
-
- ALU1(NOT)
- ALU1(MOV)
- ALU1(FRC)
- ALU1(RNDD)
- ALU1(RNDE)
- ALU1(RNDZ)
- ALU2(ADD)
- ALU2(MUL)
- ALU2_ACC(MACH)
- ALU2(AND)
- ALU2(OR)
- ALU2(XOR)
- ALU2(SHL)
- ALU2(SHR)
- ALU2(ASR)
- ALU3(LRP)
- ALU1(BFREV)
- ALU3(BFE)
- ALU2(BFI1)
- ALU3(BFI2)
- ALU1(FBH)
- ALU1(FBL)
- ALU1(CBIT)
- ALU3(MAD)
- ALU2_ACC(ADDC)
- ALU2_ACC(SUBB)
- ALU2(SEL)
- ALU2(MAC)
-
- /** Gen4 predicated IF. */
- fs_inst *
- fs_visitor::IF(enum brw_predicate predicate)
- {
- fs_inst *inst = new(mem_ctx) fs_inst(BRW_OPCODE_IF, dispatch_width);
- inst->predicate = predicate;
- return inst;
- }
-
- /** Gen6 IF with embedded comparison. */
- fs_inst *
- fs_visitor::IF(const fs_reg &src0, const fs_reg &src1,
- enum brw_conditional_mod condition)
- {
- assert(devinfo->gen == 6);
- fs_inst *inst = new(mem_ctx) fs_inst(BRW_OPCODE_IF, dispatch_width,
- reg_null_d, src0, src1);
- inst->conditional_mod = condition;
- return inst;
- }
-
- /**
- * CMP: Sets the low bit of the destination channels with the result
- * of the comparison, while the upper bits are undefined, and updates
- * the flag register with the packed 16 bits of the result.
- */
- fs_inst *
- fs_visitor::CMP(fs_reg dst, fs_reg src0, fs_reg src1,
- enum brw_conditional_mod condition)
- {
- fs_inst *inst;
-
- /* Take the instruction:
- *
- * CMP null<d> src0<f> src1<f>
- *
- * Original gen4 does type conversion to the destination type before
- * comparison, producing garbage results for floating point comparisons.
- *
- * The destination type doesn't matter on newer generations, so we set the
- * type to match src0 so we can compact the instruction.
- */
- dst.type = src0.type;
- if (dst.file == HW_REG)
- dst.fixed_hw_reg.type = dst.type;
-
- resolve_ud_negate(&src0);
- resolve_ud_negate(&src1);
-
- inst = new(mem_ctx) fs_inst(BRW_OPCODE_CMP, dst, src0, src1);
- inst->conditional_mod = condition;
-
- return inst;
- }
-
- fs_inst *
- fs_visitor::LOAD_PAYLOAD(const fs_reg &dst, fs_reg *src, int sources,
- int header_size)
- {
- assert(dst.width % 8 == 0);
- fs_inst *inst = new(mem_ctx) fs_inst(SHADER_OPCODE_LOAD_PAYLOAD, dst.width,
- dst, src, sources);
- inst->header_size = header_size;
-
- for (int i = 0; i < header_size; i++)
- assert(src[i].file != GRF || src[i].width * type_sz(src[i].type) == 32);
- inst->regs_written = header_size;
-
- for (int i = header_size; i < sources; ++i)
- assert(src[i].file != GRF || src[i].width == dst.width);
- inst->regs_written += (sources - header_size) * (dst.width / 8);
-
- return inst;
- }
-
- exec_list
- fs_visitor::VARYING_PULL_CONSTANT_LOAD(const fs_reg &dst,
+ void
+ fs_visitor::VARYING_PULL_CONSTANT_LOAD(const fs_builder &bld,
+ const fs_reg &dst,
const fs_reg &surf_index,
const fs_reg &varying_offset,
uint32_t const_offset)
{
- exec_list instructions;
- fs_inst *inst;
-
/* We have our constant surface use a pitch of 4 bytes, so our index can
* be any component of a vector, and then we load 4 contiguous
* components starting from that.
* the redundant ones.
*/
fs_reg vec4_offset = vgrf(glsl_type::int_type);
- instructions.push_tail(ADD(vec4_offset,
- varying_offset, fs_reg(const_offset & ~3)));
+ bld.ADD(vec4_offset, varying_offset, fs_reg(const_offset & ~3));
int scale = 1;
if (devinfo->gen == 4 && dst.width == 8) {
int regs_written = 4 * (dst.width / 8) * scale;
fs_reg vec4_result = fs_reg(GRF, alloc.allocate(regs_written),
dst.type, dst.width);
- inst = new(mem_ctx) fs_inst(op, vec4_result, surf_index, vec4_offset);
+ fs_inst *inst = bld.emit(op, vec4_result, surf_index, vec4_offset);
inst->regs_written = regs_written;
- instructions.push_tail(inst);
if (devinfo->gen < 7) {
inst->base_mrf = 13;
inst->mlen = 1 + dispatch_width / 8;
}
- fs_reg result = offset(vec4_result, (const_offset & 3) * scale);
- instructions.push_tail(MOV(dst, result));
-
- return instructions;
+ bld.MOV(dst, offset(vec4_result, (const_offset & 3) * scale));
}
/**
* A helper for MOV generation for fixing up broken hardware SEND dependency
* handling.
*/
- fs_inst *
- fs_visitor::DEP_RESOLVE_MOV(int grf)
+ void
+ fs_visitor::DEP_RESOLVE_MOV(const fs_builder &bld, int grf)
{
- fs_inst *inst = MOV(brw_null_reg(), fs_reg(GRF, grf, BRW_REGISTER_TYPE_F));
-
- inst->ir = NULL;
- inst->annotation = "send dependency resolve";
-
/* The caller always wants uncompressed to emit the minimal extra
* dependencies, and to avoid having to deal with aligning its regs to 2.
*/
- inst->exec_size = 8;
- return inst;
+ const fs_builder ubld = bld.annotate("send dependency resolve")
+ .half(0);
+ ubld.MOV(ubld.null_reg_f(), fs_reg(GRF, grf, BRW_REGISTER_TYPE_F));
}
bool
case GLSL_TYPE_ERROR:
case GLSL_TYPE_INTERFACE:
case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_FUNCTION:
unreachable("not reached");
}
* the destination of the MOV, with extra parameters set.
*/
fs_reg
- fs_visitor::get_timestamp(fs_inst **out_mov)
+ fs_visitor::get_timestamp(const fs_builder &bld)
{
assert(devinfo->gen >= 7);
fs_reg dst = fs_reg(GRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD, 4);
- fs_inst *mov = MOV(dst, ts);
/* We want to read the 3 fields we care about even if it's not enabled in
* the dispatch.
*/
- mov->force_writemask_all = true;
+ bld.exec_all().MOV(dst, ts);
/* The caller wants the low 32 bits of the timestamp. Since it's running
* at the GPU clock rate of ~1.2 GHz, it will roll over every ~3 seconds,
*/
dst.set_smear(0);
- *out_mov = mov;
return dst;
}
void
fs_visitor::emit_shader_time_begin()
{
- current_annotation = "shader time start";
- fs_inst *mov;
- shader_start_time = get_timestamp(&mov);
- emit(mov);
+ shader_start_time = get_timestamp(bld.annotate("shader time start"));
}
void
fs_visitor::emit_shader_time_end()
{
- current_annotation = "shader time end";
-
- enum shader_time_shader_type type, written_type, reset_type;
- switch (stage) {
- case MESA_SHADER_VERTEX:
- type = ST_VS;
- written_type = ST_VS_WRITTEN;
- reset_type = ST_VS_RESET;
- break;
- case MESA_SHADER_GEOMETRY:
- type = ST_GS;
- written_type = ST_GS_WRITTEN;
- reset_type = ST_GS_RESET;
- break;
- case MESA_SHADER_FRAGMENT:
- if (dispatch_width == 8) {
- type = ST_FS8;
- written_type = ST_FS8_WRITTEN;
- reset_type = ST_FS8_RESET;
- } else {
- assert(dispatch_width == 16);
- type = ST_FS16;
- written_type = ST_FS16_WRITTEN;
- reset_type = ST_FS16_RESET;
- }
- break;
- case MESA_SHADER_COMPUTE:
- type = ST_CS;
- written_type = ST_CS_WRITTEN;
- reset_type = ST_CS_RESET;
- break;
- default:
- unreachable("fs_visitor::emit_shader_time_end missing code");
- }
-
/* Insert our code just before the final SEND with EOT. */
exec_node *end = this->instructions.get_tail();
assert(end && ((fs_inst *) end)->eot);
+ const fs_builder ibld = bld.annotate("shader time end")
+ .exec_all().at(NULL, end);
- fs_inst *tm_read;
- fs_reg shader_end_time = get_timestamp(&tm_read);
- end->insert_before(tm_read);
+ fs_reg shader_end_time = get_timestamp(ibld);
/* Check that there weren't any timestamp reset events (assuming these
* were the only two timestamp reads that happened).
*/
fs_reg reset = shader_end_time;
reset.set_smear(2);
- fs_inst *test = AND(reg_null_d, reset, fs_reg(1u));
- test->conditional_mod = BRW_CONDITIONAL_Z;
- test->force_writemask_all = true;
- end->insert_before(test);
- end->insert_before(IF(BRW_PREDICATE_NORMAL));
+ set_condmod(BRW_CONDITIONAL_Z,
+ ibld.AND(ibld.null_reg_ud(), reset, fs_reg(1u)));
+ ibld.IF(BRW_PREDICATE_NORMAL);
fs_reg start = shader_start_time;
start.negate = true;
fs_reg diff = fs_reg(GRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD, 1);
diff.set_smear(0);
- fs_inst *add = ADD(diff, start, shader_end_time);
- add->force_writemask_all = true;
- end->insert_before(add);
+ ibld.ADD(diff, start, shader_end_time);
/* If there were no instructions between the two timestamp gets, the diff
* is 2 cycles. Remove that overhead, so I can forget about that when
* trying to determine the time taken for single instructions.
*/
- add = ADD(diff, diff, fs_reg(-2u));
- add->force_writemask_all = true;
- end->insert_before(add);
-
- end->insert_before(SHADER_TIME_ADD(type, diff));
- end->insert_before(SHADER_TIME_ADD(written_type, fs_reg(1u)));
- end->insert_before(new(mem_ctx) fs_inst(BRW_OPCODE_ELSE, dispatch_width));
- end->insert_before(SHADER_TIME_ADD(reset_type, fs_reg(1u)));
- end->insert_before(new(mem_ctx) fs_inst(BRW_OPCODE_ENDIF, dispatch_width));
+ ibld.ADD(diff, diff, fs_reg(-2u));
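+ /* Subindex 0 accumulates time, 1 counts writes, 2 counts resets. */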
+ SHADER_TIME_ADD(ibld, 0, diff);
+ SHADER_TIME_ADD(ibld, 1, fs_reg(1u));
+ ibld.emit(BRW_OPCODE_ELSE);
+ SHADER_TIME_ADD(ibld, 2, fs_reg(1u));
+ ibld.emit(BRW_OPCODE_ENDIF);
}
- fs_inst *
- fs_visitor::SHADER_TIME_ADD(enum shader_time_shader_type type, fs_reg value)
+ void
+ fs_visitor::SHADER_TIME_ADD(const fs_builder &bld,
+ int shader_time_subindex,
+ fs_reg value)
{
- int shader_time_index =
- brw_get_shader_time_index(brw, shader_prog, prog, type);
- fs_reg offset = fs_reg(shader_time_index * SHADER_TIME_STRIDE);
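+ /* Each shader-time entry is a triple of counters (time, written,
+ * reset); the subindex selects the counter within the entry, replacing
+ * the per-stage type/written/reset enums removed above.
+ */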
+ int index = shader_time_index * 3 + shader_time_subindex;
+ fs_reg offset = fs_reg(index * SHADER_TIME_STRIDE);
fs_reg payload;
if (dispatch_width == 8)
else
payload = vgrf(glsl_type::uint_type);
- return new(mem_ctx) fs_inst(SHADER_OPCODE_SHADER_TIME_ADD,
- fs_reg(), payload, offset, value);
+ bld.emit(SHADER_OPCODE_SHADER_TIME_ADD, fs_reg(), payload, offset, value);
}
void
* During a SIMD16 compile (if one happens anyway), this just calls fail().
*/
void
- fs_visitor::no16(const char *format, ...)
+ fs_visitor::no16(const char *msg)
{
- va_list va;
-
- va_start(va, format);
-
if (dispatch_width == 16) {
- vfail(format, va);
+ fail("%s", msg);
} else {
simd16_unsupported = true;
- if (brw->perf_debug) {
- if (no16_msg)
- ralloc_vasprintf_append(&no16_msg, format, va);
- else
- no16_msg = ralloc_vasprintf(mem_ctx, format, va);
- }
+ compiler->shader_perf_log(log_data,
+ "SIMD16 shader failed to compile: %s", msg);
}
-
- va_end(va);
- }
-
- fs_inst *
- fs_visitor::emit(enum opcode opcode)
- {
- return emit(new(mem_ctx) fs_inst(opcode, dispatch_width));
- }
-
- fs_inst *
- fs_visitor::emit(enum opcode opcode, const fs_reg &dst)
- {
- return emit(new(mem_ctx) fs_inst(opcode, dst));
- }
-
- fs_inst *
- fs_visitor::emit(enum opcode opcode, const fs_reg &dst, const fs_reg &src0)
- {
- return emit(new(mem_ctx) fs_inst(opcode, dst, src0));
- }
-
- fs_inst *
- fs_visitor::emit(enum opcode opcode, const fs_reg &dst, const fs_reg &src0,
- const fs_reg &src1)
- {
- return emit(new(mem_ctx) fs_inst(opcode, dst, src0, src1));
- }
-
- fs_inst *
- fs_visitor::emit(enum opcode opcode, const fs_reg &dst, const fs_reg &src0,
- const fs_reg &src1, const fs_reg &src2)
- {
- return emit(new(mem_ctx) fs_inst(opcode, dst, src0, src1, src2));
- }
-
- fs_inst *
- fs_visitor::emit(enum opcode opcode, const fs_reg &dst,
- fs_reg src[], int sources)
- {
- return emit(new(mem_ctx) fs_inst(opcode, dst, src, sources));
}
/**
case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
return inst->mlen;
case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
- return 2;
+ return inst->mlen;
case SHADER_OPCODE_UNTYPED_ATOMIC:
case SHADER_OPCODE_UNTYPED_SURFACE_READ:
case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
brw_type_for_base_type(type), dispatch_width);
}
- fs_reg
- fs_visitor::vgrf(int num_components)
- {
- int reg_width = dispatch_width / 8;
- return fs_reg(GRF, alloc.allocate(num_components * reg_width),
- BRW_REGISTER_TYPE_F, dispatch_width);
- }
-
/** Fixed HW reg constructor. */
fs_reg::fs_reg(enum register_file file, int reg)
{
this->width = width;
}
- fs_reg *
- fs_visitor::variable_storage(ir_variable *var)
- {
- return (fs_reg *)hash_table_find(this->variable_ht, var);
- }
-
- void
- import_uniforms_callback(const void *key,
- void *data,
- void *closure)
- {
- struct hash_table *dst_ht = (struct hash_table *)closure;
- const fs_reg *reg = (const fs_reg *)data;
-
- if (reg->file != UNIFORM)
- return;
-
- hash_table_insert(dst_ht, data, key);
- }
-
/* For SIMD16, we need to follow from the uniform setup of SIMD8 dispatch.
* This brings in those uniform definitions.
*/
void
fs_visitor::import_uniforms(fs_visitor *v)
{
- hash_table_call_foreach(v->variable_ht,
- import_uniforms_callback,
- variable_ht);
this->push_constant_loc = v->push_constant_loc;
this->pull_constant_loc = v->pull_constant_loc;
this->uniforms = v->uniforms;
this->param_size = v->param_size;
}
- /* Our support for uniforms is piggy-backed on the struct
- * gl_fragment_program, because that's where the values actually
- * get stored, rather than in some global gl_shader_program uniform
- * store.
- */
- void
- fs_visitor::setup_uniform_values(ir_variable *ir)
- {
- int namelen = strlen(ir->name);
-
- /* The data for our (non-builtin) uniforms is stored in a series of
- * gl_uniform_driver_storage structs for each subcomponent that
- * glGetUniformLocation() could name. We know it's been set up in the same
- * order we'd walk the type, so walk the list of storage and find anything
- * with our name, or the prefix of a component that starts with our name.
- */
- unsigned params_before = uniforms;
- for (unsigned u = 0; u < shader_prog->NumUserUniformStorage; u++) {
- struct gl_uniform_storage *storage = &shader_prog->UniformStorage[u];
-
- if (strncmp(ir->name, storage->name, namelen) != 0 ||
- (storage->name[namelen] != 0 &&
- storage->name[namelen] != '.' &&
- storage->name[namelen] != '[')) {
- continue;
- }
-
- unsigned slots = storage->type->component_slots();
- if (storage->array_elements)
- slots *= storage->array_elements;
-
- for (unsigned i = 0; i < slots; i++) {
- stage_prog_data->param[uniforms++] = &storage->storage[i];
- }
- }
-
- /* Make sure we actually initialized the right amount of stuff here. */
- assert(params_before + ir->type->component_slots() == uniforms);
- (void)params_before;
- }
-
-
- /* Our support for builtin uniforms is even scarier than non-builtin.
- * It sits on top of the PROG_STATE_VAR parameters that are
- * automatically updated from GL context state.
- */
- void
- fs_visitor::setup_builtin_uniform_values(ir_variable *ir)
- {
- const ir_state_slot *const slots = ir->get_state_slots();
- assert(slots != NULL);
-
- for (unsigned int i = 0; i < ir->get_num_state_slots(); i++) {
- /* This state reference has already been setup by ir_to_mesa, but we'll
- * get the same index back here.
- */
- int index = _mesa_add_state_reference(this->prog->Parameters,
- (gl_state_index *)slots[i].tokens);
-
- /* Add each of the unique swizzles of the element as a parameter.
- * This'll end up matching the expected layout of the
- * array/matrix/structure we're trying to fill in.
- */
- int last_swiz = -1;
- for (unsigned int j = 0; j < 4; j++) {
- int swiz = GET_SWZ(slots[i].swizzle, j);
- if (swiz == last_swiz)
- break;
- last_swiz = swiz;
-
- stage_prog_data->param[uniforms++] =
- &prog->Parameters->ParameterValues[index][swiz];
- }
- }
- }
-
fs_reg *
fs_visitor::emit_fragcoord_interpolation(bool pixel_center_integer,
bool origin_upper_left)
/* gl_FragCoord.x */
if (pixel_center_integer) {
- emit(MOV(wpos, this->pixel_x));
+ bld.MOV(wpos, this->pixel_x);
} else {
- emit(ADD(wpos, this->pixel_x, fs_reg(0.5f)));
+ bld.ADD(wpos, this->pixel_x, fs_reg(0.5f));
}
wpos = offset(wpos, 1);
/* gl_FragCoord.y */
if (!flip && pixel_center_integer) {
- emit(MOV(wpos, this->pixel_y));
+ bld.MOV(wpos, this->pixel_y);
} else {
fs_reg pixel_y = this->pixel_y;
float offset = (pixel_center_integer ? 0.0 : 0.5);
offset += key->drawable_height - 1.0;
}
- emit(ADD(wpos, pixel_y, fs_reg(offset)));
+ bld.ADD(wpos, pixel_y, fs_reg(offset));
}
wpos = offset(wpos, 1);
/* gl_FragCoord.z */
if (devinfo->gen >= 6) {
- emit(MOV(wpos, fs_reg(brw_vec8_grf(payload.source_depth_reg, 0))));
+ bld.MOV(wpos, fs_reg(brw_vec8_grf(payload.source_depth_reg, 0)));
} else {
- emit(FS_OPCODE_LINTERP, wpos,
+ bld.emit(FS_OPCODE_LINTERP, wpos,
this->delta_xy[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
interp_reg(VARYING_SLOT_POS, 2));
}
wpos = offset(wpos, 1);
/* gl_FragCoord.w: Already set up in emit_interpolation */
- emit(BRW_OPCODE_MOV, wpos, this->wpos_w);
+ bld.MOV(wpos, this->wpos_w);
return reg;
}
*/
barycoord_mode = BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC;
}
- return emit(FS_OPCODE_LINTERP, attr,
- this->delta_xy[barycoord_mode], interp);
+ return bld.emit(FS_OPCODE_LINTERP, attr,
+ this->delta_xy[barycoord_mode], interp);
}
void
struct brw_reg interp = interp_reg(location, k);
interp = suboffset(interp, 3);
interp.type = attr.type;
- emit(FS_OPCODE_CINTERP, attr, fs_reg(interp));
+ bld.emit(FS_OPCODE_CINTERP, attr, fs_reg(interp));
attr = offset(attr, 1);
}
} else {
* unlit, replace the centroid data with non-centroid
* data.
*/
- emit(FS_OPCODE_MOV_DISPATCH_TO_FLAGS);
+ bld.emit(FS_OPCODE_MOV_DISPATCH_TO_FLAGS);
fs_inst *inst;
inst = emit_linterp(attr, fs_reg(interp), interpolation_mode,
mod_sample || key->persample_shading);
}
if (devinfo->gen < 6 && interpolation_mode == INTERP_QUALIFIER_SMOOTH) {
- emit(BRW_OPCODE_MUL, attr, attr, this->pixel_w);
+ bld.MUL(attr, attr, this->pixel_w);
}
attr = offset(attr, 1);
}
fs_reg g0 = fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_W));
g0.negate = true;
- emit(ASR(*reg, g0, fs_reg(15)));
+ bld.ASR(*reg, g0, fs_reg(15));
} else {
/* Bit 31 of g1.6 is 0 if the polygon is front facing. We want to create
* a boolean result from this (1/true or 0/false).
fs_reg g1_6 = fs_reg(retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_D));
g1_6.negate = true;
- emit(ASR(*reg, g1_6, fs_reg(31)));
+ bld.ASR(*reg, g1_6, fs_reg(31));
}
return reg;
if (key->compute_pos_offset) {
/* Convert int_sample_pos to floating point */
- emit(MOV(dst, int_sample_pos));
+ bld.MOV(dst, int_sample_pos);
/* Scale to the range [0, 1] */
- emit(MUL(dst, dst, fs_reg(1 / 16.0f)));
+ bld.MUL(dst, dst, fs_reg(1 / 16.0f));
}
else {
/* From ARB_sample_shading specification:
* rasterization is disabled, gl_SamplePosition will always be
* (0.5, 0.5).
*/
- emit(MOV(dst, fs_reg(0.5f)));
+ bld.MOV(dst, fs_reg(0.5f));
}
}
{
assert(devinfo->gen >= 6);
- this->current_annotation = "compute sample position";
+ const fs_builder abld = bld.annotate("compute sample position");
fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::vec2_type));
fs_reg pos = *reg;
fs_reg int_sample_x = vgrf(glsl_type::int_type);
BRW_REGISTER_TYPE_B), 16, 8, 2);
if (dispatch_width == 8) {
- emit(MOV(int_sample_x, fs_reg(sample_pos_reg)));
+ abld.MOV(int_sample_x, fs_reg(sample_pos_reg));
} else {
- emit(MOV(half(int_sample_x, 0), fs_reg(sample_pos_reg)));
- emit(MOV(half(int_sample_x, 1), fs_reg(suboffset(sample_pos_reg, 16))))
- ->force_sechalf = true;
+ abld.half(0).MOV(half(int_sample_x, 0), fs_reg(sample_pos_reg));
+ abld.half(1).MOV(half(int_sample_x, 1),
+ fs_reg(suboffset(sample_pos_reg, 16)));
}
/* Compute gl_SamplePosition.x */
compute_sample_position(pos, int_sample_x);
pos = offset(pos, 1);
if (dispatch_width == 8) {
- emit(MOV(int_sample_y, fs_reg(suboffset(sample_pos_reg, 1))));
+ abld.MOV(int_sample_y, fs_reg(suboffset(sample_pos_reg, 1)));
} else {
- emit(MOV(half(int_sample_y, 0),
- fs_reg(suboffset(sample_pos_reg, 1))));
- emit(MOV(half(int_sample_y, 1), fs_reg(suboffset(sample_pos_reg, 17))))
- ->force_sechalf = true;
+ abld.half(0).MOV(half(int_sample_y, 0),
+ fs_reg(suboffset(sample_pos_reg, 1)));
+ abld.half(1).MOV(half(int_sample_y, 1),
+ fs_reg(suboffset(sample_pos_reg, 17)));
}
/* Compute gl_SamplePosition.y */
compute_sample_position(pos, int_sample_y);
brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
assert(devinfo->gen >= 6);
- this->current_annotation = "compute sample id";
+ const fs_builder abld = bld.annotate("compute sample id");
fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::int_type));
if (key->compute_sample_id) {
* are sample 1 of subspan 0; the third group is sample 0 of
* subspan 1, and finally sample 1 of subspan 1.
*/
- fs_inst *inst;
- inst = emit(BRW_OPCODE_AND, t1,
- fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD)),
- fs_reg(0xc0));
- inst->force_writemask_all = true;
- inst = emit(BRW_OPCODE_SHR, t1, t1, fs_reg(5));
- inst->force_writemask_all = true;
+ abld.exec_all()
+ .AND(t1, fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD)),
+ fs_reg(0xc0));
+ abld.exec_all().SHR(t1, t1, fs_reg(5));
+
/* This works for both SIMD8 and SIMD16 */
- inst = emit(MOV(t2, brw_imm_v(key->persample_2x ? 0x1010 : 0x3210)));
- inst->force_writemask_all = true;
+ abld.exec_all()
+ .MOV(t2, brw_imm_v(key->persample_2x ? 0x1010 : 0x3210));
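+ /* brw_imm_v packs eight signed 4-bit values, low nibble first: 0x3210
+ * gives the per-channel sample IDs 0,1,2,3 for 4x MSAA, and 0x1010
+ * gives 0,1,0,1 for 2x.
+ */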
+
/* This special instruction takes care of setting vstride=1,
* width=4, hstride=0 of t2 during an ADD instruction.
*/
- emit(FS_OPCODE_SET_SAMPLE_ID, *reg, t1, t2);
+ abld.emit(FS_OPCODE_SET_SAMPLE_ID, *reg, t1, t2);
} else {
/* As per GL_ARB_sample_shading specification:
* "When rendering to a non-multisample buffer, or if multisample
* rasterization is disabled, gl_SampleID will always be zero."
*/
- emit(BRW_OPCODE_MOV, *reg, fs_reg(0));
+ abld.MOV(*reg, fs_reg(0));
}
return reg;
if (!src->abs && !src->negate)
return;
- fs_reg temp = retype(vgrf(1), src->type);
- emit(MOV(temp, *src));
+ fs_reg temp = bld.vgrf(src->type);
+ bld.MOV(temp, *src);
*src = temp;
}
- fs_reg
- fs_visitor::fix_math_operand(fs_reg src)
- {
- /* Can't do hstride == 0 args on gen6 math, so expand it out. We
- * might be able to do better by doing execsize = 1 math and then
- * expanding that result out, but we would need to be careful with
- * masking.
- *
- * The hardware ignores source modifiers (negate and abs) on math
- * instructions, so we also move to a temp to set those up.
- */
- if (devinfo->gen == 6 && src.file != UNIFORM && src.file != IMM &&
- !src.abs && !src.negate)
- return src;
-
- /* Gen7 relaxes most of the above restrictions, but still can't use IMM
- * operands to math
- */
- if (devinfo->gen >= 7 && src.file != IMM)
- return src;
-
- fs_reg expanded = vgrf(glsl_type::float_type);
- expanded.type = src.type;
- emit(BRW_OPCODE_MOV, expanded, src);
- return expanded;
- }
-
- fs_inst *
- fs_visitor::emit_math(enum opcode opcode, fs_reg dst, fs_reg src)
- {
- switch (opcode) {
- case SHADER_OPCODE_RCP:
- case SHADER_OPCODE_RSQ:
- case SHADER_OPCODE_SQRT:
- case SHADER_OPCODE_EXP2:
- case SHADER_OPCODE_LOG2:
- case SHADER_OPCODE_SIN:
- case SHADER_OPCODE_COS:
- break;
- default:
- unreachable("not reached: bad math opcode");
- }
-
- /* Can't do hstride == 0 args to gen6 math, so expand it out. We
- * might be able to do better by doing execsize = 1 math and then
- * expanding that result out, but we would need to be careful with
- * masking.
- *
- * Gen 6 hardware ignores source modifiers (negate and abs) on math
- * instructions, so we also move to a temp to set those up.
- */
- if (devinfo->gen == 6 || devinfo->gen == 7)
- src = fix_math_operand(src);
-
- fs_inst *inst = emit(opcode, dst, src);
-
- if (devinfo->gen < 6) {
- inst->base_mrf = 2;
- inst->mlen = dispatch_width / 8;
- }
-
- return inst;
- }
-
- fs_inst *
- fs_visitor::emit_math(enum opcode opcode, fs_reg dst, fs_reg src0, fs_reg src1)
- {
- int base_mrf = 2;
- fs_inst *inst;
-
- if (devinfo->gen >= 8) {
- inst = emit(opcode, dst, src0, src1);
- } else if (devinfo->gen >= 6) {
- src0 = fix_math_operand(src0);
- src1 = fix_math_operand(src1);
-
- inst = emit(opcode, dst, src0, src1);
- } else {
- /* From the Ironlake PRM, Volume 4, Part 1, Section 6.1.13
- * "Message Payload":
- *
- * "Operand0[7]. For the INT DIV functions, this operand is the
- * denominator."
- * ...
- * "Operand1[7]. For the INT DIV functions, this operand is the
- * numerator."
- */
- bool is_int_div = opcode != SHADER_OPCODE_POW;
- fs_reg &op0 = is_int_div ? src1 : src0;
- fs_reg &op1 = is_int_div ? src0 : src1;
-
- emit(MOV(fs_reg(MRF, base_mrf + 1, op1.type, dispatch_width), op1));
- inst = emit(opcode, dst, op0, reg_null_f);
-
- inst->base_mrf = base_mrf;
- inst->mlen = 2 * dispatch_width / 8;
- }
- return inst;
- }
-
void
fs_visitor::emit_discard_jump()
{
/* For performance, after a discard, jump to the end of the
* shader if all relevant channels have been discarded.
*/
- fs_inst *discard_jump = emit(FS_OPCODE_DISCARD_JUMP);
+ fs_inst *discard_jump = bld.emit(FS_OPCODE_DISCARD_JUMP);
discard_jump->flag_subreg = 1;
discard_jump->predicate = (dispatch_width == 8)
unsigned vue_entries =
MAX2(count, vs_prog_data->base.vue_map.num_slots);
+ /* URB entry size is counted in units of 64 bytes (for the 3DSTATE_URB_VS
+ * command). Each attribute is 16 bytes (4 floats/dwords), so each unit
+ * fits four attributes.
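+ * For example, a VUE map with 9 slots pads to ALIGN(9, 4) = 12 and
+ * yields an urb_entry_size of 3 units, i.e. 192 bytes per entry.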
+ */
vs_prog_data->base.urb_entry_size = ALIGN(vue_entries, 4) / 4;
vs_prog_data->base.urb_read_length = (count + 1) / 2;
continue;
/* Set up the annotation tracking for new generated instructions. */
- base_ir = inst->ir;
- current_annotation = inst->annotation;
-
+ const fs_builder ibld = bld.annotate(inst->annotation, inst->ir)
+ .at(block, inst);
fs_reg surf_index(stage_prog_data->binding_table.pull_constants_start);
fs_reg dst = vgrf(glsl_type::float_type);
/* Generate a pull load into dst. */
if (inst->src[i].reladdr) {
- exec_list list = VARYING_PULL_CONSTANT_LOAD(dst,
- surf_index,
- *inst->src[i].reladdr,
- pull_index);
- inst->insert_before(block, &list);
+ VARYING_PULL_CONSTANT_LOAD(ibld, dst,
+ surf_index,
+ *inst->src[i].reladdr,
+ pull_index);
inst->src[i].reladdr = NULL;
} else {
fs_reg offset = fs_reg((unsigned)(pull_index * 4) & ~15);
- fs_inst *pull =
- new(mem_ctx) fs_inst(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, 8,
- dst, surf_index, offset);
- inst->insert_before(block, pull);
+ ibld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
+ dst, surf_index, offset);
inst->src[i].set_smear(pull_index & 3);
}
if (unlikely(tex_inst->is_head_sentinel()) || !tex_inst->is_tex())
return false;
+ /* This optimisation doesn't seem to work for textureGather for some
+ * reason. I can't find any documentation or known workarounds to indicate
+ * that this is expected, but since it is quite unlikely that a shader
+ * would directly write out the results from textureGather, we might as
+ * well just disable it.
+ */
+ if (tex_inst->opcode == SHADER_OPCODE_TG4 ||
+ tex_inst->opcode == SHADER_OPCODE_TG4_OFFSET)
+ return false;
+
/* If there's no header present, we need to munge the LOAD_PAYLOAD as well.
* It's very likely to be the previous instruction.
*/
tex_inst->offset |= fb_write->target << 24;
tex_inst->eot = true;
- tex_inst->dst = reg_null_ud;
+ tex_inst->dst = bld.null_reg_ud();
fb_write->remove(cfg->blocks[cfg->num_blocks - 1]);
/* If a header is present, marking the eot is sufficient. Otherwise, we need
if (tex_inst->header_size != 0)
return true;
- fs_reg send_header = vgrf(load_payload->sources + 1);
+ fs_reg send_header = bld.vgrf(BRW_REGISTER_TYPE_F,
+ load_payload->sources + 1);
fs_reg *new_sources =
ralloc_array(mem_ctx, fs_reg, load_payload->sources + 1);
brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
int base_mrf = 1;
int color_mrf = base_mrf + 2;
- fs_inst *mov = emit(MOV(vec4(brw_message_reg(color_mrf)),
- fs_reg(UNIFORM, 0, BRW_REGISTER_TYPE_F)));
+ fs_inst *mov;
+ if (uniforms == 1) {
+ mov = bld.exec_all().MOV(vec4(brw_message_reg(color_mrf)),
+ fs_reg(UNIFORM, 0, BRW_REGISTER_TYPE_F));
+ } else {
+ struct brw_reg reg =
+ brw_reg(BRW_GENERAL_REGISTER_FILE,
+ 2, 3, 0, 0, BRW_REGISTER_TYPE_F,
+ BRW_VERTICAL_STRIDE_8,
+ BRW_WIDTH_2,
+ BRW_HORIZONTAL_STRIDE_4, BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
+
+ mov = bld.exec_all().MOV(vec4(brw_message_reg(color_mrf)),
+ fs_reg(reg));
+ }
- mov->force_writemask_all = true;
-
fs_inst *write;
if (key->nr_color_regions == 1) {
- write = emit(FS_OPCODE_REP_FB_WRITE);
+ write = bld.emit(FS_OPCODE_REP_FB_WRITE);
write->saturate = key->clamp_fragment_color;
write->base_mrf = color_mrf;
write->target = 0;
} else {
assume(key->nr_color_regions > 0);
for (int i = 0; i < key->nr_color_regions; ++i) {
- write = emit(FS_OPCODE_REP_FB_WRITE);
+ write = bld.emit(FS_OPCODE_REP_FB_WRITE);
write->saturate = key->clamp_fragment_color;
write->base_mrf = base_mrf;
write->target = i;
assign_curb_setup();
/* Now that we have the uniform assigned, go ahead and force it to a vec4. */
- assert(mov->src[0].file == HW_REG);
- mov->src[0] = brw_vec4_grf(mov->src[0].fixed_hw_reg.nr, 0);
+ if (uniforms == 1) {
+ assert(mov->src[0].file == HW_REG);
+ mov->src[0] = brw_vec4_grf(mov->src[0].fixed_hw_reg.nr, 0);
+ }
}
/**
*/
if (block->start() == scan_inst) {
for (int i = 0; i < write_len; i++) {
- if (needs_dep[i]) {
- inst->insert_before(block, DEP_RESOLVE_MOV(first_write_grf + i));
- }
+ if (needs_dep[i])
+ DEP_RESOLVE_MOV(bld.at(block, inst), first_write_grf + i);
}
return;
}
if (reg >= first_write_grf &&
reg < first_write_grf + write_len &&
needs_dep[reg - first_write_grf]) {
- inst->insert_before(block, DEP_RESOLVE_MOV(reg));
+ DEP_RESOLVE_MOV(bld.at(block, inst), reg);
needs_dep[reg - first_write_grf] = false;
if (scan_inst->exec_size == 16)
needs_dep[reg - first_write_grf + 1] = false;
if (block->end() == scan_inst) {
for (int i = 0; i < write_len; i++) {
if (needs_dep[i])
- scan_inst->insert_before(block,
- DEP_RESOLVE_MOV(first_write_grf + i));
+ DEP_RESOLVE_MOV(bld.at(block, scan_inst), first_write_grf + i);
}
return;
}
scan_inst->dst.reg >= first_write_grf &&
scan_inst->dst.reg < first_write_grf + write_len &&
needs_dep[scan_inst->dst.reg - first_write_grf]) {
- scan_inst->insert_before(block, DEP_RESOLVE_MOV(scan_inst->dst.reg));
+ DEP_RESOLVE_MOV(bld.at(block, scan_inst), scan_inst->dst.reg);
needs_dep[scan_inst->dst.reg - first_write_grf] = false;
}
assert(inst->dst.file == MRF || inst->dst.file == GRF);
assert(inst->saturate == false);
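+ /* Build at the LOAD_PAYLOAD's position and inherit its execution size,
+ * SIMD half and writemask-all state so the unrolled MOVs below keep the
+ * original channel enables.
+ */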
+ const fs_builder ibld = bld.group(inst->exec_size, inst->force_sechalf)
+ .exec_all(inst->force_writemask_all)
+ .at(block, inst);
fs_reg dst = inst->dst;
/* Get rid of COMPR4. We'll add it back in if we need it */
fs_reg mov_dst = retype(dst, BRW_REGISTER_TYPE_UD);
fs_reg mov_src = retype(inst->src[i], BRW_REGISTER_TYPE_UD);
mov_src.width = 8;
- fs_inst *mov = MOV(mov_dst, mov_src);
- mov->force_writemask_all = true;
- inst->insert_before(block, mov);
+ ibld.exec_all().MOV(mov_dst, mov_src);
}
dst = offset(dst, 1);
}
if (devinfo->has_compr4) {
fs_reg compr4_dst = retype(dst, inst->src[i].type);
compr4_dst.reg |= BRW_MRF_COMPR4;
-
- fs_inst *mov = MOV(compr4_dst, inst->src[i]);
- mov->force_writemask_all = inst->force_writemask_all;
- inst->insert_before(block, mov);
+ ibld.MOV(compr4_dst, inst->src[i]);
} else {
/* Platform doesn't have COMPR4. We have to fake it */
fs_reg mov_dst = retype(dst, inst->src[i].type);
mov_dst.width = 8;
-
- fs_inst *mov = MOV(mov_dst, half(inst->src[i], 0));
- mov->force_writemask_all = inst->force_writemask_all;
- inst->insert_before(block, mov);
-
- mov = MOV(offset(mov_dst, 4), half(inst->src[i], 1));
- mov->force_writemask_all = inst->force_writemask_all;
- mov->force_sechalf = true;
- inst->insert_before(block, mov);
+ ibld.half(0).MOV(mov_dst, half(inst->src[i], 0));
+ ibld.half(1).MOV(offset(mov_dst, 4), half(inst->src[i], 1));
}
}
}
for (uint8_t i = inst->header_size; i < inst->sources; i++) {
- if (inst->src[i].file != BAD_FILE) {
- fs_inst *mov = MOV(retype(dst, inst->src[i].type),
- inst->src[i]);
- mov->force_writemask_all = inst->force_writemask_all;
- inst->insert_before(block, mov);
- }
+ if (inst->src[i].file != BAD_FILE)
+ ibld.MOV(retype(dst, inst->src[i].type), inst->src[i]);
dst = offset(dst, 1);
}
return progress;
}
+ bool
+ fs_visitor::lower_integer_multiplication()
+ {
+ bool progress = false;
+
+ /* Gen8's MUL instruction can do a 32-bit x 32-bit -> 32-bit operation
+ * directly, but Cherryview cannot.
+ */
+ if (devinfo->gen >= 8 && !devinfo->is_cherryview)
+ return false;
+
+ foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
+ if (inst->opcode != BRW_OPCODE_MUL ||
+ inst->dst.is_accumulator() ||
+ (inst->dst.type != BRW_REGISTER_TYPE_D &&
+ inst->dst.type != BRW_REGISTER_TYPE_UD))
+ continue;
+
+ const fs_builder ibld = bld.at(block, inst);
+
+ /* The MUL instruction isn't commutative. On Gen <= 6, only the low
+ * 16-bits of src0 are read, and on Gen >= 7 only the low 16-bits of
+ * src1 are used.
+ *
+ * If multiplying by an immediate value that fits in 16-bits, do a
+ * single MUL instruction with that value in the proper location.
+ */
+ if (inst->src[1].file == IMM &&
+ inst->src[1].fixed_hw_reg.dw1.ud < (1 << 16)) {
+ if (devinfo->gen < 7) {
+ fs_reg imm(GRF, alloc.allocate(dispatch_width / 8),
+ inst->dst.type, dispatch_width);
+ ibld.MOV(imm, inst->src[1]);
+ ibld.MUL(inst->dst, imm, inst->src[0]);
+ } else {
+ ibld.MUL(inst->dst, inst->src[0], inst->src[1]);
+ }
+ } else {
+ /* Gen < 8 (and some Gen8+ low-power parts like Cherryview) cannot
+ * do 32-bit integer multiplication in one instruction, but instead
+ * must do a sequence (which actually calculates a 64-bit result):
+ *
+ * mul(8) acc0<1>D g3<8,8,1>D g4<8,8,1>D
+ * mach(8) null g3<8,8,1>D g4<8,8,1>D
+ * mov(8) g2<1>D acc0<8,8,1>D
+ *
+ * But on Gen > 6, the ability to use the second accumulator register
+ * (acc1) for non-float data types was removed, preventing a simple
+ * implementation in SIMD16. A 16-channel result can be calculated by
+ * executing the three instructions twice in SIMD8, once with quarter
+ * control of 1Q for the first eight channels and again with 2Q for
+ * the second eight channels.
+ *
+ * Which accumulator register is implicitly accessed (by AccWrEnable
+ * for instance) is determined by the quarter control. Unfortunately
+ * Ivybridge (and presumably Baytrail) has a hardware bug in which an
+ * implicit accumulator access by an instruction with 2Q will access
+ * acc1 regardless of whether the data type is usable in acc1.
+ *
+ * Specifically, the 2Q mach(8) writes acc1 which does not exist for
+ * integer data types.
+ *
+ * Since we only want the low 32-bits of the result, we can do two
+ * 32-bit x 16-bit multiplies (like the mul and mach are doing), and
+ * adjust the high result and add them (like the mach is doing):
+ *
+ * mul(8) g7<1>D g3<8,8,1>D g4.0<8,8,1>UW
+ * mul(8) g8<1>D g3<8,8,1>D g4.1<8,8,1>UW
+ * shl(8) g9<1>D g8<8,8,1>D 16D
+ * add(8) g2<1>D g7<8,8,1>D g8<8,8,1>D
+ *
+ * We avoid the shl instruction by realizing that we only want to add
+ * the low 16-bits of the "high" result to the high 16-bits of the
+ * "low" result and using proper regioning on the add:
+ *
+ * mul(8) g7<1>D g3<8,8,1>D g4.0<16,8,2>UW
+ * mul(8) g8<1>D g3<8,8,1>D g4.1<16,8,2>UW
+ * add(8) g7.1<2>UW g7.1<16,8,2>UW g8<16,8,2>UW
+ *
+ * Since it does not use the (single) accumulator register, we can
+ * schedule multi-component multiplications much better.
+ */
+
+ if (inst->conditional_mod && inst->dst.is_null()) {
+ inst->dst = fs_reg(GRF, alloc.allocate(dispatch_width / 8),
+ inst->dst.type, dispatch_width);
+ }
+ fs_reg low = inst->dst;
+ fs_reg high(GRF, alloc.allocate(dispatch_width / 8),
+ inst->dst.type, dispatch_width);
+
+ if (devinfo->gen >= 7) {
+ fs_reg src1_0_w = inst->src[1];
+ fs_reg src1_1_w = inst->src[1];
+
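+ /* A UW view of the 32-bit src1 with a stride of 2 reads the low 16-bit
+ * half of every dword (the <16,8,2>UW region from the example above);
+ * adding one UW of subreg_offset selects the high halves instead.
+ */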
+ if (inst->src[1].file == IMM) {
+ src1_0_w.fixed_hw_reg.dw1.ud &= 0xffff;
+ src1_1_w.fixed_hw_reg.dw1.ud >>= 16;
+ } else {
+ src1_0_w.type = BRW_REGISTER_TYPE_UW;
+ if (src1_0_w.stride != 0) {
+ assert(src1_0_w.stride == 1);
+ src1_0_w.stride = 2;
+ }
+
+ src1_1_w.type = BRW_REGISTER_TYPE_UW;
+ if (src1_1_w.stride != 0) {
+ assert(src1_1_w.stride == 1);
+ src1_1_w.stride = 2;
+ }
+ src1_1_w.subreg_offset += type_sz(BRW_REGISTER_TYPE_UW);
+ }
+ ibld.MUL(low, inst->src[0], src1_0_w);
+ ibld.MUL(high, inst->src[0], src1_1_w);
+ } else {
+ fs_reg src0_0_w = inst->src[0];
+ fs_reg src0_1_w = inst->src[0];
+
+ src0_0_w.type = BRW_REGISTER_TYPE_UW;
+ if (src0_0_w.stride != 0) {
+ assert(src0_0_w.stride == 1);
+ src0_0_w.stride = 2;
+ }
+
+ src0_1_w.type = BRW_REGISTER_TYPE_UW;
+ if (src0_1_w.stride != 0) {
+ assert(src0_1_w.stride == 1);
+ src0_1_w.stride = 2;
+ }
+ src0_1_w.subreg_offset += type_sz(BRW_REGISTER_TYPE_UW);
+
+ ibld.MUL(low, src0_0_w, inst->src[1]);
+ ibld.MUL(high, src0_1_w, inst->src[1]);
+ }
+
+ fs_reg dst = inst->dst;
+ dst.type = BRW_REGISTER_TYPE_UW;
+ dst.subreg_offset = 2;
+ dst.stride = 2;
+
+ high.type = BRW_REGISTER_TYPE_UW;
+ high.stride = 2;
+
+ low.type = BRW_REGISTER_TYPE_UW;
+ low.subreg_offset = 2;
+ low.stride = 2;
+
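+ /* This is the "add(8) g7.1<2>UW g7.1<16,8,2>UW g8<16,8,2>UW" step from
+ * the example above: add the low word of the high result into the high
+ * word of each dword of the low result.
+ */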
+ ibld.ADD(dst, low, high);
+
+ if (inst->conditional_mod) {
+ fs_reg null(retype(ibld.null_reg_f(), inst->dst.type));
+ set_condmod(inst->conditional_mod,
+ ibld.MOV(null, inst->dst));
+ }
+ }
+
+ inst->remove(block);
+ progress = true;
+ }
+
+ if (progress)
+ invalidate_live_intervals();
+
+ return progress;
+ }
+
void
fs_visitor::dump_instructions()
{
}
fprintf(file, "(%d) ", inst->exec_size);
+ if (inst->mlen) {
+ fprintf(file, "(mlen: %d) ", inst->mlen);
+ }
switch (inst->dst.file) {
case GRF:
void
fs_visitor::setup_cs_payload()
{
- assert(brw->gen >= 7);
+ assert(devinfo->gen >= 7);
payload.num_regs = 1;
}
void
fs_visitor::optimize()
{
+ /* bld is the common builder object pointing at the end of the program;
+ * we used it to translate the program into i965 IR. For the optimization
+ * and lowering passes coming next, any code added after the end of the
+ * program without an explicit fs_builder::at() call clearly points at a
+ * mistake. Ideally optimization passes wouldn't be part of the visitor,
+ * so they wouldn't have access to bld at all, but they do; so, just in
+ * case some pass forgets to ask for a location explicitly, reset the
+ * insertion point to NULL here to make such a mistake trip.
+ */
+ bld = bld.at(NULL, NULL);
+
split_virtual_grfs();
move_uniform_array_access_to_pull_constants();
snprintf(filename, 64, "%s%d-%04d-%02d-%02d-" #pass, \
stage_abbrev, dispatch_width, shader_prog ? shader_prog->Name : 0, iteration, pass_num); \
\
- backend_visitor::dump_instructions(filename); \
+ backend_shader::dump_instructions(filename); \
} \
\
progress = progress || this_progress; \
stage_abbrev, dispatch_width,
shader_prog ? shader_prog->Name : 0);
- backend_visitor::dump_instructions(filename);
+ backend_shader::dump_instructions(filename);
}
bool progress;
}
OPT(opt_combine_constants);
+ OPT(lower_integer_multiplication);
lower_uniform_pull_constant_loads();
}
fail("Failure to register allocate. Reduce number of "
"live scalar values to avoid this.");
} else {
- perf_debug("%s shader triggered register spilling. "
- "Try reducing the number of live scalar values to "
- "improve performance.\n", stage_name);
+ compiler->shader_perf_log(log_data,
+ "%s shader triggered register spilling. "
+ "Try reducing the number of live scalar "
+ "values to improve performance.\n",
+ stage_name);
}
/* Since we're out of heuristics, just go spill registers until we
}
bool
- fs_visitor::run_vs()
+ fs_visitor::run_vs(gl_clip_plane *clip_planes)
{
assert(stage == MESA_SHADER_VERTEX);
- assign_common_binding_table_offsets(0);
+ if (prog_data->map_entries == NULL)
+ assign_common_binding_table_offsets(0);
setup_vs_payload();
- if (INTEL_DEBUG & DEBUG_SHADER_TIME)
+ if (shader_time_index >= 0)
emit_shader_time_begin();
- if (brw->ctx.Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].NirOptions) {
- emit_nir_code();
- } else {
- foreach_in_list(ir_instruction, ir, shader->base.ir) {
- base_ir = ir;
- this->result = reg_undef;
- ir->accept(this);
- }
- base_ir = NULL;
- }
+ emit_nir_code();
if (failed)
return false;
- emit_urb_writes();
+ emit_urb_writes(clip_planes);
- if (INTEL_DEBUG & DEBUG_SHADER_TIME)
+ if (shader_time_index >= 0)
emit_shader_time_end();
calculate_cfg();
}
bool
- fs_visitor::run_fs()
+ fs_visitor::run_fs(bool do_rep_send)
{
brw_wm_prog_data *wm_prog_data = (brw_wm_prog_data *) this->prog_data;
brw_wm_prog_key *wm_key = (brw_wm_prog_key *) this->key;
sanity_param_count = prog->Parameters->NumParameters;
- assign_binding_table_offsets();
+ if (prog_data->map_entries == NULL)
+ assign_binding_table_offsets();
if (devinfo->gen >= 6)
setup_payload_gen6();
if (0) {
emit_dummy_fs();
- } else if (brw->use_rep_send && dispatch_width == 16) {
+ } else if (do_rep_send) {
+ assert(dispatch_width == 16);
emit_repclear_shader();
} else {
- if (INTEL_DEBUG & DEBUG_SHADER_TIME)
+ if (shader_time_index >= 0)
emit_shader_time_begin();
calculate_urb_setup();
* Initialize it with the dispatched pixels.
*/
if (wm_prog_data->uses_kill) {
- fs_inst *discard_init = emit(FS_OPCODE_MOV_DISPATCH_TO_FLAGS);
+ fs_inst *discard_init = bld.emit(FS_OPCODE_MOV_DISPATCH_TO_FLAGS);
discard_init->flag_subreg = 1;
}
/* Generate FS IR for main(). (the visitor only descends into
* functions called "main").
*/
- if (brw->ctx.Const.ShaderCompilerOptions[MESA_SHADER_FRAGMENT].NirOptions) {
- emit_nir_code();
- } else if (shader) {
- foreach_in_list(ir_instruction, ir, shader->base.ir) {
- base_ir = ir;
- this->result = reg_undef;
- ir->accept(this);
- }
- } else {
- emit_fragment_program_code();
- }
- base_ir = NULL;
+ emit_nir_code();
+
if (failed)
return false;
if (wm_prog_data->uses_kill)
- emit(FS_OPCODE_PLACEHOLDER_HALT);
+ bld.emit(FS_OPCODE_PLACEHOLDER_HALT);
if (wm_key->alpha_test_func)
emit_alpha_test();
emit_fb_writes();
- if (INTEL_DEBUG & DEBUG_SHADER_TIME)
+ if (shader_time_index >= 0)
emit_shader_time_end();
calculate_cfg();
setup_cs_payload();
- if (INTEL_DEBUG & DEBUG_SHADER_TIME)
+ if (shader_time_index >= 0)
emit_shader_time_begin();
emit_nir_code();
emit_cs_terminate();
- if (INTEL_DEBUG & DEBUG_SHADER_TIME)
+ if (shader_time_index >= 0)
emit_shader_time_end();
calculate_cfg();
if (unlikely(INTEL_DEBUG & DEBUG_WM))
brw_dump_ir("fragment", prog, &shader->base, &fp->Base);
+ int st_index8 = -1, st_index16 = -1;
+ if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
+ st_index8 = brw_get_shader_time_index(brw, prog, &fp->Base, ST_FS8);
+ st_index16 = brw_get_shader_time_index(brw, prog, &fp->Base, ST_FS16);
+ }
+
/* Now the main event: Visit the shader IR and generate our FS IR for it.
*/
- fs_visitor v(brw, mem_ctx, key, prog_data, prog, fp, 8);
- if (!v.run_fs()) {
+ fs_visitor v(brw->intelScreen->compiler, brw,
+ mem_ctx, MESA_SHADER_FRAGMENT, key, &prog_data->base,
+ prog, &fp->Base, 8, st_index8);
+ if (!v.run_fs(false /* do_rep_send */)) {
if (prog) {
prog->LinkStatus = false;
ralloc_strcat(&prog->InfoLog, v.fail_msg);
}
cfg_t *simd16_cfg = NULL;
- fs_visitor v2(brw, mem_ctx, key, prog_data, prog, fp, 16);
+ fs_visitor v2(brw->intelScreen->compiler, brw,
+ mem_ctx, MESA_SHADER_FRAGMENT, key, &prog_data->base,
+ prog, &fp->Base, 16, st_index16);
if (likely(!(INTEL_DEBUG & DEBUG_NO16) || brw->use_rep_send)) {
if (!v.simd16_unsupported) {
/* Try a SIMD16 compile */
v2.import_uniforms(&v);
- if (!v2.run_fs()) {
- perf_debug("SIMD16 shader failed to compile, falling back to "
- "SIMD8 at a 10-20%% performance cost: %s", v2.fail_msg);
+ if (!v2.run_fs(brw->use_rep_send)) {
+ perf_debug("SIMD16 shader failed to compile: %s", v2.fail_msg);
} else {
simd16_cfg = v2.cfg;
}
- } else {
- perf_debug("SIMD16 shader unsupported, falling back to "
- "SIMD8 at a 10-20%% performance cost: %s", v.no16_msg);
}
}
prog_data->no_8 = false;
}
- fs_generator g(brw, mem_ctx, (void *) key, &prog_data->base,
+ fs_generator g(brw->intelScreen->compiler, brw,
+ mem_ctx, (void *) key, &prog_data->base,
&fp->Base, v.promoted_constants, v.runtime_check_aads_emit, "FS");
if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
#include "brw_fs.h"
#include "brw_nir.h"
+ using namespace brw;
+
void
fs_visitor::emit_nir_code()
{
*/
if (nir->num_inputs > 0) {
- nir_inputs = vgrf(nir->num_inputs);
+ nir_inputs = bld.vgrf(BRW_REGISTER_TYPE_F, nir->num_inputs);
nir_setup_inputs(nir);
}
if (nir->num_outputs > 0) {
- nir_outputs = vgrf(nir->num_outputs);
+ nir_outputs = bld.vgrf(BRW_REGISTER_TYPE_F, nir->num_outputs);
nir_setup_outputs(nir);
}
unsigned array_elems =
reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
unsigned size = array_elems * reg->num_components;
- nir_globals[reg->index] = vgrf(size);
+ nir_globals[reg->index] = bld.vgrf(BRW_REGISTER_TYPE_F, size);
}
/* get the main function and emit it */
unsigned array_length = var->type->is_array() ? var->type->length : 1;
for (unsigned i = 0; i < array_length; i++) {
for (unsigned j = 0; j < components; j++) {
- emit(MOV(retype(offset(input, components * i + j), type),
- offset(fs_reg(ATTR, var->data.location + i, type), j)));
+ bld.MOV(retype(offset(input, components * i + j), type),
+ offset(fs_reg(ATTR, var->data.location + i, type), j));
}
}
break;
if (var->data.location == VARYING_SLOT_POS) {
reg = *emit_fragcoord_interpolation(var->data.pixel_center_integer,
var->data.origin_upper_left);
- emit_percomp(MOV(input, reg), 0xF);
+ emit_percomp(bld, fs_inst(BRW_OPCODE_MOV, input, reg), 0xF);
} else {
emit_general_interpolation(input, var->name, var->type,
(glsl_interp_qualifier) var->data.interpolation,
* our name.
*/
unsigned index = var->data.driver_location;
- for (unsigned u = 0; u < shader_prog->NumUserUniformStorage; u++) {
+ for (unsigned u = 0; u < shader_prog->NumUniformStorage; u++) {
struct gl_uniform_storage *storage = &shader_prog->UniformStorage[u];
+ if (storage->builtin)
+ continue;
+
if (strncmp(var->name, storage->name, namelen) != 0 ||
(storage->name[namelen] != 0 &&
storage->name[namelen] != '.' &&
unsigned array_elems =
reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
unsigned size = array_elems * reg->num_components;
- nir_locals[reg->index] = vgrf(size);
+ nir_locals[reg->index] = bld.vgrf(BRW_REGISTER_TYPE_F, size);
}
nir_emit_cf_list(&impl->body);
fs_visitor::nir_emit_if(nir_if *if_stmt)
{
/* first, put the condition into f0 */
- fs_inst *inst = emit(MOV(reg_null_d,
+ fs_inst *inst = bld.MOV(bld.null_reg_d(),
retype(get_nir_src(if_stmt->condition),
- BRW_REGISTER_TYPE_D)));
+ BRW_REGISTER_TYPE_D));
inst->conditional_mod = BRW_CONDITIONAL_NZ;
- emit(IF(BRW_PREDICATE_NORMAL));
+ bld.IF(BRW_PREDICATE_NORMAL);
nir_emit_cf_list(&if_stmt->then_list);
/* note: if the else is empty, dead CF elimination will remove it */
- emit(BRW_OPCODE_ELSE);
+ bld.emit(BRW_OPCODE_ELSE);
nir_emit_cf_list(&if_stmt->else_list);
- emit(BRW_OPCODE_ENDIF);
+ bld.emit(BRW_OPCODE_ENDIF);
if (!try_replace_with_sel() && devinfo->gen < 6) {
no16("Can't support (non-uniform) control flow on SIMD16\n");
no16("Can't support (non-uniform) control flow on SIMD16\n");
}
- emit(BRW_OPCODE_DO);
+ bld.emit(BRW_OPCODE_DO);
nir_emit_cf_list(&loop->body);
- emit(BRW_OPCODE_WHILE);
+ bld.emit(BRW_OPCODE_WHILE);
}
void
void
fs_visitor::nir_emit_instr(nir_instr *instr)
{
- this->base_ir = instr;
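+ /* Annotate the builder with the NIR instruction so the i965 IR emitted
+ * below points back at it.
+ */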
+ const fs_builder abld = bld.annotate(NULL, instr);
switch (instr->type) {
case nir_instr_type_alu:
- nir_emit_alu(nir_instr_as_alu(instr));
+ nir_emit_alu(abld, nir_instr_as_alu(instr));
break;
case nir_instr_type_intrinsic:
- nir_emit_intrinsic(nir_instr_as_intrinsic(instr));
+ nir_emit_intrinsic(abld, nir_instr_as_intrinsic(instr));
break;
case nir_instr_type_tex:
- nir_emit_texture(nir_instr_as_tex(instr));
+ nir_emit_texture(abld, nir_instr_as_tex(instr));
break;
case nir_instr_type_load_const:
break;
case nir_instr_type_jump:
- nir_emit_jump(nir_instr_as_jump(instr));
+ nir_emit_jump(abld, nir_instr_as_jump(instr));
break;
default:
unreachable("unknown instruction type");
}
-
- this->base_ir = NULL;
}
static brw_reg_type
tmp.subreg_offset = 2;
tmp.stride = 2;
- fs_inst *or_inst = emit(OR(tmp, g0, fs_reg(0x3f80)));
+ fs_inst *or_inst = bld.OR(tmp, g0, fs_reg(0x3f80));
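+ /* OR the front-facing bit from g0 (bit 15) together with 0x3f80 into
+ * the high word of each dword; the AND with 0xbf800000 below then
+ * leaves exactly +1.0f or -1.0f in the result.
+ */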
or_inst->src[1].type = BRW_REGISTER_TYPE_UW;
tmp.type = BRW_REGISTER_TYPE_D;
g1_6.negate = true;
}
- emit(OR(tmp, g1_6, fs_reg(0x3f800000)));
+ bld.OR(tmp, g1_6, fs_reg(0x3f800000));
}
- emit(AND(retype(result, BRW_REGISTER_TYPE_D), tmp, fs_reg(0xbf800000)));
+ bld.AND(retype(result, BRW_REGISTER_TYPE_D), tmp, fs_reg(0xbf800000));
return true;
}
void
- fs_visitor::nir_emit_alu(nir_alu_instr *instr)
+ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr)
{
struct brw_wm_prog_key *fs_key = (struct brw_wm_prog_key *) this->key;
fs_inst *inst;
if (!instr->src[i].src.is_ssa &&
instr->dest.dest.reg.reg == instr->src[i].src.reg.reg) {
need_extra_copy = true;
- temp = retype(vgrf(4), result.type);
+ temp = bld.vgrf(result.type, 4);
break;
}
}
continue;
if (instr->op == nir_op_imov || instr->op == nir_op_fmov) {
- inst = emit(MOV(offset(temp, i),
- offset(op[0], instr->src[0].swizzle[i])));
+ inst = bld.MOV(offset(temp, i),
+ offset(op[0], instr->src[0].swizzle[i]));
} else {
- inst = emit(MOV(offset(temp, i),
- offset(op[i], instr->src[i].swizzle[0])));
+ inst = bld.MOV(offset(temp, i),
+ offset(op[i], instr->src[i].swizzle[0]));
}
inst->saturate = instr->dest.saturate;
}
if (!(instr->dest.write_mask & (1 << i)))
continue;
- emit(MOV(offset(result, i), offset(temp, i)));
+ bld.MOV(offset(result, i), offset(temp, i));
}
}
return;
switch (instr->op) {
case nir_op_i2f:
case nir_op_u2f:
- inst = emit(MOV(result, op[0]));
+ inst = bld.MOV(result, op[0]);
inst->saturate = instr->dest.saturate;
break;
case nir_op_f2i:
case nir_op_f2u:
- emit(MOV(result, op[0]));
+ bld.MOV(result, op[0]);
break;
case nir_op_fsign: {
* Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
* zero.
*/
- emit(CMP(reg_null_f, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ));
+ bld.CMP(bld.null_reg_f(), op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ);
fs_reg result_int = retype(result, BRW_REGISTER_TYPE_UD);
op[0].type = BRW_REGISTER_TYPE_UD;
result.type = BRW_REGISTER_TYPE_UD;
- emit(AND(result_int, op[0], fs_reg(0x80000000u)));
+ bld.AND(result_int, op[0], fs_reg(0x80000000u));
- inst = emit(OR(result_int, result_int, fs_reg(0x3f800000u)));
+ inst = bld.OR(result_int, result_int, fs_reg(0x3f800000u));
inst->predicate = BRW_PREDICATE_NORMAL;
if (instr->dest.saturate) {
- inst = emit(MOV(result, result));
+ inst = bld.MOV(result, result);
inst->saturate = true;
}
break;
* -> non-negative val generates 0x00000000.
* Predicated OR sets 1 if val is positive.
*/
- emit(CMP(reg_null_d, op[0], fs_reg(0), BRW_CONDITIONAL_G));
- emit(ASR(result, op[0], fs_reg(31)));
- inst = emit(OR(result, result, fs_reg(1)));
+ bld.CMP(bld.null_reg_d(), op[0], fs_reg(0), BRW_CONDITIONAL_G);
+ bld.ASR(result, op[0], fs_reg(31));
+ inst = bld.OR(result, result, fs_reg(1));
inst->predicate = BRW_PREDICATE_NORMAL;
break;
case nir_op_frcp:
- inst = emit_math(SHADER_OPCODE_RCP, result, op[0]);
+ inst = bld.emit(SHADER_OPCODE_RCP, result, op[0]);
inst->saturate = instr->dest.saturate;
break;
case nir_op_fexp2:
- inst = emit_math(SHADER_OPCODE_EXP2, result, op[0]);
+ inst = bld.emit(SHADER_OPCODE_EXP2, result, op[0]);
inst->saturate = instr->dest.saturate;
break;
case nir_op_flog2:
- inst = emit_math(SHADER_OPCODE_LOG2, result, op[0]);
+ inst = bld.emit(SHADER_OPCODE_LOG2, result, op[0]);
inst->saturate = instr->dest.saturate;
break;
case nir_op_fsin:
- inst = emit_math(SHADER_OPCODE_SIN, result, op[0]);
+ inst = bld.emit(SHADER_OPCODE_SIN, result, op[0]);
inst->saturate = instr->dest.saturate;
break;
case nir_op_fcos:
- inst = emit_math(SHADER_OPCODE_COS, result, op[0]);
+ inst = bld.emit(SHADER_OPCODE_COS, result, op[0]);
inst->saturate = instr->dest.saturate;
break;
case nir_op_fddx:
if (fs_key->high_quality_derivatives) {
- inst = emit(FS_OPCODE_DDX_FINE, result, op[0]);
+ inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
} else {
- inst = emit(FS_OPCODE_DDX_COARSE, result, op[0]);
+ inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
}
inst->saturate = instr->dest.saturate;
break;
case nir_op_fddx_fine:
- inst = emit(FS_OPCODE_DDX_FINE, result, op[0]);
+ inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
inst->saturate = instr->dest.saturate;
break;
case nir_op_fddx_coarse:
- inst = emit(FS_OPCODE_DDX_COARSE, result, op[0]);
+ inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
inst->saturate = instr->dest.saturate;
break;
case nir_op_fddy:
if (fs_key->high_quality_derivatives) {
- inst = emit(FS_OPCODE_DDY_FINE, result, op[0],
- fs_reg(fs_key->render_to_fbo));
+ inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0],
+ fs_reg(fs_key->render_to_fbo));
} else {
- inst = emit(FS_OPCODE_DDY_COARSE, result, op[0],
- fs_reg(fs_key->render_to_fbo));
+ inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0],
+ fs_reg(fs_key->render_to_fbo));
}
inst->saturate = instr->dest.saturate;
break;
case nir_op_fddy_fine:
- inst = emit(FS_OPCODE_DDY_FINE, result, op[0],
- fs_reg(fs_key->render_to_fbo));
+ inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0],
+ fs_reg(fs_key->render_to_fbo));
inst->saturate = instr->dest.saturate;
break;
case nir_op_fddy_coarse:
- inst = emit(FS_OPCODE_DDY_COARSE, result, op[0],
- fs_reg(fs_key->render_to_fbo));
+ inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0],
+ fs_reg(fs_key->render_to_fbo));
inst->saturate = instr->dest.saturate;
break;
case nir_op_fadd:
case nir_op_iadd:
- inst = emit(ADD(result, op[0], op[1]));
+ inst = bld.ADD(result, op[0], op[1]);
inst->saturate = instr->dest.saturate;
break;
case nir_op_fmul:
- inst = emit(MUL(result, op[0], op[1]));
+ inst = bld.MUL(result, op[0], op[1]);
inst->saturate = instr->dest.saturate;
break;
- case nir_op_imul: {
- if (devinfo->gen >= 8) {
- emit(MUL(result, op[0], op[1]));
- break;
- } else {
- nir_const_value *value0 = nir_src_as_const_value(instr->src[0].src);
- nir_const_value *value1 = nir_src_as_const_value(instr->src[1].src);
-
- if (value0 && value0->u[0] < (1 << 16)) {
- if (devinfo->gen < 7) {
- emit(MUL(result, op[0], op[1]));
- } else {
- emit(MUL(result, op[1], op[0]));
- }
- break;
- } else if (value1 && value1->u[0] < (1 << 16)) {
- if (devinfo->gen < 7) {
- emit(MUL(result, op[1], op[0]));
- } else {
- emit(MUL(result, op[0], op[1]));
- }
- break;
- }
- }
-
- if (devinfo->gen >= 7)
- no16("SIMD16 explicit accumulator operands unsupported\n");
-
- struct brw_reg acc = retype(brw_acc_reg(dispatch_width), result.type);
-
- emit(MUL(acc, op[0], op[1]));
- emit(MACH(reg_null_d, op[0], op[1]));
- emit(MOV(result, fs_reg(acc)));
+ case nir_op_imul:
+ bld.MUL(result, op[0], op[1]);
break;
- }
case nir_op_imul_high:
case nir_op_umul_high: {
struct brw_reg acc = retype(brw_acc_reg(dispatch_width), result.type);
- fs_inst *mul = emit(MUL(acc, op[0], op[1]));
- emit(MACH(result, op[0], op[1]));
+ fs_inst *mul = bld.MUL(acc, op[0], op[1]);
+ bld.MACH(result, op[0], op[1]);
/* Until Gen8, integer multiplies read 32-bits from one source, and
* 16-bits from the other, and relying on the MACH instruction to
case nir_op_idiv:
case nir_op_udiv:
- emit_math(SHADER_OPCODE_INT_QUOTIENT, result, op[0], op[1]);
+ bld.emit(SHADER_OPCODE_INT_QUOTIENT, result, op[0], op[1]);
break;
case nir_op_uadd_carry: {
struct brw_reg acc = retype(brw_acc_reg(dispatch_width),
BRW_REGISTER_TYPE_UD);
- emit(ADDC(reg_null_ud, op[0], op[1]));
- emit(MOV(result, fs_reg(acc)));
+ bld.ADDC(bld.null_reg_ud(), op[0], op[1]);
+ bld.MOV(result, fs_reg(acc));
break;
}
struct brw_reg acc = retype(brw_acc_reg(dispatch_width),
BRW_REGISTER_TYPE_UD);
- emit(SUBB(reg_null_ud, op[0], op[1]));
- emit(MOV(result, fs_reg(acc)));
+ bld.SUBB(bld.null_reg_ud(), op[0], op[1]);
+ bld.MOV(result, fs_reg(acc));
break;
}
case nir_op_umod:
- emit_math(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);
+ bld.emit(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);
break;
case nir_op_flt:
case nir_op_ilt:
case nir_op_ult:
- emit(CMP(result, op[0], op[1], BRW_CONDITIONAL_L));
+ bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_L);
break;
case nir_op_fge:
case nir_op_ige:
case nir_op_uge:
- emit(CMP(result, op[0], op[1], BRW_CONDITIONAL_GE));
+ bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_GE);
break;
case nir_op_feq:
case nir_op_ieq:
- emit(CMP(result, op[0], op[1], BRW_CONDITIONAL_Z));
+ bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_Z);
break;
case nir_op_fne:
case nir_op_ine:
- emit(CMP(result, op[0], op[1], BRW_CONDITIONAL_NZ));
+ bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_NZ);
break;
case nir_op_inot:
if (devinfo->gen >= 8) {
resolve_source_modifiers(&op[0]);
}
- emit(NOT(result, op[0]));
+ bld.NOT(result, op[0]);
break;
case nir_op_ixor:
if (devinfo->gen >= 8) {
resolve_source_modifiers(&op[0]);
resolve_source_modifiers(&op[1]);
}
- emit(XOR(result, op[0], op[1]));
+ bld.XOR(result, op[0], op[1]);
break;
case nir_op_ior:
if (devinfo->gen >= 8) {
resolve_source_modifiers(&op[0]);
resolve_source_modifiers(&op[1]);
}
- emit(OR(result, op[0], op[1]));
+ bld.OR(result, op[0], op[1]);
break;
case nir_op_iand:
if (devinfo->gen >= 8) {
resolve_source_modifiers(&op[0]);
resolve_source_modifiers(&op[1]);
}
- emit(AND(result, op[0], op[1]));
+ bld.AND(result, op[0], op[1]);
break;
case nir_op_fdot2:
unreachable("not reached: should be handled by ldexp_to_arith()");
case nir_op_fsqrt:
- inst = emit_math(SHADER_OPCODE_SQRT, result, op[0]);
+ inst = bld.emit(SHADER_OPCODE_SQRT, result, op[0]);
inst->saturate = instr->dest.saturate;
break;
case nir_op_frsq:
- inst = emit_math(SHADER_OPCODE_RSQ, result, op[0]);
+ inst = bld.emit(SHADER_OPCODE_RSQ, result, op[0]);
inst->saturate = instr->dest.saturate;
break;
case nir_op_b2i:
- emit(AND(result, op[0], fs_reg(1)));
+ bld.AND(result, op[0], fs_reg(1));
break;
case nir_op_b2f:
- emit(AND(retype(result, BRW_REGISTER_TYPE_UD), op[0], fs_reg(0x3f800000u)));
+ bld.AND(retype(result, BRW_REGISTER_TYPE_UD), op[0], fs_reg(0x3f800000u));
break;
case nir_op_f2b:
- emit(CMP(result, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ));
+ bld.CMP(result, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ);
break;
case nir_op_i2b:
- emit(CMP(result, op[0], fs_reg(0), BRW_CONDITIONAL_NZ));
+ bld.CMP(result, op[0], fs_reg(0), BRW_CONDITIONAL_NZ);
break;
case nir_op_ftrunc:
- inst = emit(RNDZ(result, op[0]));
+ inst = bld.RNDZ(result, op[0]);
inst->saturate = instr->dest.saturate;
break;
case nir_op_fceil: {
op[0].negate = !op[0].negate;
fs_reg temp = vgrf(glsl_type::float_type);
- emit(RNDD(temp, op[0]));
+ bld.RNDD(temp, op[0]);
temp.negate = true;
- inst = emit(MOV(result, temp));
+ inst = bld.MOV(result, temp);
inst->saturate = instr->dest.saturate;
break;
}
case nir_op_ffloor:
- inst = emit(RNDD(result, op[0]));
+ inst = bld.RNDD(result, op[0]);
inst->saturate = instr->dest.saturate;
break;
case nir_op_ffract:
- inst = emit(FRC(result, op[0]));
+ inst = bld.FRC(result, op[0]);
inst->saturate = instr->dest.saturate;
break;
case nir_op_fround_even:
- inst = emit(RNDE(result, op[0]));
+ inst = bld.RNDE(result, op[0]);
inst->saturate = instr->dest.saturate;
break;
case nir_op_imin:
case nir_op_umin:
if (devinfo->gen >= 6) {
- inst = emit(BRW_OPCODE_SEL, result, op[0], op[1]);
+ inst = bld.emit(BRW_OPCODE_SEL, result, op[0], op[1]);
inst->conditional_mod = BRW_CONDITIONAL_L;
} else {
- emit(CMP(reg_null_d, op[0], op[1], BRW_CONDITIONAL_L));
- inst = emit(SEL(result, op[0], op[1]));
+ bld.CMP(bld.null_reg_d(), op[0], op[1], BRW_CONDITIONAL_L);
+ inst = bld.SEL(result, op[0], op[1]);
inst->predicate = BRW_PREDICATE_NORMAL;
}
inst->saturate = instr->dest.saturate;
case nir_op_imax:
case nir_op_umax:
if (devinfo->gen >= 6) {
- inst = emit(BRW_OPCODE_SEL, result, op[0], op[1]);
+ inst = bld.emit(BRW_OPCODE_SEL, result, op[0], op[1]);
inst->conditional_mod = BRW_CONDITIONAL_GE;
} else {
- emit(CMP(reg_null_d, op[0], op[1], BRW_CONDITIONAL_GE));
- inst = emit(SEL(result, op[0], op[1]));
+ bld.CMP(bld.null_reg_d(), op[0], op[1], BRW_CONDITIONAL_GE);
+ inst = bld.SEL(result, op[0], op[1]);
inst->predicate = BRW_PREDICATE_NORMAL;
}
inst->saturate = instr->dest.saturate;
unreachable("not reached: should be handled by lower_packing_builtins");
case nir_op_unpack_half_2x16_split_x:
- inst = emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X, result, op[0]);
+ inst = bld.emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X, result, op[0]);
inst->saturate = instr->dest.saturate;
break;
case nir_op_unpack_half_2x16_split_y:
- inst = emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y, result, op[0]);
+ inst = bld.emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y, result, op[0]);
inst->saturate = instr->dest.saturate;
break;
case nir_op_fpow:
- inst = emit_math(SHADER_OPCODE_POW, result, op[0], op[1]);
+ inst = bld.emit(SHADER_OPCODE_POW, result, op[0], op[1]);
inst->saturate = instr->dest.saturate;
break;
case nir_op_bitfield_reverse:
- emit(BFREV(result, op[0]));
+ bld.BFREV(result, op[0]);
break;
case nir_op_bit_count:
- emit(CBIT(result, op[0]));
+ bld.CBIT(result, op[0]);
break;
case nir_op_ufind_msb:
case nir_op_ifind_msb: {
- emit(FBH(retype(result, BRW_REGISTER_TYPE_UD), op[0]));
+ bld.FBH(retype(result, BRW_REGISTER_TYPE_UD), op[0]);
/* FBH counts from the MSB side, while GLSL's findMSB() wants the count
* from the LSB side. If FBH didn't return an error (0xFFFFFFFF), then
* subtract the result from 31 to convert the MSB count into an LSB count.
*/
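+ /* E.g. for 0x00010000, FBH returns 15 and 31 - 15 = 16, which matches
+ * findMSB().
+ */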
- emit(CMP(reg_null_d, result, fs_reg(-1), BRW_CONDITIONAL_NZ));
+ bld.CMP(bld.null_reg_d(), result, fs_reg(-1), BRW_CONDITIONAL_NZ);
fs_reg neg_result(result);
neg_result.negate = true;
- inst = emit(ADD(result, neg_result, fs_reg(31)));
+ inst = bld.ADD(result, neg_result, fs_reg(31));
inst->predicate = BRW_PREDICATE_NORMAL;
break;
}
case nir_op_find_lsb:
- emit(FBL(result, op[0]));
+ bld.FBL(result, op[0]);
break;
case nir_op_ubitfield_extract:
case nir_op_ibitfield_extract:
- emit(BFE(result, op[2], op[1], op[0]));
+ bld.BFE(result, op[2], op[1], op[0]);
break;
case nir_op_bfm:
- emit(BFI1(result, op[0], op[1]));
+ bld.BFI1(result, op[0], op[1]);
break;
case nir_op_bfi:
- emit(BFI2(result, op[0], op[1], op[2]));
+ bld.BFI2(result, op[0], op[1], op[2]);
break;
case nir_op_bitfield_insert:
"lower_instructions::bitfield_insert_to_bfm_bfi");
case nir_op_ishl:
- emit(SHL(result, op[0], op[1]));
+ bld.SHL(result, op[0], op[1]);
break;
case nir_op_ishr:
- emit(ASR(result, op[0], op[1]));
+ bld.ASR(result, op[0], op[1]);
break;
case nir_op_ushr:
- emit(SHR(result, op[0], op[1]));
+ bld.SHR(result, op[0], op[1]);
break;
case nir_op_pack_half_2x16_split:
- emit(FS_OPCODE_PACK_HALF_2x16_SPLIT, result, op[0], op[1]);
+ bld.emit(FS_OPCODE_PACK_HALF_2x16_SPLIT, result, op[0], op[1]);
break;
case nir_op_ffma:
- inst = emit(MAD(result, op[2], op[1], op[0]));
+ inst = bld.MAD(result, op[2], op[1], op[0]);
inst->saturate = instr->dest.saturate;
break;
case nir_op_flrp:
- inst = emit_lrp(result, op[0], op[1], op[2]);
+ inst = bld.LRP(result, op[0], op[1], op[2]);
inst->saturate = instr->dest.saturate;
break;
if (optimize_frontfacing_ternary(instr, result))
return;
- emit(CMP(reg_null_d, op[0], fs_reg(0), BRW_CONDITIONAL_NZ));
- inst = emit(SEL(result, op[1], op[2]));
+ bld.CMP(bld.null_reg_d(), op[0], fs_reg(0), BRW_CONDITIONAL_NZ);
+ inst = bld.SEL(result, op[1], op[2]);
inst->predicate = BRW_PREDICATE_NORMAL;
break;
if (devinfo->gen <= 5 &&
(instr->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) == BRW_NIR_BOOLEAN_NEEDS_RESOLVE) {
fs_reg masked = vgrf(glsl_type::int_type);
- emit(AND(masked, result, fs_reg(1)));
+ bld.AND(masked, result, fs_reg(1));
masked.negate = true;
- emit(MOV(retype(result, BRW_REGISTER_TYPE_D), masked));
+ bld.MOV(retype(result, BRW_REGISTER_TYPE_D), masked);
}
}
int multiplier = nir_reg->num_components * (v->dispatch_width / 8);
reg.reladdr = new(v->mem_ctx) fs_reg(v->vgrf(glsl_type::int_type));
- v->emit(v->MUL(*reg.reladdr, v->get_nir_src(*indirect),
- fs_reg(multiplier)));
+ v->bld.MUL(*reg.reladdr, v->get_nir_src(*indirect),
+ fs_reg(multiplier));
}
return reg;
if (src.is_ssa) {
assert(src.ssa->parent_instr->type == nir_instr_type_load_const);
nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);
- fs_reg reg = vgrf(src.ssa->num_components);
- reg.type = BRW_REGISTER_TYPE_D;
+ fs_reg reg = bld.vgrf(BRW_REGISTER_TYPE_D, src.ssa->num_components);
for (unsigned i = 0; i < src.ssa->num_components; ++i)
- emit(MOV(offset(reg, i), fs_reg(load->value.i[i])));
+ bld.MOV(offset(reg, i), fs_reg(load->value.i[i]));
return reg;
} else {
}
void
- fs_visitor::emit_percomp(fs_inst *inst, unsigned wr_mask)
+ fs_visitor::emit_percomp(const fs_builder &bld, const fs_inst &inst,
+ unsigned wr_mask)
{
for (unsigned i = 0; i < 4; i++) {
if (!((wr_mask >> i) & 1))
continue;
- fs_inst *new_inst = new(mem_ctx) fs_inst(*inst);
+ fs_inst *new_inst = new(mem_ctx) fs_inst(inst);
new_inst->dst = offset(new_inst->dst, i);
for (unsigned j = 0; j < new_inst->sources; j++)
- if (inst->src[j].file == GRF)
+ if (new_inst->src[j].file == GRF)
new_inst->src[j] = offset(new_inst->src[j], i);
- emit(new_inst);
+ bld.emit(new_inst);
}
}
void
- fs_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
+ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr)
{
fs_reg dest;
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
*/
fs_inst *cmp;
if (instr->intrinsic == nir_intrinsic_discard_if) {
- cmp = emit(CMP(reg_null_f, get_nir_src(instr->src[0]),
- fs_reg(0), BRW_CONDITIONAL_Z));
+ cmp = bld.CMP(bld.null_reg_f(), get_nir_src(instr->src[0]),
+ fs_reg(0), BRW_CONDITIONAL_Z);
} else {
fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
BRW_REGISTER_TYPE_UW));
- cmp = emit(CMP(reg_null_f, some_reg, some_reg, BRW_CONDITIONAL_NZ));
+ cmp = bld.CMP(bld.null_reg_f(), some_reg, some_reg, BRW_CONDITIONAL_NZ);
}
cmp->predicate = BRW_PREDICATE_NORMAL;
cmp->flag_subreg = 1;
}
case nir_intrinsic_load_front_face:
- emit(MOV(retype(dest, BRW_REGISTER_TYPE_D),
- *emit_frontfacing_interpolation()));
+ bld.MOV(retype(dest, BRW_REGISTER_TYPE_D),
+ *emit_frontfacing_interpolation());
break;
case nir_intrinsic_load_vertex_id:
fs_reg vertex_id = nir_system_values[SYSTEM_VALUE_VERTEX_ID_ZERO_BASE];
assert(vertex_id.file != BAD_FILE);
dest.type = vertex_id.type;
- emit(MOV(dest, vertex_id));
+ bld.MOV(dest, vertex_id);
break;
}
fs_reg base_vertex = nir_system_values[SYSTEM_VALUE_BASE_VERTEX];
assert(base_vertex.file != BAD_FILE);
dest.type = base_vertex.type;
- emit(MOV(dest, base_vertex));
+ bld.MOV(dest, base_vertex);
break;
}
fs_reg instance_id = nir_system_values[SYSTEM_VALUE_INSTANCE_ID];
assert(instance_id.file != BAD_FILE);
dest.type = instance_id.type;
- emit(MOV(dest, instance_id));
+ bld.MOV(dest, instance_id);
break;
}
fs_reg sample_mask_in = nir_system_values[SYSTEM_VALUE_SAMPLE_MASK_IN];
assert(sample_mask_in.file != BAD_FILE);
dest.type = sample_mask_in.type;
- emit(MOV(dest, sample_mask_in));
+ bld.MOV(dest, sample_mask_in);
break;
}
fs_reg sample_pos = nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
assert(sample_pos.file != BAD_FILE);
dest.type = sample_pos.type;
- emit(MOV(dest, sample_pos));
- emit(MOV(offset(dest, 1), offset(sample_pos, 1)));
+ bld.MOV(dest, sample_pos);
+ bld.MOV(offset(dest, 1), offset(sample_pos, 1));
break;
}
fs_reg sample_id = nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
assert(sample_id.file != BAD_FILE);
dest.type = sample_id.type;
- emit(MOV(dest, sample_id));
+ bld.MOV(dest, sample_id);
break;
}
index -= num_direct_uniforms;
}
- for (int i = 0; i < instr->const_index[1]; i++) {
- for (unsigned j = 0; j < instr->num_components; j++) {
- fs_reg src = offset(retype(uniform_reg, dest.type), index);
- if (has_indirect)
- src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
- index++;
+ for (unsigned j = 0; j < instr->num_components; j++) {
+ fs_reg src = offset(retype(uniform_reg, dest.type), index);
+ if (has_indirect)
+ src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
+ index++;
- emit(MOV(dest, src));
- dest = offset(dest, 1);
- }
+ bld.MOV(dest, src);
+ dest = offset(dest, 1);
}
break;
}
fs_reg surf_index;
if (const_index) {
- surf_index = fs_reg(stage_prog_data->binding_table.ubo_start +
- const_index->u[0]);
+ uint32_t index = const_index->u[0];
+ uint32_t set = shader->base.UniformBlocks[index].Set;
+ uint32_t binding = shader->base.UniformBlocks[index].Binding;
+
+ /* FIXME: We should probably assert here, but dota2 seems to hit
+ * it and we'd like to keep going.
+ */
+ if (binding >= stage_prog_data->bind_map[set].index_count)
+ binding = 0;
+
+ surf_index = fs_reg(stage_prog_data->bind_map[set].index[binding]);
} else {
+ assert(0 && "need more info from the ir for this.");
/* The block index is not a constant. Evaluate the index expression
* per-channel and add the base UBO index; we have to select a value
* from any live channel.
*/
surf_index = vgrf(glsl_type::uint_type);
- emit(ADD(surf_index, get_nir_src(instr->src[0]),
- fs_reg(stage_prog_data->binding_table.ubo_start)));
- emit_uniformize(surf_index, surf_index);
+ bld.ADD(surf_index, get_nir_src(instr->src[0]),
+ fs_reg(stage_prog_data->binding_table.ubo_start));
+ bld.emit_uniformize(surf_index, surf_index);
/* Assume this may touch any UBO. It would be nice to provide
* a tighter bound, but the array information is already lowered away.
if (has_indirect) {
/* Turn the byte offset into a dword offset. */
fs_reg base_offset = vgrf(glsl_type::int_type);
- emit(SHR(base_offset, retype(get_nir_src(instr->src[1]),
- BRW_REGISTER_TYPE_D),
- fs_reg(2)));
+ bld.SHR(base_offset, retype(get_nir_src(instr->src[1]),
+ BRW_REGISTER_TYPE_D),
+ fs_reg(2));
unsigned vec4_offset = instr->const_index[0] / 4;
for (int i = 0; i < instr->num_components; i++)
- emit(VARYING_PULL_CONSTANT_LOAD(offset(dest, i), surf_index,
- base_offset, vec4_offset + i));
+ VARYING_PULL_CONSTANT_LOAD(bld, offset(dest, i), surf_index,
+ base_offset, vec4_offset + i);
} else {
fs_reg packed_consts = vgrf(glsl_type::float_type);
packed_consts.type = dest.type;
fs_reg const_offset_reg((unsigned) instr->const_index[0] & ~15);
- emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, packed_consts,
- surf_index, const_offset_reg);
+ bld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, packed_consts,
+ surf_index, const_offset_reg);
for (unsigned i = 0; i < instr->num_components; i++) {
packed_consts.set_smear(instr->const_index[0] % 16 / 4 + i);
*/
assert(packed_consts.subreg_offset < 32);
- emit(MOV(dest, packed_consts));
+ bld.MOV(dest, packed_consts);
dest = offset(dest, 1);
}
}
/* fallthrough */
case nir_intrinsic_load_input: {
unsigned index = 0;
- for (int i = 0; i < instr->const_index[1]; i++) {
- for (unsigned j = 0; j < instr->num_components; j++) {
- fs_reg src = offset(retype(nir_inputs, dest.type),
- instr->const_index[0] + index);
- if (has_indirect)
- src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
- index++;
-
- emit(MOV(dest, src));
- dest = offset(dest, 1);
- }
+ for (unsigned j = 0; j < instr->num_components; j++) {
+ fs_reg src = offset(retype(nir_inputs, dest.type),
+ instr->const_index[0] + index);
+ if (has_indirect)
+ src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
+ index++;
+
+ bld.MOV(dest, src);
+ dest = offset(dest, 1);
}
break;
}
*/
no16("interpolate_at_* not yet supported in SIMD16 mode.");
- fs_reg dst_xy = vgrf(2);
+ fs_reg dst_xy = bld.vgrf(BRW_REGISTER_TYPE_F, 2);
/* For most messages, we need one reg of ignored data; the hardware
* requires mlen==1 even when there is no payload. In the per-slot
switch (instr->intrinsic) {
case nir_intrinsic_interp_var_at_centroid:
- inst = emit(FS_OPCODE_INTERPOLATE_AT_CENTROID, dst_xy, src, fs_reg(0u));
+ inst = bld.emit(FS_OPCODE_INTERPOLATE_AT_CENTROID,
+ dst_xy, src, fs_reg(0u));
break;
case nir_intrinsic_interp_var_at_sample: {
nir_const_value *const_sample = nir_src_as_const_value(instr->src[0]);
assert(const_sample);
unsigned msg_data = const_sample ? const_sample->i[0] << 4 : 0;
- inst = emit(FS_OPCODE_INTERPOLATE_AT_SAMPLE, dst_xy, src,
- fs_reg(msg_data));
+ inst = bld.emit(FS_OPCODE_INTERPOLATE_AT_SAMPLE, dst_xy, src,
+ fs_reg(msg_data));
break;
}
unsigned off_x = MIN2((int)(const_offset->f[0] * 16), 7) & 0xf;
unsigned off_y = MIN2((int)(const_offset->f[1] * 16), 7) & 0xf;
- inst = emit(FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET, dst_xy, src,
- fs_reg(off_x | (off_y << 4)));
+ inst = bld.emit(FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET, dst_xy, src,
+ fs_reg(off_x | (off_y << 4)));
} else {
src = vgrf(glsl_type::ivec2_type);
fs_reg offset_src = retype(get_nir_src(instr->src[0]),
BRW_REGISTER_TYPE_F);
for (int i = 0; i < 2; i++) {
fs_reg temp = vgrf(glsl_type::float_type);
- emit(MUL(temp, offset(offset_src, i), fs_reg(16.0f)));
+ bld.MUL(temp, offset(offset_src, i), fs_reg(16.0f));
fs_reg itemp = vgrf(glsl_type::int_type);
- emit(MOV(itemp, temp)); /* float to int */
+ bld.MOV(itemp, temp); /* float to int */
/* Clamp the upper end of the range to +7/16.
* ARB_gpu_shader5 requires that we support a maximum offset
* implementation-dependent constant
* FRAGMENT_INTERPOLATION_OFFSET_BITS"
*/
-
- emit(BRW_OPCODE_SEL, offset(src, i), itemp, fs_reg(7))
- ->conditional_mod = BRW_CONDITIONAL_L; /* min(src2, 7) */
+ set_condmod(BRW_CONDITIONAL_L,
+ bld.SEL(offset(src, i), itemp, fs_reg(7)));
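+ /* For example, a requested offset of 0.5 scales to 8.0 above, and the
+  * conditional SEL then clamps it to 7, i.e. the +7/16 maximum.
+  */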
}
mlen = 2;
- inst = emit(FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET, dst_xy, src,
- fs_reg(0u));
+ inst = bld.emit(FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET, dst_xy, src,
+ fs_reg(0u));
}
break;
}
fs_reg src = interp_reg(instr->variables[0]->var->data.location, j);
src.type = dest.type;
- emit(FS_OPCODE_LINTERP, dest, dst_xy, src);
+ bld.emit(FS_OPCODE_LINTERP, dest, dst_xy, src);
dest = offset(dest, 1);
}
break;
case nir_intrinsic_store_output: {
fs_reg src = get_nir_src(instr->src[0]);
unsigned index = 0;
- for (int i = 0; i < instr->const_index[1]; i++) {
- for (unsigned j = 0; j < instr->num_components; j++) {
- fs_reg new_dest = offset(retype(nir_outputs, src.type),
- instr->const_index[0] + index);
- if (has_indirect)
- src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[1]));
- index++;
- emit(MOV(new_dest, src));
- src = offset(src, 1);
- }
+ for (unsigned j = 0; j < instr->num_components; j++) {
+ fs_reg new_dest = offset(retype(nir_outputs, src.type),
+ instr->const_index[0] + index);
+ if (has_indirect)
+ src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[1]));
+ index++;
+ bld.MOV(new_dest, src);
+ src = offset(src, 1);
}
break;
}
+ case nir_intrinsic_barrier:
+ emit_barrier();
+ break;
+
default:
unreachable("unknown intrinsic");
}
}
void
- fs_visitor::nir_emit_texture(nir_tex_instr *instr)
+ fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr)
{
- unsigned sampler = instr->sampler_index;
+ uint32_t set = instr->sampler_set;
+ uint32_t binding = instr->sampler_index;
+
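+ /* Vulkan-style (set, binding) pairs are flattened into flat binding-table
+  * indices via the per-set bind_map that set_binding_table_layout() fills
+  * in when the pipeline is compiled.
+  */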
+ assert(binding < stage_prog_data->bind_map[set].index_count);
+ assert(stage_prog_data->bind_map[set].index[binding] < 1000); /* arbitrary bring-up sanity bound */
+
+ unsigned sampler = stage_prog_data->bind_map[set].index[binding];
fs_reg sampler_reg(sampler);
/* FINISHME: We're failing to recompile our programs when the sampler is
bool is_cube_array = instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
instr->is_array;
- int lod_components = 0, offset_components = 0;
+ int lod_components = 0;
+ int UNUSED offset_components = 0;
fs_reg coordinate, shadow_comparitor, lod, lod2, sample_index, mcs, tex_offset;
/* Emit code to evaluate the actual indexing expression */
sampler_reg = vgrf(glsl_type::uint_type);
- emit(ADD(sampler_reg, src, fs_reg(sampler)));
- emit_uniformize(sampler_reg, sampler_reg);
+ bld.ADD(sampler_reg, src, fs_reg(sampler));
+ bld.emit_uniformize(sampler_reg, sampler_reg);
break;
}
fs_reg dest = get_nir_dest(instr->dest);
dest.type = this->result.type;
unsigned num_components = nir_tex_instr_dest_size(instr);
- emit_percomp(MOV(dest, this->result), (1 << num_components) - 1);
+ emit_percomp(bld, fs_inst(BRW_OPCODE_MOV, dest, this->result),
+ (1 << num_components) - 1);
}
void
- fs_visitor::nir_emit_jump(nir_jump_instr *instr)
+ fs_visitor::nir_emit_jump(const fs_builder &bld, nir_jump_instr *instr)
{
switch (instr->type) {
case nir_jump_break:
- emit(BRW_OPCODE_BREAK);
+ bld.emit(BRW_OPCODE_BREAK);
break;
case nir_jump_continue:
- emit(BRW_OPCODE_CONTINUE);
+ bld.emit(BRW_OPCODE_CONTINUE);
break;
case nir_jump_return:
default:
/* Get rid of split copies */
nir_optimize(nir);
- if (shader_prog) {
- nir_assign_var_locations_scalar_direct_first(nir, &nir->uniforms,
- &nir->num_direct_uniforms,
- &nir->num_uniforms);
- } else {
- /* ARB programs generally create a giant array of "uniform" data, and allow
- * indirect addressing without any boundaries. In the absence of bounds
- * analysis, it's all or nothing. num_direct_uniforms is only useful when
- * we have some direct and some indirect access; it doesn't matter here.
- */
- nir->num_direct_uniforms = 0;
- }
+ nir_assign_var_locations_scalar_direct_first(nir, &nir->uniforms,
+ &nir->num_direct_uniforms,
+ &nir->num_uniforms);
nir_assign_var_locations_scalar(&nir->inputs, &nir->num_inputs);
nir_assign_var_locations_scalar(&nir->outputs, &nir->num_outputs);
nir_validate_shader(nir);
if (shader_prog) {
- nir_lower_samplers(nir, shader_prog, stage);
+ nir_lower_samplers_for_vk(nir);
nir_validate_shader(nir);
}
nir_validate_shader(nir);
if (unlikely(debug_enabled)) {
+ /* Re-index SSA defs so we print more sensible numbers. */
+ nir_foreach_overload(nir, overload) {
+ if (overload->impl)
+ nir_index_ssa_defs(overload->impl);
+ }
+
fprintf(stderr, "NIR (SSA form) for %s shader:\n",
_mesa_shader_stage_to_string(stage));
nir_print_shader(nir, stderr);
return NULL;
}
- case MESA_GEOMETRY_PROGRAM: {
+ case GL_GEOMETRY_PROGRAM_NV: {
struct brw_geometry_program *prog = CALLOC_STRUCT(brw_geometry_program);
if (prog) {
prog->id = get_new_program_id(brw->intelScreen);
void brwInitFragProgFuncs( struct dd_function_table *functions )
{
- assert(functions->ProgramStringNotify == _tnl_program_string);
+ /* assert(functions->ProgramStringNotify == _tnl_program_string); */
functions->NewProgram = brwNewProgram;
functions->DeleteProgram = brwDeleteProgram;
functions->MemoryBarrier = brw_memory_barrier;
}
+ struct shader_times {
+ uint64_t time;
+ uint64_t written;
+ uint64_t reset;
+ };
+
void
brw_init_shader_time(struct brw_context *brw)
{
- const int max_entries = 4096;
- brw->shader_time.bo = drm_intel_bo_alloc(brw->bufmgr, "shader time",
- max_entries * SHADER_TIME_STRIDE,
- 4096);
+ const int max_entries = 2048;
+ brw->shader_time.bo =
+ drm_intel_bo_alloc(brw->bufmgr, "shader time",
+ max_entries * SHADER_TIME_STRIDE * 3, 4096);
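+ /* Each entry now occupies three consecutive SHADER_TIME_STRIDE slots,
+  * holding time, written and reset in that order; the collection code
+  * reads them back at stride offsets 0, 1 and 2.
+  */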
brw->shader_time.names = rzalloc_array(brw, const char *, max_entries);
brw->shader_time.ids = rzalloc_array(brw, int, max_entries);
brw->shader_time.types = rzalloc_array(brw, enum shader_time_shader_type,
max_entries);
- brw->shader_time.cumulative = rzalloc_array(brw, uint64_t,
+ brw->shader_time.cumulative = rzalloc_array(brw, struct shader_times,
max_entries);
brw->shader_time.max_entries = max_entries;
}
return 1;
}
- static void
- get_written_and_reset(struct brw_context *brw, int i,
- uint64_t *written, uint64_t *reset)
- {
- enum shader_time_shader_type type = brw->shader_time.types[i];
- assert(type == ST_VS || type == ST_GS || type == ST_FS8 ||
- type == ST_FS16 || type == ST_CS);
-
- /* Find where we recorded written and reset. */
- int wi, ri;
-
- for (wi = i; brw->shader_time.types[wi] != type + 1; wi++)
- ;
-
- for (ri = i; brw->shader_time.types[ri] != type + 2; ri++)
- ;
-
- *written = brw->shader_time.cumulative[wi];
- *reset = brw->shader_time.cumulative[ri];
- }
-
static void
print_shader_time_line(const char *stage, const char *name,
int shader_num, uint64_t time, uint64_t total)
sorted[i] = &scaled[i];
switch (type) {
- case ST_VS_WRITTEN:
- case ST_VS_RESET:
- case ST_GS_WRITTEN:
- case ST_GS_RESET:
- case ST_FS8_WRITTEN:
- case ST_FS8_RESET:
- case ST_FS16_WRITTEN:
- case ST_FS16_RESET:
- case ST_CS_WRITTEN:
- case ST_CS_RESET:
- /* We'll handle these when along with the time. */
- scaled[i] = 0;
- continue;
-
case ST_VS:
case ST_GS:
case ST_FS8:
case ST_FS16:
case ST_CS:
- get_written_and_reset(brw, i, &written, &reset);
+ written = brw->shader_time.cumulative[i].written;
+ reset = brw->shader_time.cumulative[i].reset;
break;
default:
break;
}
- uint64_t time = brw->shader_time.cumulative[i];
+ uint64_t time = brw->shader_time.cumulative[i].time;
if (written) {
scaled[i] = time / written * (written + reset);
} else {
* overhead compared to the cost of tracking the time in the first place.
*/
drm_intel_bo_map(brw->shader_time.bo, true);
-
- uint32_t *times = brw->shader_time.bo->virtual;
+ void *bo_map = brw->shader_time.bo->virtual;
for (int i = 0; i < brw->shader_time.num_entries; i++) {
- brw->shader_time.cumulative[i] += times[i * SHADER_TIME_STRIDE / 4];
+ uint32_t *times = bo_map + i * 3 * SHADER_TIME_STRIDE;
+
+ brw->shader_time.cumulative[i].time += times[SHADER_TIME_STRIDE * 0 / 4];
+ brw->shader_time.cumulative[i].written += times[SHADER_TIME_STRIDE * 1 / 4];
+ brw->shader_time.cumulative[i].reset += times[SHADER_TIME_STRIDE * 2 / 4];
}
/* Zero the BO out to clear it out for our next collection.
*/
- memset(times, 0, brw->shader_time.bo->size);
+ memset(bo_map, 0, brw->shader_time.bo->size);
drm_intel_bo_unmap(brw->shader_time.bo);
}
#include "glsl/glsl_parser_extras.h"
#include "main/shaderapi.h"
+ static void
+ shader_debug_log_mesa(void *data, const char *fmt, ...)
+ {
+ struct brw_context *brw = (struct brw_context *)data;
+ va_list args;
+
+ va_start(args, fmt);
+ GLuint msg_id = 0;
+ _mesa_gl_vdebug(&brw->ctx, &msg_id,
+ MESA_DEBUG_SOURCE_SHADER_COMPILER,
+ MESA_DEBUG_TYPE_OTHER,
+ MESA_DEBUG_SEVERITY_NOTIFICATION, fmt, args);
+ va_end(args);
+ }
+
+ static void
+ shader_perf_log_mesa(void *data, const char *fmt, ...)
+ {
+ struct brw_context *brw = (struct brw_context *)data;
+
+ va_list args;
+ va_start(args, fmt);
+
+ if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
+ va_list args_copy;
+ va_copy(args_copy, args);
+ vfprintf(stderr, fmt, args_copy);
+ va_end(args_copy);
+ }
+
+ if (brw->perf_debug) {
+ GLuint msg_id = 0;
+ _mesa_gl_vdebug(&brw->ctx, &msg_id,
+ MESA_DEBUG_SOURCE_SHADER_COMPILER,
+ MESA_DEBUG_TYPE_PERFORMANCE,
+ MESA_DEBUG_SEVERITY_MEDIUM, fmt, args);
+ }
+ va_end(args);
+ }
+
struct brw_compiler *
brw_compiler_create(void *mem_ctx, const struct brw_device_info *devinfo)
{
struct brw_compiler *compiler = rzalloc(mem_ctx, struct brw_compiler);
compiler->devinfo = devinfo;
+ compiler->shader_debug_log = shader_debug_log_mesa;
+ compiler->shader_perf_log = shader_perf_log_mesa;
brw_fs_alloc_reg_sets(compiler);
brw_vec4_alloc_reg_set(compiler);
+ if (devinfo->gen >= 8 && !(INTEL_DEBUG & DEBUG_VEC4VS))
+ compiler->scalar_vs = true;
+
+ nir_shader_compiler_options *nir_options =
+ rzalloc(compiler, nir_shader_compiler_options);
+ nir_options->native_integers = true;
+ /* To allow better CSE at the NIR level, we tell NIR to split all ffma
+  * instructions during opt_algebraic (so that, e.g., the mul in a*b+c can
+  * be shared) and we re-combine them into ffma in a later pass.
+  */
+ nir_options->lower_ffma = true;
+ nir_options->lower_sub = true;
+
+ /* We want the GLSL compiler to emit code that uses condition codes */
+ for (int i = 0; i < MESA_SHADER_STAGES; i++) {
+ compiler->glsl_compiler_options[i].MaxUnrollIterations = 32;
+ compiler->glsl_compiler_options[i].MaxIfDepth =
+ devinfo->gen < 6 ? 16 : UINT_MAX;
+
+ compiler->glsl_compiler_options[i].EmitCondCodes = true;
+ compiler->glsl_compiler_options[i].EmitNoNoise = true;
+ compiler->glsl_compiler_options[i].EmitNoMainReturn = true;
+ compiler->glsl_compiler_options[i].EmitNoIndirectInput = true;
+ compiler->glsl_compiler_options[i].EmitNoIndirectOutput =
+ (i == MESA_SHADER_FRAGMENT);
+ compiler->glsl_compiler_options[i].EmitNoIndirectTemp =
+ (i == MESA_SHADER_FRAGMENT);
+ compiler->glsl_compiler_options[i].EmitNoIndirectUniform = false;
+ compiler->glsl_compiler_options[i].LowerClipDistance = true;
+ }
+
+ compiler->glsl_compiler_options[MESA_SHADER_VERTEX].OptimizeForAOS = true;
+ compiler->glsl_compiler_options[MESA_SHADER_GEOMETRY].OptimizeForAOS = true;
+
+ if (compiler->scalar_vs) {
+ /* If we're using the scalar backend for vertex shaders, we need to
+ * configure these accordingly.
+ */
+ compiler->glsl_compiler_options[MESA_SHADER_VERTEX].EmitNoIndirectOutput = true;
+ compiler->glsl_compiler_options[MESA_SHADER_VERTEX].EmitNoIndirectTemp = true;
+ compiler->glsl_compiler_options[MESA_SHADER_VERTEX].OptimizeForAOS = false;
+
+ compiler->glsl_compiler_options[MESA_SHADER_VERTEX].NirOptions = nir_options;
+ }
+
+ compiler->glsl_compiler_options[MESA_SHADER_FRAGMENT].NirOptions = nir_options;
+ compiler->glsl_compiler_options[MESA_SHADER_COMPUTE].NirOptions = nir_options;
+
return compiler;
}
case MESA_SHADER_FRAGMENT:
return true;
case MESA_SHADER_VERTEX:
- return brw->scalar_vs;
+ return brw->intelScreen->compiler->scalar_vs;
default:
return false;
}
case GLSL_TYPE_ERROR:
case GLSL_TYPE_INTERFACE:
case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_FUNCTION:
unreachable("not reached");
}
return "gs_ff_sync_set_primitives";
case CS_OPCODE_CS_TERMINATE:
return "cs_terminate";
+ case SHADER_OPCODE_BARRIER:
+ return "barrier";
}
unreachable("not reached");
return false;
}
- backend_visitor::backend_visitor(struct brw_context *brw,
- struct gl_shader_program *shader_prog,
- struct gl_program *prog,
- struct brw_stage_prog_data *stage_prog_data,
- gl_shader_stage stage)
- : brw(brw),
- devinfo(brw->intelScreen->devinfo),
- ctx(&brw->ctx),
+ backend_shader::backend_shader(const struct brw_compiler *compiler,
+ void *log_data,
+ void *mem_ctx,
+ struct gl_shader_program *shader_prog,
+ struct gl_program *prog,
+ struct brw_stage_prog_data *stage_prog_data,
+ gl_shader_stage stage)
+ : compiler(compiler),
+ log_data(log_data),
+ devinfo(compiler->devinfo),
shader(shader_prog ?
(struct brw_shader *)shader_prog->_LinkedShaders[stage] : NULL),
shader_prog(shader_prog),
prog(prog),
stage_prog_data(stage_prog_data),
+ mem_ctx(mem_ctx),
cfg(NULL),
stage(stage)
{
case BRW_OPCODE_LINE:
case BRW_OPCODE_LRP:
case BRW_OPCODE_MAC:
- case BRW_OPCODE_MACH:
case BRW_OPCODE_MAD:
case BRW_OPCODE_MATH:
case BRW_OPCODE_MOV:
case SHADER_OPCODE_MEMORY_FENCE:
case SHADER_OPCODE_URB_WRITE_SIMD8:
case FS_OPCODE_FB_WRITE:
+ case SHADER_OPCODE_BARRIER:
return true;
default:
return false;
}
void
- backend_visitor::dump_instructions()
+ backend_shader::dump_instructions()
{
dump_instructions(NULL);
}
void
- backend_visitor::dump_instructions(const char *name)
+ backend_shader::dump_instructions(const char *name)
{
FILE *file = stderr;
if (name && geteuid() != 0) {
}
void
- backend_visitor::calculate_cfg()
+ backend_shader::calculate_cfg()
{
if (this->cfg)
return;
}
void
- backend_visitor::invalidate_cfg()
+ backend_shader::invalidate_cfg()
{
ralloc_free(this->cfg);
this->cfg = NULL;
* trigger some of our asserts that surface indices are < BRW_MAX_SURFACES.
*/
void
- backend_visitor::assign_common_binding_table_offsets(uint32_t next_binding_table_offset)
+ backend_shader::assign_common_binding_table_offsets(uint32_t next_binding_table_offset)
{
int num_textures = _mesa_fls(prog->SamplersUsed);
case GLSL_TYPE_DOUBLE:
case GLSL_TYPE_ERROR:
case GLSL_TYPE_INTERFACE:
+ case GLSL_TYPE_FUNCTION:
unreachable("not reached");
}
* order we'd walk the type, so walk the list of storage and find anything
* with our name, or the prefix of a component that starts with our name.
*/
- for (unsigned u = 0; u < shader_prog->NumUserUniformStorage; u++) {
+ for (unsigned u = 0; u < shader_prog->NumUniformStorage; u++) {
struct gl_uniform_storage *storage = &shader_prog->UniformStorage[u];
+ if (storage->builtin)
+ continue;
+
if (strncmp(ir->name, storage->name, namelen) != 0 ||
(storage->name[namelen] != 0 &&
storage->name[namelen] != '.' &&
}
void
- vec4_visitor::setup_uniform_clipplane_values()
+ vec4_visitor::setup_uniform_clipplane_values(gl_clip_plane *clip_planes)
{
- gl_clip_plane *clip_planes = brw_select_clip_planes(ctx);
-
for (int i = 0; i < key->nr_userclip_plane_consts; ++i) {
assert(this->uniforms < uniform_array_size);
this->uniform_vector_size[this->uniforms] = 4;
new(mem_ctx) vec4_instruction(SHADER_OPCODE_TXF_MCS,
dst_reg(this, glsl_type::uvec4_type));
inst->base_mrf = 2;
- inst->mlen = 1;
inst->src[1] = sampler;
+ int param_base;
+
+ if (devinfo->gen >= 9) {
+ /* Gen9+ needs a message header in order to use SIMD4x2 mode */
+ vec4_instruction *header_inst = new(mem_ctx)
+ vec4_instruction(VS_OPCODE_SET_SIMD4X2_HEADER_GEN9,
+ dst_reg(MRF, inst->base_mrf));
+
+ emit(header_inst);
+
+ inst->mlen = 2;
+ inst->header_size = 1;
+ param_base = inst->base_mrf + 1;
+ } else {
+ inst->mlen = 1;
+ param_base = inst->base_mrf;
+ }
+
/* parameters are: u, v, r, lod; lod will always be zero due to api restrictions */
- int param_base = inst->base_mrf;
int coord_mask = (1 << ir->coordinate->type->vector_elements) - 1;
int zero_mask = 0xf & ~coord_mask;
unreachable("not reached");
}
+ void
+ vec4_visitor::visit(ir_barrier *)
+ {
+ unreachable("not reached");
+ }
+
void
vec4_visitor::emit_untyped_atomic(unsigned atomic_op, unsigned surf_index,
dst_reg dst, src_reg offset,
*reg = neg_result;
}
- vec4_visitor::vec4_visitor(struct brw_context *brw,
+ vec4_visitor::vec4_visitor(const struct brw_compiler *compiler,
struct brw_vec4_compile *c,
struct gl_program *prog,
const struct brw_vue_prog_key *key,
gl_shader_stage stage,
void *mem_ctx,
bool no_spills,
- shader_time_shader_type st_base,
- shader_time_shader_type st_written,
- shader_time_shader_type st_reset)
- : backend_visitor(brw, shader_prog, prog, &prog_data->base, stage),
+ int shader_time_index)
+ : backend_shader(compiler, NULL, mem_ctx,
+ shader_prog, prog, &prog_data->base, stage),
c(c),
key(key),
prog_data(prog_data),
first_non_payload_grf(0),
need_all_constants_in_pull_buffer(false),
no_spills(no_spills),
- st_base(st_base),
- st_written(st_written),
- st_reset(st_reset)
+ shader_time_index(shader_time_index)
{
- this->mem_ctx = mem_ctx;
this->failed = false;
this->base_ir = NULL;
#include "main/formats.h"
#include "main/fbobject.h"
#include "main/samplerobj.h"
+ #include "main/framebuffer.h"
#include "program/prog_parameter.h"
#include "program/program.h"
#include "intel_mipmap_tree.h"
* Return a bitfield where bit n is set if barycentric interpolation mode n
* (see enum brw_wm_barycentric_interp_mode) is needed by the fragment shader.
*/
-static unsigned
+unsigned
brw_compute_barycentric_interp_modes(struct brw_context *brw,
bool shade_model_flat,
bool persample_shading,
GLuint lookup = 0;
GLuint line_aa;
bool program_uses_dfdy = fp->program.UsesDFdy;
- bool multisample_fbo = ctx->DrawBuffer->Visual.samples > 1;
+ const bool multisample_fbo = _mesa_geometric_samples(ctx->DrawBuffer) > 1;
memset(key, 0, sizeof(*key));
* drawable height in order to invert the Y axis.
*/
if (fp->program.Base.InputsRead & VARYING_BIT_POS) {
- key->drawable_height = ctx->DrawBuffer->Height;
+ key->drawable_height = _mesa_geometric_height(ctx->DrawBuffer);
}
if ((fp->program.Base.InputsRead & VARYING_BIT_POS) || program_uses_dfdy) {
key->persample_shading =
_mesa_get_min_invocations_per_fragment(ctx, &fp->program, true) > 1;
if (key->persample_shading)
- key->persample_2x = ctx->DrawBuffer->Visual.samples == 2;
+ key->persample_2x = _mesa_geometric_samples(ctx->DrawBuffer) == 2;
key->compute_pos_offset =
_mesa_get_min_invocations_per_fragment(ctx, &fp->program, false) > 1 &&
{ "urb", DEBUG_URB },
{ "vs", DEBUG_VS },
{ "clip", DEBUG_CLIP },
- { "aub", DEBUG_AUB },
+ { "foob", DEBUG_AUB }, /* disable aub dumbing in the dri driver */
{ "shader_time", DEBUG_SHADER_TIME },
{ "no16", DEBUG_NO16 },
{ "blorp", DEBUG_BLORP },
}
void
- brw_process_intel_debug_variable(struct brw_context *brw)
+ brw_process_intel_debug_variable(struct intel_screen *screen)
{
uint64_t intel_debug = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
(void) p_atomic_cmpxchg(&INTEL_DEBUG, 0, intel_debug);
if (INTEL_DEBUG & DEBUG_BUFMGR)
- dri_bufmgr_set_debug(brw->bufmgr, true);
+ dri_bufmgr_set_debug(screen->bufmgr, true);
- if ((INTEL_DEBUG & DEBUG_SHADER_TIME) && brw->gen < 7) {
+ if ((INTEL_DEBUG & DEBUG_SHADER_TIME) && screen->devinfo->gen < 7) {
fprintf(stderr,
"shader_time debugging requires gen7 (Ivybridge) or better.\n");
INTEL_DEBUG &= ~DEBUG_SHADER_TIME;
}
- if (INTEL_DEBUG & DEBUG_PERF)
- brw->perf_debug = true;
-
if (INTEL_DEBUG & DEBUG_AUB)
- drm_intel_bufmgr_gem_set_aub_dump(brw->bufmgr, true);
+ drm_intel_bufmgr_gem_set_aub_dump(screen->bufmgr, true);
}
/**
ctx->Extensions.EXT_shader_integer_mix = ctx->Const.GLSLVersion >= 130;
ctx->Extensions.EXT_timer_query = true;
- if (brw->gen == 5 || can_write_oacontrol(brw)) {
- ctx->Extensions.AMD_performance_monitor = true;
- ctx->Extensions.INTEL_performance_query = true;
+ if (brw->bufmgr) {
+ if (brw->gen == 5 || can_write_oacontrol(brw)) {
+ ctx->Extensions.AMD_performance_monitor = true;
+ ctx->Extensions.INTEL_performance_query = true;
+ }
}
}
uint64_t dummy;
ctx->Extensions.ARB_blend_func_extended =
+ brw->optionCache.info == NULL ||
!driQueryOptionb(&brw->optionCache, "disable_blend_func_extended");
ctx->Extensions.ARB_conditional_render_inverted = true;
ctx->Extensions.ARB_draw_buffers_blend = true;
ctx->Extensions.OES_depth_texture_cube_map = true;
/* Test if the kernel has the ioctl. */
- if (drm_intel_reg_read(brw->bufmgr, TIMESTAMP, &dummy) == 0)
+ if (brw->bufmgr && drm_intel_reg_read(brw->bufmgr, TIMESTAMP, &dummy) == 0)
ctx->Extensions.ARB_timer_query = true;
/* Only enable this in core profile because other parts of Mesa behave
}
}
+ brw->predicate.supported = false;
+
if (brw->gen >= 7) {
ctx->Extensions.ARB_conservative_depth = true;
ctx->Extensions.ARB_derivative_control = true;
+ ctx->Extensions.ARB_framebuffer_no_attachments = true;
ctx->Extensions.ARB_gpu_shader5 = true;
ctx->Extensions.ARB_shader_atomic_counters = true;
ctx->Extensions.ARB_texture_compression_bptc = true;
ctx->Extensions.ARB_texture_view = true;
- if (can_do_pipelined_register_writes(brw)) {
+ if (brw->bufmgr &&
+ can_do_pipelined_register_writes(brw)) {
ctx->Extensions.ARB_draw_indirect = true;
ctx->Extensions.ARB_transform_feedback2 = true;
ctx->Extensions.ARB_transform_feedback3 = true;
ctx->Extensions.ARB_transform_feedback_instanced = true;
+
+ if (brw->intelScreen->cmd_parser_version >= 2)
+ brw->predicate.supported = true;
}
/* Only enable this in core profile because other parts of Mesa behave
if (ctx->API != API_OPENGL_CORE)
ctx->Extensions.ARB_color_buffer_float = true;
- if (ctx->Mesa_DXTn || driQueryOptionb(&brw->optionCache, "force_s3tc_enable"))
+ if (ctx->Mesa_DXTn ||
+ (brw->optionCache.info != NULL &&
+ driQueryOptionb(&brw->optionCache, "force_s3tc_enable")))
ctx->Extensions.EXT_texture_compression_s3tc = true;
ctx->Extensions.ANGLE_texture_compression_dxt = true;
#include "swrast/s_renderbuffer.h"
#include "util/ralloc.h"
#include "brw_shader.h"
+ #include "glsl/nir/nir.h"
#include "utils.h"
#include "xmlpool.h"
if (!intelScreen->devinfo)
return false;
+ brw_process_intel_debug_variable(intelScreen);
+
intelScreen->hw_must_use_separate_stencil = intelScreen->devinfo->gen >= 7;
intelScreen->hw_has_swizzling = intel_detect_swizzling(intelScreen);
(ret != -1 || errno != EINVAL);
}
+ struct drm_i915_getparam getparam;
+ getparam.param = I915_PARAM_CMD_PARSER_VERSION;
+ getparam.value = &intelScreen->cmd_parser_version;
+ const int ret = drmIoctl(psp->fd, DRM_IOCTL_I915_GETPARAM, &getparam);
+ if (ret == -1)
+ intelScreen->cmd_parser_version = 0;
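+ /* cmd_parser_version is consulted later when enabling features that the
+  * kernel command parser must allow; MI_PREDICATE support, for instance,
+  * requires version 2 or later.
+  */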
+
psp->extensions = !intelScreen->has_context_reset_notification
? intelScreenExtensions : intelRobustScreenExtensions;
return (const __DRIconfig**) intel_screen_make_configs(psp);
}
+struct intel_screen *
+intel_screen_create(int fd)
+{
+ __DRIscreen *psp;
+ __DRIconfig **configs;
+ int i;
+
+ psp = malloc(sizeof(*psp));
+ if (psp == NULL)
+ return NULL;
+
+ psp->image.loader = (void *) 1; /* Don't complain about this being NULL */
+ psp->fd = fd;
+ psp->dri2.useInvalidate = (void *) 1;
+
+ configs = (__DRIconfig **) intelInitScreen2(psp);
+ for (i = 0; configs[i]; i++)
+ free(configs[i]);
+ free(configs);
+
+ return psp->driverPrivate;
+}
+
+void
+intel_screen_destroy(struct intel_screen *screen)
+{
+ __DRIscreen *psp;
+
+ psp = screen->driScrnPriv;
+ intelDestroyScreen(screen->driScrnPriv);
+ free(psp);
+}
+
+
+struct brw_context *
+intel_context_create(struct intel_screen *screen)
+{
+ __DRIcontext *driContextPriv;
+ struct brw_context *brw;
+ unsigned error;
+
+ driContextPriv = malloc(sizeof(*driContextPriv));
+ if (driContextPriv == NULL)
+ return NULL;
+
+ driContextPriv->driScreenPriv = screen->driScrnPriv;
+
+ brwCreateContext(API_OPENGL_CORE,
+ NULL, /* visual */
+ driContextPriv,
+ 3, 0,
+ 0, /* flags */
+ false, /* notify_reset */
+ &error,
+ NULL);
+
+ brw = driContextPriv->driverPrivate;
+ brw->ctx.FirstTimeCurrent = false;
+
+ return driContextPriv->driverPrivate;
+}
+
+void
+intel_context_destroy(struct brw_context *brw)
+{
+ __DRIcontext *driContextPriv;
+
+ driContextPriv = brw->driContext;
+ intelDestroyContext(driContextPriv);
+ free(driContextPriv);
+}
+
struct intel_buffer {
__DRIbuffer base;
drm_intel_bo *bo;
#include "glapi/glapi.h"
#include "math/m_matrix.h" /* GLmatrix */
#include "glsl/shader_enums.h"
- #include "util/simple_list.h" /* struct simple_node */
#include "main/formats.h" /* MESA_FORMAT_COUNT */
{
GLboolean rgbMode;
GLboolean floatMode;
- GLboolean colorIndexMode; /* XXX is this used anywhere? */
GLuint doubleBufferMode;
GLuint stereoMode;
GLbitfield64 DoubleInputsRead; /**< Bitmask of which input regs are read and are doubles */
GLbitfield64 OutputsWritten; /**< Bitmask of which output regs are written */
GLbitfield SystemValuesRead; /**< Bitmask of SYSTEM_VALUE_x inputs used */
- GLbitfield InputFlags[MAX_PROGRAM_INPUTS]; /**< PROG_PARAM_BIT_x flags */
- GLbitfield OutputFlags[MAX_PROGRAM_OUTPUTS]; /**< PROG_PARAM_BIT_x flags */
GLbitfield TexturesUsed[MAX_COMBINED_TEXTURE_IMAGE_UNITS]; /**< TEXTURE_x_BIT bitmask */
GLbitfield SamplersUsed; /**< Bitfield of which samplers are used */
GLbitfield ShadowSamplers; /**< Texture units used for shadow sampling. */
*/
struct gl_geometry_program_state
{
- GLboolean Enabled; /**< GL_ARB_GEOMETRY_SHADER4 */
- GLboolean _Enabled; /**< Enabled and valid program? */
- struct gl_geometry_program *Current; /**< user-bound geometry program */
-
/** Currently enabled and valid program (including internal programs
* and compiled shader programs).
*/
struct gl_geometry_program *_Current;
-
- GLfloat Parameters[MAX_PROGRAM_ENV_PARAMS][4]; /**< Env params */
};
/**
*/
struct gl_compute_program_state
{
- struct gl_compute_program *Current; /**< user-bound compute program */
-
/** Currently enabled and valid program (including internal programs
* and compiled shader programs).
*/
*/
GLuint Binding;
+ /**
+ * Vulkan descriptor set qualifier for this block.
+ */
+ GLuint Set;
+
/**
* Minimum size (in bytes) of a buffer object to back this uniform buffer
* (GL_UNIFORM_BLOCK_DATA_SIZE).
} Comp;
/* post-link info: */
- unsigned NumUserUniformStorage;
+ unsigned NumUniformStorage;
unsigned NumHiddenUniforms;
struct gl_uniform_storage *UniformStorage;
mtx_t Mutex;
+ GLchar *Label; /**< GL_KHR_debug */
+
/**
* Programs used for rendering
*
struct _mesa_HashTable *Programs; /**< All vertex/fragment programs */
struct gl_vertex_program *DefaultVertexProgram;
struct gl_fragment_program *DefaultFragmentProgram;
- struct gl_geometry_program *DefaultGeometryProgram;
/*@}*/
/* GL_ATI_fragment_shader */
*/
struct gl_config Visual;
- GLuint Width, Height; /**< size of frame buffer in pixels */
+ /**
+ * Size of frame buffer in pixels. If there are no attachments, then both
+ * of these are 0.
+ */
+ GLuint Width, Height;
- /** \name Drawing bounds (Intersection of buffer size and scissor box) */
+ /**
+ * In the case that the framebuffer has no attachment (i.e.
+ * GL_ARB_framebuffer_no_attachments) then the geometry of
+ * the framebuffer is specified by the default values.
+ */
+ struct {
+ GLuint Width, Height, Layers, NumSamples;
+ GLboolean FixedSampleLocations;
+ } DefaultGeometry;
+
+ /** \name Drawing bounds (Intersection of buffer size and scissor box)
+ * The drawing region is given by [_Xmin, _Xmax) x [_Ymin, _Ymax),
+ * (inclusive for _Xmin and _Ymin while exclusive for _Xmax and _Ymax)
+ */
/*@{*/
- GLint _Xmin, _Xmax; /**< inclusive */
- GLint _Ymin, _Ymax; /**< exclusive */
+ GLint _Xmin, _Xmax;
+ GLint _Ymin, _Ymax;
/*@}*/
/** \name Derived Z buffer stuff */
/** One of the GL_FRAMEBUFFER_(IN)COMPLETE_* tokens */
GLenum _Status;
+ /** Whether at least one Attachment has Type != GL_NONE.
+  * NOTE: Width and Height are set to 0 when there are no attachments; a
+  * backend driver supporting the extension
+  * GL_ARB_framebuffer_no_attachments must check the _HasAttachments flag
+  * and, if it is GL_FALSE, use the values in DefaultGeometry to initialize
+  * its viewport, scissor and so on (in particular _Xmin, _Xmax, _Ymin and
+  * _Ymax do NOT take into account _HasAttachments being false). To get the
+ * geometry of the framebuffer, the helper functions
+ * _mesa_geometric_width(),
+ * _mesa_geometric_height(),
+ * _mesa_geometric_samples() and
+ * _mesa_geometric_layers()
+ * are available that check _HasAttachments.
+ */
+ bool _HasAttachments;
+
/** Integer color values */
GLboolean _IntegerColor;
/**
* The maximum number of layers in the framebuffer, or 0 if the framebuffer
* is not layered. For cube maps and cube map arrays, each cube face
- * counts as a layer.
+ * counts as a layer. As with Width and Height, a backend driver
+ * supporting GL_ARB_framebuffer_no_attachments must use DefaultGeometry
+ * when _HasAttachments is false.
*/
GLuint MaxNumLayers;
GLuint MaxRenderbufferSize; /**< GL_EXT_framebuffer_object */
GLuint MaxSamples; /**< GL_ARB_framebuffer_object */
+ /**
+ * GL_ARB_framebuffer_no_attachments
+ */
+ GLuint MaxFramebufferWidth;
+ GLuint MaxFramebufferHeight;
+ GLuint MaxFramebufferLayers;
+ GLuint MaxFramebufferSamples;
+
/** Number of varying vectors between any two shader stages. */
GLuint MaxVarying;
GLboolean ARB_fragment_program;
GLboolean ARB_fragment_program_shadow;
GLboolean ARB_fragment_shader;
+ GLboolean ARB_framebuffer_no_attachments;
GLboolean ARB_framebuffer_object;
GLboolean ARB_explicit_attrib_location;
GLboolean ARB_explicit_uniform_location;
DEBUG_INCOMPLETE_FBO = (1 << 3)
};
-
+ static inline bool
+ _mesa_active_fragment_shader_has_atomic_ops(const struct gl_context *ctx)
+ {
+ return ctx->Shader._CurrentFragmentProgram != NULL &&
+ ctx->Shader._CurrentFragmentProgram->NumAtomicBuffers > 0;
+ }
#ifdef __cplusplus
}
virtual void visit(ir_if *);
virtual void visit(ir_emit_vertex *);
virtual void visit(ir_end_primitive *);
+ virtual void visit(ir_barrier *);
/*@}*/
src_reg result;
dst_reg dst, src_reg src0, src_reg src1,
unsigned elements)
{
- static const gl_inst_opcode dot_opcodes[] = {
+ static const enum prog_opcode dot_opcodes[] = {
OPCODE_DP2, OPCODE_DP3, OPCODE_DP4
};
case GLSL_TYPE_VOID:
case GLSL_TYPE_ERROR:
case GLSL_TYPE_INTERFACE:
+ case GLSL_TYPE_FUNCTION:
assert(!"Invalid type in type_size");
break;
}
assert(!"Geometry shaders not supported.");
}
+ void
+ ir_to_mesa_visitor::visit(ir_barrier *)
+ {
+ unreachable("GLSL barrier() not supported.");
+ }
+
ir_to_mesa_visitor::ir_to_mesa_visitor()
{
result.file = PROGRAM_UNDEFINED;
if (!found)
continue;
+ struct gl_uniform_storage *storage =
+ &shader_program->UniformStorage[location];
+
+ /* Do not associate any uniform storage to built-in uniforms */
+ if (storage->builtin)
+ continue;
+
if (location != last_location) {
- struct gl_uniform_storage *storage =
- &shader_program->UniformStorage[location];
enum gl_uniform_driver_format format = uniform_native;
unsigned columns = 0;
case GLSL_TYPE_STRUCT:
case GLSL_TYPE_ERROR:
case GLSL_TYPE_INTERFACE:
+ case GLSL_TYPE_FUNCTION:
assert(!"Should not get here.");
break;
}
mesa_inst->Opcode = inst->op;
mesa_inst->CondUpdate = inst->cond_update;
if (inst->saturate)
- mesa_inst->SaturateMode = SATURATE_ZERO_ONE;
+ mesa_inst->Saturate = GL_TRUE;
mesa_inst->DstReg.File = inst->dst.file;
mesa_inst->DstReg.Index = inst->dst.index;
mesa_inst->DstReg.CondMask = inst->dst.cond_mask;
--- /dev/null
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <sys/stat.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include "private.h"
+
+#include <brw_context.h>
+#include <brw_wm.h> /* brw_new_shader_program is here */
+
+#include <brw_vs.h>
+#include <brw_gs.h>
+#include <brw_cs.h>
+
+#include <mesa/main/shaderobj.h>
+#include <mesa/main/fbobject.h>
+#include <mesa/main/context.h>
+#include <mesa/program/program.h>
+#include <glsl/program.h>
+
+#define SPIR_V_MAGIC_NUMBER 0x07230203
+
+static void
+fail_if(int cond, const char *format, ...)
+{
+ va_list args;
+
+ if (!cond)
+ return;
+
+ va_start(args, format);
+ vfprintf(stderr, format, args);
+ va_end(args);
+
+ exit(1);
+}
+
+static VkResult
+set_binding_table_layout(struct brw_stage_prog_data *prog_data,
+ struct anv_pipeline *pipeline, uint32_t stage)
+{
+ uint32_t bias, count, k, *map;
+ struct anv_pipeline_layout *layout = pipeline->layout;
+
+ /* Having no pipeline layout is valid for shaders that bind no resources. */
+ if (pipeline->layout == NULL)
+ return VK_SUCCESS;
+
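+ /* For fragment shaders the first MAX_RTS binding-table entries are
+  * reserved for render targets, so resource surfaces are biased past
+  * them; other stages start at zero.
+  */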
+ if (stage == VK_SHADER_STAGE_FRAGMENT)
+ bias = MAX_RTS;
+ else
+ bias = 0;
+
+ count = layout->stage[stage].surface_count;
+ prog_data->map_entries =
+ (uint32_t *) malloc(count * sizeof(prog_data->map_entries[0]));
+ if (prog_data->map_entries == NULL)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ k = bias;
+ map = prog_data->map_entries;
+ for (uint32_t i = 0; i < layout->num_sets; i++) {
+ prog_data->bind_map[i].index = map;
+ for (uint32_t j = 0; j < layout->set[i].layout->stage[stage].surface_count; j++)
+ *map++ = k++;
+
+ prog_data->bind_map[i].index_count =
+ layout->set[i].layout->stage[stage].surface_count;
+ }
+
+ return VK_SUCCESS;
+}
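+
+/* Illustrative example (assuming MAX_RTS == 8): for a fragment stage with
+ * two descriptor sets holding 3 and 2 surfaces, map_entries becomes
+ * { 8, 9, 10, 11, 12 }; bind_map[0].index points at the first three
+ * entries and bind_map[1].index at the last two.
+ */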
+
+static void
+brw_vs_populate_key(struct brw_context *brw,
+ struct brw_vertex_program *vp,
+ struct brw_vs_prog_key *key)
+{
+ struct gl_context *ctx = &brw->ctx;
+ /* BRW_NEW_VERTEX_PROGRAM */
+ struct gl_program *prog = (struct gl_program *) vp;
+
+ memset(key, 0, sizeof(*key));
+
+ /* Just upload the program verbatim for now. Always send it all
+ * the inputs it asks for, whether they are varying or not.
+ */
+ key->base.program_string_id = vp->id;
+ brw_setup_vue_key_clip_info(brw, &key->base,
+ vp->program.Base.UsesClipDistanceOut);
+
+ /* _NEW_POLYGON */
+ if (brw->gen < 6) {
+ key->copy_edgeflag = (ctx->Polygon.FrontMode != GL_FILL ||
+ ctx->Polygon.BackMode != GL_FILL);
+ }
+
+ if (prog->OutputsWritten & (VARYING_BIT_COL0 | VARYING_BIT_COL1 |
+ VARYING_BIT_BFC0 | VARYING_BIT_BFC1)) {
+ /* _NEW_LIGHT | _NEW_BUFFERS */
+ key->clamp_vertex_color = ctx->Light._ClampVertexColor;
+ }
+
+ /* _NEW_POINT */
+ if (brw->gen < 6 && ctx->Point.PointSprite) {
+ for (int i = 0; i < 8; i++) {
+ if (ctx->Point.CoordReplace[i])
+ key->point_coord_replace |= (1 << i);
+ }
+ }
+
+ /* _NEW_TEXTURE */
+ brw_populate_sampler_prog_key_data(ctx, prog, brw->vs.base.sampler_count,
+ &key->base.tex);
+}
+
+static bool
+really_do_vs_prog(struct brw_context *brw,
+ struct gl_shader_program *prog,
+ struct brw_vertex_program *vp,
+ struct brw_vs_prog_key *key, struct anv_pipeline *pipeline)
+{
+ GLuint program_size;
+ const GLuint *program;
+ struct brw_vs_compile c;
+ struct brw_vs_prog_data *prog_data = &pipeline->vs_prog_data;
+ struct brw_stage_prog_data *stage_prog_data = &prog_data->base.base;
+ void *mem_ctx;
+ struct gl_shader *vs = NULL;
+
+ if (prog)
+ vs = prog->_LinkedShaders[MESA_SHADER_VERTEX];
+
+ memset(&c, 0, sizeof(c));
+ memcpy(&c.key, key, sizeof(*key));
+ memset(prog_data, 0, sizeof(*prog_data));
+
+ mem_ctx = ralloc_context(NULL);
+
+ c.vp = vp;
+
+ /* Allocate the references to the uniforms that will end up in the
+ * prog_data associated with the compiled program, and which will be freed
+ * by the state cache.
+ */
+ int param_count;
+ if (vs) {
+ /* We add padding around uniform values below vec4 size, with the worst
+ * case being a float value that gets blown up to a vec4, so be
+ * conservative here.
+ */
+ param_count = vs->num_uniform_components * 4;
+
+ } else {
+ param_count = vp->program.Base.Parameters->NumParameters * 4;
+ }
+ /* vec4_visitor::setup_uniform_clipplane_values() also uploads user clip
+ * planes as uniforms.
+ */
+ param_count += c.key.base.nr_userclip_plane_consts * 4;
+
+ /* Setting nr_params here NOT to the size of the param and pull_param
+ * arrays, but to the number of uniform components vec4_visitor
+ * needs. vec4_visitor::setup_uniforms() will set it back to a proper value.
+ */
+ stage_prog_data->nr_params = ALIGN(param_count, 4) / 4;
+ if (vs) {
+ stage_prog_data->nr_params += vs->num_samplers;
+ }
+
+ GLbitfield64 outputs_written = vp->program.Base.OutputsWritten;
+ prog_data->inputs_read = vp->program.Base.InputsRead;
+
+ if (c.key.copy_edgeflag) {
+ outputs_written |= BITFIELD64_BIT(VARYING_SLOT_EDGE);
+ prog_data->inputs_read |= VERT_BIT_EDGEFLAG;
+ }
+
+ if (brw->gen < 6) {
+ /* Put dummy slots into the VUE for the SF to put the replaced
+ * point sprite coords in. We shouldn't need these dummy slots,
+ * which take up precious URB space, but it would mean that the SF
+ * doesn't get nice aligned pairs of input coords into output
+ * coords, which would be a pain to handle.
+ */
+ for (int i = 0; i < 8; i++) {
+ if (c.key.point_coord_replace & (1 << i))
+ outputs_written |= BITFIELD64_BIT(VARYING_SLOT_TEX0 + i);
+ }
+
+ /* if back colors are written, allocate slots for front colors too */
+ if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC0))
+ outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL0);
+ if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC1))
+ outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL1);
+ }
+
+ /* In order for legacy clipping to work, we need to populate the clip
+ * distance varying slots whenever clipping is enabled, even if the vertex
+ * shader doesn't write to gl_ClipDistance.
+ */
+ if (c.key.base.userclip_active) {
+ outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0);
+ outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1);
+ }
+
+ brw_compute_vue_map(brw->intelScreen->devinfo,
+ &prog_data->base.vue_map, outputs_written);
+
+ set_binding_table_layout(&prog_data->base.base, pipeline,
+ VK_SHADER_STAGE_VERTEX);
+
+ /* Emit GEN4 code.
+ */
+ program = brw_vs_emit(brw, prog, &c, prog_data, mem_ctx, &program_size);
+ if (program == NULL) {
+ ralloc_free(mem_ctx);
+ return false;
+ }
+
+ struct anv_state vs_state = anv_state_stream_alloc(&pipeline->program_stream,
+ program_size, 64);
+ memcpy(vs_state.map, program, program_size);
+
+ pipeline->vs_simd8 = vs_state.offset;
+
+ ralloc_free(mem_ctx);
+
+ return true;
+}
+
+void brw_wm_populate_key(struct brw_context *brw,
+ struct brw_fragment_program *fp,
+ struct brw_wm_prog_key *key)
+{
+ struct gl_context *ctx = &brw->ctx;
+ struct gl_program *prog = (struct gl_program *) brw->fragment_program;
+ GLuint lookup = 0;
+ GLuint line_aa;
+ bool program_uses_dfdy = fp->program.UsesDFdy;
+ struct gl_framebuffer draw_buffer;
+ bool multisample_fbo;
+
+ memset(key, 0, sizeof(*key));
+
+ for (int i = 0; i < MAX_SAMPLERS; i++) {
+ /* Assume color sampler, no swizzling. */
+ key->tex.swizzles[i] = SWIZZLE_XYZW;
+ }
+
+ /* A non-zero framebuffer name indicates that the framebuffer was created by
+ * the user rather than the window system. */
+ draw_buffer.Name = 1;
+ draw_buffer.Visual.samples = 1;
+ draw_buffer._NumColorDrawBuffers = 1;
+ draw_buffer.Width = 400;
+ draw_buffer.Height = 400;
+ ctx->DrawBuffer = &draw_buffer;
+
+ multisample_fbo = ctx->DrawBuffer->Visual.samples > 1;
+
+ /* Build the index for table lookup
+ */
+ if (brw->gen < 6) {
+ /* _NEW_COLOR */
+ if (fp->program.UsesKill || ctx->Color.AlphaEnabled)
+ lookup |= IZ_PS_KILL_ALPHATEST_BIT;
+
+ if (fp->program.Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
+ lookup |= IZ_PS_COMPUTES_DEPTH_BIT;
+
+ /* _NEW_DEPTH */
+ if (ctx->Depth.Test)
+ lookup |= IZ_DEPTH_TEST_ENABLE_BIT;
+
+ if (ctx->Depth.Test && ctx->Depth.Mask) /* ?? */
+ lookup |= IZ_DEPTH_WRITE_ENABLE_BIT;
+
+ /* _NEW_STENCIL | _NEW_BUFFERS */
+ if (ctx->Stencil._Enabled) {
+ lookup |= IZ_STENCIL_TEST_ENABLE_BIT;
+
+ if (ctx->Stencil.WriteMask[0] ||
+ ctx->Stencil.WriteMask[ctx->Stencil._BackFace])
+ lookup |= IZ_STENCIL_WRITE_ENABLE_BIT;
+ }
+ key->iz_lookup = lookup;
+ }
+
+ line_aa = AA_NEVER;
+
+ /* _NEW_LINE, _NEW_POLYGON, BRW_NEW_REDUCED_PRIMITIVE */
+ if (ctx->Line.SmoothFlag) {
+ if (brw->reduced_primitive == GL_LINES) {
+ line_aa = AA_ALWAYS;
+ }
+ else if (brw->reduced_primitive == GL_TRIANGLES) {
+ if (ctx->Polygon.FrontMode == GL_LINE) {
+ line_aa = AA_SOMETIMES;
+
+ if (ctx->Polygon.BackMode == GL_LINE ||
+ (ctx->Polygon.CullFlag &&
+ ctx->Polygon.CullFaceMode == GL_BACK))
+ line_aa = AA_ALWAYS;
+ }
+ else if (ctx->Polygon.BackMode == GL_LINE) {
+ line_aa = AA_SOMETIMES;
+
+ if ((ctx->Polygon.CullFlag &&
+ ctx->Polygon.CullFaceMode == GL_FRONT))
+ line_aa = AA_ALWAYS;
+ }
+ }
+ }
+
+ key->line_aa = line_aa;
+
+ /* _NEW_HINT */
+ key->high_quality_derivatives =
+ ctx->Hint.FragmentShaderDerivative == GL_NICEST;
+
+ if (brw->gen < 6)
+ key->stats_wm = brw->stats_wm;
+
+ /* _NEW_LIGHT */
+ key->flat_shade = (ctx->Light.ShadeModel == GL_FLAT);
+
+ /* _NEW_FRAG_CLAMP | _NEW_BUFFERS */
+ key->clamp_fragment_color = ctx->Color._ClampFragmentColor;
+
+ /* _NEW_TEXTURE */
+ brw_populate_sampler_prog_key_data(ctx, prog, brw->wm.base.sampler_count,
+ &key->tex);
+
+ /* _NEW_BUFFERS */
+ /*
+ * Include the draw buffer origin and height so that we can calculate
+ * fragment position values relative to the bottom left of the drawable,
+ * from the incoming screen origin relative position we get as part of our
+ * payload.
+ *
+ * This is only needed for the WM_WPOSXY opcode when the fragment program
+ * uses the gl_FragCoord input.
+ *
+ * We could avoid recompiling by including this as a constant referenced by
+ * our program, but if we were to do that it would also be nice to handle
+ * getting that constant updated at batchbuffer submit time (when we
+ * hold the lock and know where the buffer really is) rather than at emit
+ * time when we don't hold the lock and are just guessing. We could also
+ * just avoid using this as key data if the program doesn't use
+ * fragment.position.
+ *
+ * For DRI2 the origin_x/y will always be (0,0) but we still need the
+ * drawable height in order to invert the Y axis.
+ */
+ if (fp->program.Base.InputsRead & VARYING_BIT_POS) {
+ key->drawable_height = ctx->DrawBuffer->Height;
+ }
+
+ if ((fp->program.Base.InputsRead & VARYING_BIT_POS) || program_uses_dfdy) {
+ key->render_to_fbo = _mesa_is_user_fbo(ctx->DrawBuffer);
+ }
+
+ /* _NEW_BUFFERS */
+ key->nr_color_regions = ctx->DrawBuffer->_NumColorDrawBuffers;
+
+ /* _NEW_MULTISAMPLE, _NEW_COLOR, _NEW_BUFFERS */
+ key->replicate_alpha = ctx->DrawBuffer->_NumColorDrawBuffers > 1 &&
+ (ctx->Multisample.SampleAlphaToCoverage || ctx->Color.AlphaEnabled);
+
+ /* _NEW_BUFFERS _NEW_MULTISAMPLE */
+ /* Ignore sample qualifier while computing this flag. */
+ key->persample_shading =
+ _mesa_get_min_invocations_per_fragment(ctx, &fp->program, true) > 1;
+ if (key->persample_shading)
+ key->persample_2x = ctx->DrawBuffer->Visual.samples == 2;
+
+ key->compute_pos_offset =
+ _mesa_get_min_invocations_per_fragment(ctx, &fp->program, false) > 1 &&
+ fp->program.Base.SystemValuesRead & SYSTEM_BIT_SAMPLE_POS;
+
+ key->compute_sample_id =
+ multisample_fbo &&
+ ctx->Multisample.Enabled &&
+ (fp->program.Base.SystemValuesRead & SYSTEM_BIT_SAMPLE_ID);
+
+ /* BRW_NEW_VUE_MAP_GEOM_OUT */
+ if (brw->gen < 6 || _mesa_bitcount_64(fp->program.Base.InputsRead &
+ BRW_FS_VARYING_INPUT_MASK) > 16)
+ key->input_slots_valid = brw->vue_map_geom_out.slots_valid;
+
+ /* _NEW_COLOR | _NEW_BUFFERS */
+ /* Pre-gen6, the hardware alpha test always used each render
+ * target's alpha to do alpha test, as opposed to render target 0's alpha
+ * like GL requires. Fix that by building the alpha test into the
+ * shader, and we'll skip enabling the fixed function alpha test.
+ */
+ if (brw->gen < 6 && ctx->DrawBuffer->_NumColorDrawBuffers > 1 && ctx->Color.AlphaEnabled) {
+ key->alpha_test_func = ctx->Color.AlphaFunc;
+ key->alpha_test_ref = ctx->Color.AlphaRef;
+ }
+
+ /* The unique fragment program ID */
+ key->program_string_id = fp->id;
+
+ ctx->DrawBuffer = NULL;
+}
+
+static uint8_t
+computed_depth_mode(struct gl_fragment_program *fp)
+{
+ if (fp->Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
+ switch (fp->FragDepthLayout) {
+ case FRAG_DEPTH_LAYOUT_NONE:
+ case FRAG_DEPTH_LAYOUT_ANY:
+ return BRW_PSCDEPTH_ON;
+ case FRAG_DEPTH_LAYOUT_GREATER:
+ return BRW_PSCDEPTH_ON_GE;
+ case FRAG_DEPTH_LAYOUT_LESS:
+ return BRW_PSCDEPTH_ON_LE;
+ case FRAG_DEPTH_LAYOUT_UNCHANGED:
+ return BRW_PSCDEPTH_OFF;
+ }
+ }
+ return BRW_PSCDEPTH_OFF;
+}
+
+static bool
+really_do_wm_prog(struct brw_context *brw,
+ struct gl_shader_program *prog,
+ struct brw_fragment_program *fp,
+ struct brw_wm_prog_key *key, struct anv_pipeline *pipeline)
+{
+ struct gl_context *ctx = &brw->ctx;
+ void *mem_ctx = ralloc_context(NULL);
+ struct brw_wm_prog_data *prog_data = &pipeline->wm_prog_data;
+ struct gl_shader *fs = NULL;
+ unsigned int program_size;
+ const uint32_t *program;
+
+ if (prog)
+ fs = prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
+
+ memset(prog_data, 0, sizeof(*prog_data));
+
+ /* key->alpha_test_func means simulating alpha testing via discards,
+ * so the shader definitely kills pixels.
+ */
+ prog_data->uses_kill = fp->program.UsesKill || key->alpha_test_func;
+
+ prog_data->computed_depth_mode = computed_depth_mode(&fp->program);
+
+ /* Allocate the references to the uniforms that will end up in the
+ * prog_data associated with the compiled program, and which will be freed
+ * by the state cache.
+ */
+ int param_count;
+ if (fs) {
+ param_count = fs->num_uniform_components;
+ } else {
+ param_count = fp->program.Base.Parameters->NumParameters * 4;
+ }
+ /* The backend also sometimes adds params for texture size. */
+ param_count += 2 * ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits;
+ prog_data->base.param =
+ rzalloc_array(NULL, const gl_constant_value *, param_count);
+ prog_data->base.pull_param =
+ rzalloc_array(NULL, const gl_constant_value *, param_count);
+ prog_data->base.nr_params = param_count;
+
+ prog_data->barycentric_interp_modes =
+ brw_compute_barycentric_interp_modes(brw, key->flat_shade,
+ key->persample_shading,
+ &fp->program);
+
+ set_binding_table_layout(&prog_data->base, pipeline,
+ VK_SHADER_STAGE_FRAGMENT);
+ /* This needs to come after shader time and pull constant entries, but we
+ * don't have those set up now, so just put it after the layout entries.
+ */
+ prog_data->binding_table.render_target_start = 0;
+
+ program = brw_wm_fs_emit(brw, mem_ctx, key, prog_data,
+ &fp->program, prog, &program_size);
+ if (program == NULL) {
+ ralloc_free(mem_ctx);
+ return false;
+ }
+
+ struct anv_state ps_state = anv_state_stream_alloc(&pipeline->program_stream,
+ program_size, 64);
+ memcpy(ps_state.map, program, program_size);
+
+ if (prog_data->no_8)
+ pipeline->ps_simd8 = NO_KERNEL;
+ else
+ pipeline->ps_simd8 = ps_state.offset;
+
+ if (prog_data->no_8 || prog_data->prog_offset_16) {
+ pipeline->ps_simd16 = ps_state.offset + prog_data->prog_offset_16;
+ } else {
+ pipeline->ps_simd16 = NO_KERNEL;
+ }
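+ /* Both kernels share the single allocation above: the SIMD8 variant
+ * (when compiled) starts at offset 0 and the SIMD16 variant at
+ * prog_offset_16. */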
+
+ ralloc_free(mem_ctx);
+
+ return true;
+}
+
+static void
+brw_gs_populate_key(struct brw_context *brw,
+ struct anv_pipeline *pipeline,
+ struct brw_geometry_program *gp,
+ struct brw_gs_prog_key *key)
+{
+ struct gl_context *ctx = &brw->ctx;
+ struct brw_stage_state *stage_state = &brw->gs.base;
+ struct gl_program *prog = &gp->program.Base;
+
+ memset(key, 0, sizeof(*key));
+
+ key->base.program_string_id = gp->id;
+ brw_setup_vue_key_clip_info(brw, &key->base,
+ gp->program.Base.UsesClipDistanceOut);
+
+ /* _NEW_TEXTURE */
+ brw_populate_sampler_prog_key_data(ctx, prog, stage_state->sampler_count,
+ &key->base.tex);
+
+ struct brw_vs_prog_data *prog_data = &pipeline->vs_prog_data;
+
+ /* BRW_NEW_VUE_MAP_VS */
+ key->input_varyings = prog_data->base.vue_map.slots_valid;
+}
+
+static bool
+really_do_gs_prog(struct brw_context *brw,
+ struct gl_shader_program *prog,
+ struct brw_geometry_program *gp,
+ struct brw_gs_prog_key *key, struct anv_pipeline *pipeline)
+{
+ struct brw_gs_compile_output output;
+
+ /* FIXME: We pass the bind map to the compile in the output struct. Need
+ * something better. */
+ set_binding_table_layout(&output.prog_data.base.base,
+ pipeline, VK_SHADER_STAGE_GEOMETRY);
+
+ brw_compile_gs_prog(brw, prog, gp, key, &output);
+
+ struct anv_state gs_state = anv_state_stream_alloc(&pipeline->program_stream,
+ output.program_size, 64);
+ memcpy(gs_state.map, output.program, output.program_size);
+
+ pipeline->gs_vec4 = gs_state.offset;
+ pipeline->gs_vertex_count = gp->program.VerticesIn;
+
+ ralloc_free(output.mem_ctx);
+
+ return true;
+}
+
+static bool
+brw_codegen_cs_prog(struct brw_context *brw,
+ struct gl_shader_program *prog,
+ struct brw_compute_program *cp,
+ struct brw_cs_prog_key *key, struct anv_pipeline *pipeline)
+{
+ struct gl_context *ctx = &brw->ctx;
+ const GLuint *program;
+ void *mem_ctx = ralloc_context(NULL);
+ GLuint program_size;
+ struct brw_cs_prog_data *prog_data = &pipeline->cs_prog_data;
+
+ struct gl_shader *cs = prog->_LinkedShaders[MESA_SHADER_COMPUTE];
+ assert(cs);
+
+ memset(prog_data, 0, sizeof(*prog_data));
+
+ set_binding_table_layout(&prog_data->base, pipeline, VK_SHADER_STAGE_COMPUTE);
+
+ /* Allocate the references to the uniforms that will end up in the
+ * prog_data associated with the compiled program, and which will be freed
+ * by the state cache.
+ */
+ int param_count = cs->num_uniform_components;
+
+ /* The backend also sometimes adds params for texture size. */
+ param_count += 2 * ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits;
+ prog_data->base.param =
+ rzalloc_array(NULL, const gl_constant_value *, param_count);
+ prog_data->base.pull_param =
+ rzalloc_array(NULL, const gl_constant_value *, param_count);
+ prog_data->base.nr_params = param_count;
+
+ program = brw_cs_emit(brw, mem_ctx, key, prog_data,
+ &cp->program, prog, &program_size);
+ if (program == NULL) {
+ ralloc_free(mem_ctx);
+ return false;
+ }
+
+ if (unlikely(INTEL_DEBUG & DEBUG_CS))
+ fprintf(stderr, "\n");
+
+ struct anv_state cs_state = anv_state_stream_alloc(&pipeline->program_stream,
+ program_size, 64);
+ memcpy(cs_state.map, program, program_size);
+
+ pipeline->cs_simd = cs_state.offset;
+
+ ralloc_free(mem_ctx);
+
+ return true;
+}
+
+static void
+brw_cs_populate_key(struct brw_context *brw,
+ struct brw_compute_program *bcp, struct brw_cs_prog_key *key)
+{
+ memset(key, 0, sizeof(*key));
+
+ /* The unique compute program ID */
+ key->program_string_id = bcp->id;
+}
+
+static void
+fail_on_compile_error(int status, const char *msg)
+{
+ int source, line, column;
+ char error[256];
+
+ if (status)
+ return;
+
+ if (sscanf(msg, "%d:%d(%d): error: %255[^\n]", &source, &line, &column, error) == 4)
+ fail_if(!status, "%d:%s\n", line, error);
+ else
+ fail_if(!status, "%s\n", msg);
+}
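+ /* For reference, a typical message this parses looks like
+ * "0:4(12): error: syntax error, unexpected ';'" (source 0, line 4,
+ * column 12). Only the line number and message text are reported; the
+ * source and column are matched but then dropped. */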
+
+struct anv_compiler {
+ struct anv_device *device;
+ struct intel_screen *screen;
+ struct brw_context *brw;
+ struct gl_pipeline_object pipeline;
+};
+
+extern "C" {
+
+struct anv_compiler *
+anv_compiler_create(struct anv_device *device)
+{
+ const struct brw_device_info *devinfo = &device->info;
+ struct anv_compiler *compiler;
+ struct gl_context *ctx;
+
+ compiler = rzalloc(NULL, struct anv_compiler);
+ if (compiler == NULL)
+ return NULL;
+
+ compiler->screen = rzalloc(compiler, struct intel_screen);
+ if (compiler->screen == NULL)
+ goto fail;
+
+ compiler->brw = rzalloc(compiler, struct brw_context);
+ if (compiler->brw == NULL)
+ goto fail;
+
+ compiler->device = device;
+
+ compiler->brw->optionCache.info = NULL;
+ compiler->brw->bufmgr = NULL;
+ compiler->brw->gen = devinfo->gen;
+ compiler->brw->is_g4x = devinfo->is_g4x;
+ compiler->brw->is_baytrail = devinfo->is_baytrail;
+ compiler->brw->is_haswell = devinfo->is_haswell;
+ compiler->brw->is_cherryview = devinfo->is_cherryview;
+
+ /* We need this at least for CS, which will check brw->max_cs_threads
+ * against the work group size. */
+ compiler->brw->max_vs_threads = devinfo->max_vs_threads;
+ compiler->brw->max_hs_threads = devinfo->max_hs_threads;
+ compiler->brw->max_ds_threads = devinfo->max_ds_threads;
+ compiler->brw->max_gs_threads = devinfo->max_gs_threads;
+ compiler->brw->max_wm_threads = devinfo->max_wm_threads;
+ compiler->brw->max_cs_threads = devinfo->max_cs_threads;
+ compiler->brw->urb.size = devinfo->urb.size;
+ compiler->brw->urb.min_vs_entries = devinfo->urb.min_vs_entries;
+ compiler->brw->urb.max_vs_entries = devinfo->urb.max_vs_entries;
+ compiler->brw->urb.max_hs_entries = devinfo->urb.max_hs_entries;
+ compiler->brw->urb.max_ds_entries = devinfo->urb.max_ds_entries;
+ compiler->brw->urb.max_gs_entries = devinfo->urb.max_gs_entries;
+
+ compiler->brw->intelScreen = compiler->screen;
+ compiler->screen->devinfo = &device->info;
+
- if (device->info.gen >= 8 && !(INTEL_DEBUG & DEBUG_VEC4VS))
- compiler->brw->scalar_vs = true;
++ brw_process_intel_debug_variable(compiler->screen);
+
- compiler->screen->compiler = brw_compiler_create(compiler, &device->info);
++ compiler->screen->compiler = brw_compiler_create(compiler, &device->info);
+
+ ctx = &compiler->brw->ctx;
+ _mesa_init_shader_object_functions(&ctx->Driver);
+
+ _mesa_init_constants(&ctx->Const, API_OPENGL_CORE);
+
+ brw_initialize_context_constants(compiler->brw);
+
+ intelInitExtensions(ctx);
+
+ /* Set dd::NewShader */
+ brwInitFragProgFuncs(&ctx->Driver);
+
+ ctx->_Shader = &compiler->pipeline;
+
+ compiler->brw->precompile = false;
+
+ return compiler;
+
+ fail:
+ ralloc_free(compiler);
+ return NULL;
+}
+
+void
+anv_compiler_destroy(struct anv_compiler *compiler)
+{
+ _mesa_free_errors_data(&compiler->brw->ctx);
+ ralloc_free(compiler);
+}
+
+/* From gen7_urb.c */
+
+/* FIXME: Add to struct intel_device_info */
+
+static const int gen8_push_size = 32 * 1024;
+
+static void
+gen7_compute_urb_partition(struct anv_pipeline *pipeline)
+{
+ const struct brw_device_info *devinfo = &pipeline->device->info;
+ bool vs_present = pipeline->vs_simd8 != NO_KERNEL;
+ unsigned vs_size = vs_present ? pipeline->vs_prog_data.base.urb_entry_size : 1;
+ unsigned vs_entry_size_bytes = vs_size * 64;
+ bool gs_present = pipeline->gs_vec4 != NO_KERNEL;
+ unsigned gs_size = gs_present ? pipeline->gs_prog_data.base.urb_entry_size : 1;
+ unsigned gs_entry_size_bytes = gs_size * 64;
+
+ /* From p35 of the Ivy Bridge PRM (section 1.7.1: 3DSTATE_URB_GS):
+ *
+ * VS Number of URB Entries must be divisible by 8 if the VS URB Entry
+ * Allocation Size is less than 9 512-bit URB entries.
+ *
+ * Similar text exists for GS.
+ */
+ unsigned vs_granularity = (vs_size < 9) ? 8 : 1;
+ unsigned gs_granularity = (gs_size < 9) ? 8 : 1;
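+ /* Hypothetical example: a VS entry allocation size of 2 512-bit units
+ * is below the 9-unit threshold, so nr_vs_entries must be programmed in
+ * multiples of 8; at 9 units or more any entry count is allowed. */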
+
+ /* URB allocations must be done in 8k chunks. */
+ unsigned chunk_size_bytes = 8192;
+
+ /* Determine the size of the URB in chunks. */
+ unsigned urb_chunks = devinfo->urb.size * 1024 / chunk_size_bytes;
+
+ /* Reserve space for push constants */
+ unsigned push_constant_bytes = gen8_push_size;
+ unsigned push_constant_chunks =
+ push_constant_bytes / chunk_size_bytes;
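+ /* With the 32k push constant size and 8k chunks above, this reserves 4
+ * chunks. */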
+
+ /* Initially, assign each stage the minimum amount of URB space it needs,
+ * and make a note of how much additional space it "wants" (the amount of
+ * additional space it could actually make use of).
+ */
+
+ /* VS has a lower limit on the number of URB entries */
+ unsigned vs_chunks =
+ ALIGN(devinfo->urb.min_vs_entries * vs_entry_size_bytes,
+ chunk_size_bytes) / chunk_size_bytes;
+ unsigned vs_wants =
+ ALIGN(devinfo->urb.max_vs_entries * vs_entry_size_bytes,
+ chunk_size_bytes) / chunk_size_bytes - vs_chunks;
+
+ unsigned gs_chunks = 0;
+ unsigned gs_wants = 0;
+ if (gs_present) {
+ /* There are two constraints on the minimum amount of URB space we can
+ * allocate:
+ *
+ * (1) We need room for at least 2 URB entries, since we always operate
+ * the GS in DUAL_OBJECT mode.
+ *
+ * (2) We can't allocate fewer entries than gs_granularity.
+ */
+ gs_chunks = ALIGN(MAX2(gs_granularity, 2) * gs_entry_size_bytes,
+ chunk_size_bytes) / chunk_size_bytes;
+ gs_wants =
+ ALIGN(devinfo->urb.max_gs_entries * gs_entry_size_bytes,
+ chunk_size_bytes) / chunk_size_bytes - gs_chunks;
+ }
+
+ /* There should always be enough URB space to satisfy the minimum
+ * requirements of each stage.
+ */
+ unsigned total_needs = push_constant_chunks + vs_chunks + gs_chunks;
+ assert(total_needs <= urb_chunks);
+
+ /* Mete out remaining space (if any) in proportion to "wants". */
+ unsigned total_wants = vs_wants + gs_wants;
+ unsigned remaining_space = urb_chunks - total_needs;
+ if (remaining_space > total_wants)
+ remaining_space = total_wants;
+ if (remaining_space > 0) {
+ unsigned vs_additional = (unsigned)
+ round(vs_wants * (((double) remaining_space) / total_wants));
+ vs_chunks += vs_additional;
+ remaining_space -= vs_additional;
+ gs_chunks += remaining_space;
+ }
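+ /* Worked example with made-up numbers: if 6 chunks remain with
+ * vs_wants = 8 and gs_wants = 4, then vs_additional = round(8 * 6 / 12)
+ * = 4, so the VS gets 4 extra chunks and the GS the remaining 2. */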
+
+ /* Sanity check that we haven't over-allocated. */
+ assert(push_constant_chunks + vs_chunks + gs_chunks <= urb_chunks);
+
+ /* Finally, compute the number of entries that can fit in the space
+ * allocated to each stage.
+ */
+ unsigned nr_vs_entries = vs_chunks * chunk_size_bytes / vs_entry_size_bytes;
+ unsigned nr_gs_entries = gs_chunks * chunk_size_bytes / gs_entry_size_bytes;
+
+ /* Since we rounded up when computing *_wants, this may be slightly more
+ * than the maximum allowed amount, so correct for that.
+ */
+ nr_vs_entries = MIN2(nr_vs_entries, devinfo->urb.max_vs_entries);
+ nr_gs_entries = MIN2(nr_gs_entries, devinfo->urb.max_gs_entries);
+
+ /* Ensure that we program a multiple of the granularity. */
+ nr_vs_entries = ROUND_DOWN_TO(nr_vs_entries, vs_granularity);
+ nr_gs_entries = ROUND_DOWN_TO(nr_gs_entries, gs_granularity);
+
+ /* Finally, sanity check to make sure we have at least the minimum number
+ * of entries needed for each stage.
+ */
+ assert(nr_vs_entries >= devinfo->urb.min_vs_entries);
+ if (gs_present)
+ assert(nr_gs_entries >= 2);
+
+ /* Lay out the URB in the following order:
+ * - push constants
+ * - VS
+ * - GS
+ */
+ pipeline->urb.vs_start = push_constant_chunks;
+ pipeline->urb.vs_size = vs_size;
+ pipeline->urb.nr_vs_entries = nr_vs_entries;
+
+ pipeline->urb.gs_start = push_constant_chunks + vs_chunks;
+ pipeline->urb.gs_size = gs_size;
+ pipeline->urb.nr_gs_entries = nr_gs_entries;
+}
+
+static const struct {
+ uint32_t token;
+ const char *name;
+} stage_info[] = {
+ { GL_VERTEX_SHADER, "vertex" },
+ { GL_TESS_CONTROL_SHADER, "tess control" },
+ { GL_TESS_EVALUATION_SHADER, "tess evaluation" },
+ { GL_GEOMETRY_SHADER, "geometry" },
+ { GL_FRAGMENT_SHADER, "fragment" },
+ { GL_COMPUTE_SHADER, "compute" },
+};
+
+struct spirv_header {
+ uint32_t magic;
+ uint32_t version;
+ uint32_t gen_magic;
+};
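+/* The first three words of a SPIR-V module; the full header also carries
+ * bound and schema words, which this check doesn't need. */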
+
+static const char *
+src_as_glsl(const char *data)
+{
+ const struct spirv_header *as_spirv = (const struct spirv_header *)data;
+
+ /* SPIR-V is always 4-byte aligned; unaligned data must be GLSL text. */
+ if ((intptr_t)data & 0x3) {
+ return data;
+ }
+
+ if (as_spirv->magic == SPIR_V_MAGIC_NUMBER) {
+ /* LunarG back-door */
+ if (as_spirv->version == 0)
+ return data + 12;
+ else
+ return NULL;
+ } else {
+ return data;
+ }
+}
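+/* To summarize: a real SPIR-V module (magic plus a non-zero version word)
+ * yields NULL, plain text passes through unchanged, and the LunarG
+ * back-door (the SPIR-V magic followed by a zero version word) carries
+ * GLSL source text starting at byte offset 12. */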
+
+static void
+anv_compile_shader_glsl(struct anv_compiler *compiler,
+ struct gl_shader_program *program,
+ struct anv_pipeline *pipeline, uint32_t stage)
+{
+ struct brw_context *brw = compiler->brw;
+ struct gl_shader *shader;
+ int name = 0;
+
+ shader = brw_new_shader(&brw->ctx, name, stage_info[stage].token);
+ fail_if(shader == NULL, "failed to create %s shader\n", stage_info[stage].name);
+
+ shader->Source = strdup(src_as_glsl(pipeline->shaders[stage]->data));
+ _mesa_glsl_compile_shader(&brw->ctx, shader, false, false);
+ fail_on_compile_error(shader->CompileStatus, shader->InfoLog);
+
+ program->Shaders[program->NumShaders] = shader;
+ program->NumShaders++;
+}
+
+static void
+anv_compile_shader_spirv(struct anv_compiler *compiler,
+ struct gl_shader_program *program,
+ struct anv_pipeline *pipeline, uint32_t stage)
+{
+ unreachable("SPIR-V is not supported yet!");
+}
+
+static void
+add_compiled_stage(struct anv_pipeline *pipeline, uint32_t stage,
+ struct brw_stage_prog_data *prog_data)
+{
+ struct brw_device_info *devinfo = &pipeline->device->info;
+ uint32_t max_threads[] = {
+ [VK_SHADER_STAGE_VERTEX] = devinfo->max_vs_threads,
+ [VK_SHADER_STAGE_TESS_CONTROL] = 0,
+ [VK_SHADER_STAGE_TESS_EVALUATION] = 0,
+ [VK_SHADER_STAGE_GEOMETRY] = devinfo->max_gs_threads,
+ [VK_SHADER_STAGE_FRAGMENT] = devinfo->max_wm_threads,
+ [VK_SHADER_STAGE_COMPUTE] = devinfo->max_cs_threads,
+ };
+
+ pipeline->prog_data[stage] = prog_data;
+ pipeline->active_stages |= 1 << stage;
+ pipeline->scratch_start[stage] = pipeline->total_scratch;
+ pipeline->total_scratch =
+ ALIGN_U32(pipeline->total_scratch, 1024) +
+ prog_data->total_scratch * max_threads[stage];
+}
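+/* The running total is aligned up to 1k before each stage's requirement
+ * (per-thread scratch times the stage's maximum thread count) is added;
+ * e.g. a hypothetical 2k per thread across 64 threads adds 128k. */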
+
+int
+anv_compiler_run(struct anv_compiler *compiler, struct anv_pipeline *pipeline)
+{
+ struct gl_shader_program *program;
+ int name = 0;
+ struct brw_context *brw = compiler->brw;
+
+ /* When we free the pipeline, we detect stages based on the NULL status
+ * of various prog_data pointers. Make them NULL by default.
+ */
+ memset(pipeline->prog_data, 0, sizeof(pipeline->prog_data));
+ memset(pipeline->scratch_start, 0, sizeof(pipeline->scratch_start));
+
+ brw->use_rep_send = pipeline->use_repclear;
+ brw->no_simd8 = pipeline->use_repclear;
+
+ program = brw->ctx.Driver.NewShaderProgram(name);
+ program->Shaders = (struct gl_shader **)
+ calloc(VK_NUM_SHADER_STAGE, sizeof(struct gl_shader *));
+ fail_if(program == NULL || program->Shaders == NULL,
+ "failed to create program\n");
+
+ bool all_spirv = true;
+ for (unsigned i = 0; i < VK_NUM_SHADER_STAGE; i++) {
+ if (pipeline->shaders[i] == NULL)
+ continue;
+
+ /* You need at least this much for "void main() { }" anyway */
+ assert(pipeline->shaders[i]->size >= 12);
+
+ if (src_as_glsl(pipeline->shaders[i]->data)) {
+ all_spirv = false;
+ break;
+ }
+
+ assert(pipeline->shaders[i]->size % 4 == 0);
+ }
+
+ if (all_spirv) {
+ for (unsigned i = 0; i < VK_NUM_SHADER_STAGE; i++) {
+ if (pipeline->shaders[i])
+ anv_compile_shader_spirv(compiler, program, pipeline, i);
+ }
+
+ /* TODO: nir_link_shader? */
+ } else {
+ for (unsigned i = 0; i < VK_NUM_SHADER_STAGE; i++) {
+ if (pipeline->shaders[i])
+ anv_compile_shader_glsl(compiler, program, pipeline, i);
+ }
+
+ _mesa_glsl_link_shader(&brw->ctx, program);
+ fail_on_compile_error(program->LinkStatus,
+ program->InfoLog);
+ }
+
+ bool success;
+ pipeline->active_stages = 0;
+ pipeline->total_scratch = 0;
+
+ if (pipeline->shaders[VK_SHADER_STAGE_VERTEX]) {
+ struct brw_vs_prog_key vs_key;
+ struct gl_vertex_program *vp = (struct gl_vertex_program *)
+ program->_LinkedShaders[MESA_SHADER_VERTEX]->Program;
+ struct brw_vertex_program *bvp = brw_vertex_program(vp);
+
+ brw_vs_populate_key(brw, bvp, &vs_key);
+
+ success = really_do_vs_prog(brw, program, bvp, &vs_key, pipeline);
+ fail_if(!success, "really_do_vs_prog failed\n");
+ add_compiled_stage(pipeline, VK_SHADER_STAGE_VERTEX,
+ &pipeline->vs_prog_data.base.base);
+ } else {
+ memset(&pipeline->vs_prog_data, 0, sizeof(pipeline->vs_prog_data));
+ pipeline->vs_simd8 = NO_KERNEL;
+ }
+
+
+ if (pipeline->shaders[VK_SHADER_STAGE_GEOMETRY]) {
+ struct brw_gs_prog_key gs_key;
+ struct gl_geometry_program *gp = (struct gl_geometry_program *)
+ program->_LinkedShaders[MESA_SHADER_GEOMETRY]->Program;
+ struct brw_geometry_program *bgp = brw_geometry_program(gp);
+
+ brw_gs_populate_key(brw, pipeline, bgp, &gs_key);
+
+ success = really_do_gs_prog(brw, program, bgp, &gs_key, pipeline);
+ fail_if(!success, "really_do_gs_prog failed\n");
+ add_compiled_stage(pipeline, VK_SHADER_STAGE_GEOMETRY,
+ &pipeline->gs_prog_data.base.base);
+ } else {
+ pipeline->gs_vec4 = NO_KERNEL;
+ }
+
+ if (pipeline->shaders[VK_SHADER_STAGE_FRAGMENT]) {
+ struct brw_wm_prog_key wm_key;
+ struct gl_fragment_program *fp = (struct gl_fragment_program *)
+ program->_LinkedShaders[MESA_SHADER_FRAGMENT]->Program;
+ struct brw_fragment_program *bfp = brw_fragment_program(fp);
+
+ brw_wm_populate_key(brw, bfp, &wm_key);
+
+ success = really_do_wm_prog(brw, program, bfp, &wm_key, pipeline);
+ fail_if(!success, "really_do_wm_prog failed\n");
+ add_compiled_stage(pipeline, VK_SHADER_STAGE_FRAGMENT,
+ &pipeline->wm_prog_data.base);
+ }
+
+ if (pipeline->shaders[VK_SHADER_STAGE_COMPUTE]) {
+ struct brw_cs_prog_key cs_key;
+ struct gl_compute_program *cp = (struct gl_compute_program *)
+ program->_LinkedShaders[MESA_SHADER_COMPUTE]->Program;
+ struct brw_compute_program *bcp = brw_compute_program(cp);
+
+ brw_cs_populate_key(brw, bcp, &cs_key);
+
+ success = brw_codegen_cs_prog(brw, program, bcp, &cs_key, pipeline);
+ fail_if(!success, "brw_codegen_cs_prog failed\n");
+ add_compiled_stage(pipeline, VK_SHADER_STAGE_COMPUTE,
+ &pipeline->cs_prog_data.base);
+ }
+
+ brw->ctx.Driver.DeleteShaderProgram(&brw->ctx, program);
+
+ struct anv_device *device = compiler->device;
+ while (device->scratch_block_pool.bo.size < pipeline->total_scratch)
+ anv_block_pool_alloc(&device->scratch_block_pool);
+
+ gen7_compute_urb_partition(pipeline);
+
+ return 0;
+}
+
+/* This badly named function frees the struct anv_pipeline data that the compiler
+ * allocates. Currently just the prog_data structs.
+ */
+void
+anv_compiler_free(struct anv_pipeline *pipeline)
+{
+ for (uint32_t stage = 0; stage < VK_NUM_SHADER_STAGE; stage++) {
+ if (pipeline->prog_data[stage]) {
+ free(pipeline->prog_data[stage]->map_entries);
+ ralloc_free(pipeline->prog_data[stage]->param);
+ ralloc_free(pipeline->prog_data[stage]->pull_param);
+ }
+ }
+}
+
+}