--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2014 Ilia Mirkin. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "main/imports.h"
+#include "program/prog_parameter.h"
+#include "program/prog_print.h"
+#include "compiler/glsl/ir_uniform.h"
+
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "util/u_inlines.h"
+#include "util/u_surface.h"
+
+#include "st_debug.h"
+#include "st_cb_bufferobjects.h"
+#include "st_context.h"
+#include "st_atom.h"
+#include "st_program.h"
+
+static void
+st_bind_ssbos(struct st_context *st, struct gl_shader *shader,
+ unsigned shader_type)
+{
+   unsigned i;
+   struct pipe_shader_buffer buffers[MAX_SHADER_STORAGE_BUFFERS];
+   struct gl_program_constants *c;
+
+   if (!shader || !st->pipe->set_shader_buffers)
+      return;
+
+   c = &st->ctx->Const.Program[shader->Stage];
+
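+   /* Gather a pipe_shader_buffer for each linked SSBO block, resolving its
+    * GL binding point to the backing buffer object.
+    */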
+ for (i = 0; i < shader->NumShaderStorageBlocks; i++) {
+ struct gl_shader_storage_buffer_binding *binding;
+ struct st_buffer_object *st_obj;
+ struct pipe_shader_buffer *sb = &buffers[i];
+
+ binding = &st->ctx->ShaderStorageBufferBindings[
+ shader->ShaderStorageBlocks[i]->Binding];
+ st_obj = st_buffer_object(binding->BufferObject);
+
+ sb->buffer = st_obj->buffer;
+
+ if (sb->buffer) {
+ sb->buffer_offset = binding->Offset;
+ sb->buffer_size = sb->buffer->width0 - binding->Offset;
+
+ /* AutomaticSize is FALSE if the buffer was set with BindBufferRange.
+ * Take the minimum just to be sure.
+ */
+ if (!binding->AutomaticSize)
+ sb->buffer_size = MIN2(sb->buffer_size, (unsigned) binding->Size);
+ }
+ else {
+ sb->buffer_offset = 0;
+ sb->buffer_size = 0;
+ }
+ }
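+   /* SSBOs occupy the pipe shader buffer slots immediately after the atomic
+    * counter buffers, hence the c->MaxAtomicBuffers start slot below.
+    */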
+ st->pipe->set_shader_buffers(st->pipe, shader_type, c->MaxAtomicBuffers,
+ shader->NumShaderStorageBlocks, buffers);
+ /* clear out any stale shader buffers */
+ if (shader->NumShaderStorageBlocks < c->MaxShaderStorageBlocks)
+ st->pipe->set_shader_buffers(
+ st->pipe, shader_type,
+ c->MaxAtomicBuffers + shader->NumShaderStorageBlocks,
+ c->MaxShaderStorageBlocks - shader->NumShaderStorageBlocks,
+ NULL);
+}
+
+static void bind_vs_ssbos(struct st_context *st)
+{
+ struct gl_shader_program *prog =
+ st->ctx->_Shader->CurrentProgram[MESA_SHADER_VERTEX];
+
+ if (!prog)
+ return;
+
+ st_bind_ssbos(st, prog->_LinkedShaders[MESA_SHADER_VERTEX],
+ PIPE_SHADER_VERTEX);
+}
+
+const struct st_tracked_state st_bind_vs_ssbos = {
+ "st_bind_vs_ssbos",
+ {
+ 0,
+ ST_NEW_VERTEX_PROGRAM | ST_NEW_STORAGE_BUFFER,
+ },
+ bind_vs_ssbos
+};
+
+static void bind_fs_ssbos(struct st_context *st)
+{
+ struct gl_shader_program *prog =
+ st->ctx->_Shader->CurrentProgram[MESA_SHADER_FRAGMENT];
+
+ if (!prog)
+ return;
+
+ st_bind_ssbos(st, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
+ PIPE_SHADER_FRAGMENT);
+}
+
+const struct st_tracked_state st_bind_fs_ssbos = {
+ "st_bind_fs_ssbos",
+ {
+ 0,
+ ST_NEW_FRAGMENT_PROGRAM | ST_NEW_STORAGE_BUFFER,
+ },
+ bind_fs_ssbos
+};
+
+static void bind_gs_ssbos(struct st_context *st)
+{
+ struct gl_shader_program *prog =
+ st->ctx->_Shader->CurrentProgram[MESA_SHADER_GEOMETRY];
+
+ if (!prog)
+ return;
+
+ st_bind_ssbos(st, prog->_LinkedShaders[MESA_SHADER_GEOMETRY],
+ PIPE_SHADER_GEOMETRY);
+}
+
+const struct st_tracked_state st_bind_gs_ssbos = {
+ "st_bind_gs_ssbos",
+ {
+ 0,
+ ST_NEW_GEOMETRY_PROGRAM | ST_NEW_STORAGE_BUFFER,
+ },
+ bind_gs_ssbos
+};
+
+static void bind_tcs_ssbos(struct st_context *st)
+{
+ struct gl_shader_program *prog =
+ st->ctx->_Shader->CurrentProgram[MESA_SHADER_TESS_CTRL];
+
+ if (!prog)
+ return;
+
+ st_bind_ssbos(st, prog->_LinkedShaders[MESA_SHADER_TESS_CTRL],
+ PIPE_SHADER_TESS_CTRL);
+}
+
+const struct st_tracked_state st_bind_tcs_ssbos = {
+ "st_bind_tcs_ssbos",
+ {
+ 0,
+ ST_NEW_TESSCTRL_PROGRAM | ST_NEW_STORAGE_BUFFER,
+ },
+ bind_tcs_ssbos
+};
+
+static void bind_tes_ssbos(struct st_context *st)
+{
+ struct gl_shader_program *prog =
+ st->ctx->_Shader->CurrentProgram[MESA_SHADER_TESS_EVAL];
+
+ if (!prog)
+ return;
+
+ st_bind_ssbos(st, prog->_LinkedShaders[MESA_SHADER_TESS_EVAL],
+ PIPE_SHADER_TESS_EVAL);
+}
+
+const struct st_tracked_state st_bind_tes_ssbos = {
+ "st_bind_tes_ssbos",
+ {
+ 0,
+ ST_NEW_TESSEVAL_PROGRAM | ST_NEW_STORAGE_BUFFER,
+ },
+ bind_tes_ssbos
+};
int dead_mask; /**< Used in dead code elimination */
st_src_reg buffer; /**< buffer register */
+ unsigned buffer_access; /**< buffer access qualifier flags */
class function_entry *function; /* Set on TGSI_OPCODE_CAL or TGSI_OPCODE_BGNSUB */
const struct tgsi_opcode_info *info;
/*@}*/
void visit_atomic_counter_intrinsic(ir_call *);
+ void visit_ssbo_intrinsic(ir_call *);
st_src_reg result;
}
}
- this->instructions.push_tail(inst);
-
/*
* This section contains the double processing.
* GLSL just represents doubles as single channel values,
int initial_src_swz[4], initial_src_idx[4];
int initial_dst_idx[2], initial_dst_writemask[2];
/* select the writemask for dst0 or dst1 */
- unsigned writemask = inst->dst[0].file == PROGRAM_UNDEFINED ? inst->dst[1].writemask : inst->dst[0].writemask;
+ unsigned writemask = inst->dst[1].file == PROGRAM_UNDEFINED ? inst->dst[0].writemask : inst->dst[1].writemask;
/* copy out the writemask, index and swizzles for all src/dsts. */
for (j = 0; j < 2; j++) {
* scan all the components in the dst writemask
* generate an instruction for each of them if required.
*/
+ st_src_reg addr;
while (writemask) {
int i = u_bit_scan(&writemask);
+         /* Before emitting the instruction, see if we have to adjust the
+          * store address.
+          */
+         if (i > 1 && inst->op == TGSI_OPCODE_STORE &&
+             addr.file == PROGRAM_UNDEFINED) {
+            /* We have to advance the buffer address by 16 bytes (one vec4)
+             * so the zw half of the double lands in the next slot.
+             */
+            addr = get_temp(glsl_type::uint_type);
+            emit_asm(ir, TGSI_OPCODE_UADD, st_dst_reg(addr),
+                     inst->src[0], st_src_reg_for_int(16));
+         }
+
/* first time use previous instruction */
if (dinst == NULL) {
            dinst = inst;
         } else {
            /* create a new instruction for subsequent attempts */
            dinst = new(mem_ctx) glsl_to_tgsi_instruction();
            *dinst = *inst;
dinst->next = NULL;
dinst->prev = NULL;
- this->instructions.push_tail(dinst);
}
+ this->instructions.push_tail(dinst);
/* modify the destination if we are splitting */
for (j = 0; j < 2; j++) {
if (dst_is_double[j]) {
dinst->dst[j].writemask = (i & 1) ? WRITEMASK_ZW : WRITEMASK_XY;
dinst->dst[j].index = initial_dst_idx[j];
- if (i > 1)
+ if (i > 1) {
+ if (dinst->op == TGSI_OPCODE_STORE) {
+ dinst->src[0] = addr;
+ } else {
dinst->dst[j].index++;
+ }
+ }
} else {
/* if we aren't writing to a double, just get the bit of the initial writemask
for this channel */
}
}
inst = dinst;
+ } else {
+ this->instructions.push_tail(inst);
}
assert(src1.type != GLSL_TYPE_ARRAY);
assert(src1.type != GLSL_TYPE_STRUCT);
- if (src0.type == GLSL_TYPE_DOUBLE || src1.type == GLSL_TYPE_DOUBLE)
+ if (is_resource_instruction(op))
+ type = src1.type;
+ else if (src0.type == GLSL_TYPE_DOUBLE || src1.type == GLSL_TYPE_DOUBLE)
type = GLSL_TYPE_DOUBLE;
else if (src0.type == GLSL_TYPE_FLOAT || src1.type == GLSL_TYPE_FLOAT)
type = GLSL_TYPE_FLOAT;
case3fid(FLR, FLR, DFLR);
case3fid(ROUND, ROUND, DROUND);
+ case2iu(ATOMIMAX, ATOMUMAX);
+ case2iu(ATOMIMIN, ATOMUMIN);
+
default: break;
}
}
}
+void
+glsl_to_tgsi_visitor::visit_ssbo_intrinsic(ir_call *ir)
+{
+ const char *callee = ir->callee->function_name();
+ exec_node *param = ir->actual_parameters.get_head();
+
+ ir_rvalue *block = ((ir_instruction *)param)->as_rvalue();
+
+ param = param->get_next();
+ ir_rvalue *offset = ((ir_instruction *)param)->as_rvalue();
+
+ ir_constant *const_block = block->as_constant();
+
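+   /* The PROGRAM_BUFFER index is offset by MaxAtomicBuffers to mirror the
+    * slot layout st_bind_ssbos() uses when binding the buffers.
+    */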
+ st_src_reg buffer(
+ PROGRAM_BUFFER,
+ ctx->Const.Program[shader->Stage].MaxAtomicBuffers +
+ (const_block ? const_block->value.u[0] : 0),
+ GLSL_TYPE_UINT);
+
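+   /* A non-constant block index is computed into the address register and
+    * attached to the buffer register as a relative-addressing term.
+    */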
+ if (!const_block) {
+ block->accept(this);
+ emit_arl(ir, sampler_reladdr, this->result);
+ buffer.reladdr = ralloc(mem_ctx, st_src_reg);
+ memcpy(buffer.reladdr, &sampler_reladdr, sizeof(sampler_reladdr));
+ }
+
+ /* Calculate the surface offset */
+ offset->accept(this);
+ st_src_reg off = this->result;
+
+ st_dst_reg dst = undef_dst;
+ if (ir->return_deref) {
+ ir->return_deref->accept(this);
+ dst = st_dst_reg(this->result);
+ dst.writemask = (1 << ir->return_deref->type->vector_elements) - 1;
+ }
+
+ glsl_to_tgsi_instruction *inst;
+
+ if (!strcmp("__intrinsic_load_ssbo", callee)) {
+ inst = emit_asm(ir, TGSI_OPCODE_LOAD, dst, off);
+ if (dst.type == GLSL_TYPE_BOOL)
+ emit_asm(ir, TGSI_OPCODE_USNE, dst, st_src_reg(dst), st_src_reg_for_int(0));
+ } else if (!strcmp("__intrinsic_store_ssbo", callee)) {
+ param = param->get_next();
+ ir_rvalue *val = ((ir_instruction *)param)->as_rvalue();
+ val->accept(this);
+
+ param = param->get_next();
+ ir_constant *write_mask = ((ir_instruction *)param)->as_constant();
+ assert(write_mask);
+ dst.writemask = write_mask->value.u[0];
+
+ dst.type = this->result.type;
+ inst = emit_asm(ir, TGSI_OPCODE_STORE, dst, off, this->result);
+ } else {
+ param = param->get_next();
+ ir_rvalue *val = ((ir_instruction *)param)->as_rvalue();
+ val->accept(this);
+
+ st_src_reg data = this->result, data2 = undef_src;
+ unsigned opcode;
+ if (!strcmp("__intrinsic_atomic_add_ssbo", callee))
+ opcode = TGSI_OPCODE_ATOMUADD;
+ else if (!strcmp("__intrinsic_atomic_min_ssbo", callee))
+ opcode = TGSI_OPCODE_ATOMIMIN;
+ else if (!strcmp("__intrinsic_atomic_max_ssbo", callee))
+ opcode = TGSI_OPCODE_ATOMIMAX;
+ else if (!strcmp("__intrinsic_atomic_and_ssbo", callee))
+ opcode = TGSI_OPCODE_ATOMAND;
+ else if (!strcmp("__intrinsic_atomic_or_ssbo", callee))
+ opcode = TGSI_OPCODE_ATOMOR;
+ else if (!strcmp("__intrinsic_atomic_xor_ssbo", callee))
+ opcode = TGSI_OPCODE_ATOMXOR;
+ else if (!strcmp("__intrinsic_atomic_exchange_ssbo", callee))
+ opcode = TGSI_OPCODE_ATOMXCHG;
+ else if (!strcmp("__intrinsic_atomic_comp_swap_ssbo", callee)) {
+ opcode = TGSI_OPCODE_ATOMCAS;
+ param = param->get_next();
+ val = ((ir_instruction *)param)->as_rvalue();
+ val->accept(this);
+ data2 = this->result;
+ } else {
+ assert(!"Unexpected intrinsic");
+ return;
+ }
+
+ inst = emit_asm(ir, opcode, dst, off, data, data2);
+ }
+
+ param = param->get_next();
+ ir_constant *access = NULL;
+ if (!param->is_tail_sentinel()) {
+ access = ((ir_instruction *)param)->as_constant();
+ assert(access);
+ }
+
+ /* The emit_asm() might have actually split the op into pieces, e.g. for
+ * double stores. We have to go back and fix up all the generated ops.
+ */
+ unsigned op = inst->op;
+ do {
+ inst->buffer = buffer;
+ if (access)
+ inst->buffer_access = access->value.u[0];
+ inst = (glsl_to_tgsi_instruction *)inst->get_prev();
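+      /* Skip the UADD emitted by the double-store splitting; it only
+       * computes the second half's address and takes no buffer operand.
+       */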
+ if (inst->op == TGSI_OPCODE_UADD)
+ inst = (glsl_to_tgsi_instruction *)inst->get_prev();
+ } while (inst && inst->buffer.file == PROGRAM_UNDEFINED && inst->op == op);
+}
+
void
glsl_to_tgsi_visitor::visit(ir_call *ir)
{
return;
}
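+   /* SSBO accesses arrive as __intrinsic_*_ssbo calls produced by GLSL IR
+    * lowering; route them all to the dedicated visitor.
+    */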
+ if (!strcmp("__intrinsic_load_ssbo", callee) ||
+ !strcmp("__intrinsic_store_ssbo", callee) ||
+ !strcmp("__intrinsic_atomic_add_ssbo", callee) ||
+ !strcmp("__intrinsic_atomic_min_ssbo", callee) ||
+ !strcmp("__intrinsic_atomic_max_ssbo", callee) ||
+ !strcmp("__intrinsic_atomic_and_ssbo", callee) ||
+ !strcmp("__intrinsic_atomic_or_ssbo", callee) ||
+ !strcmp("__intrinsic_atomic_xor_ssbo", callee) ||
+ !strcmp("__intrinsic_atomic_exchange_ssbo", callee) ||
+ !strcmp("__intrinsic_atomic_comp_swap_ssbo", callee)) {
+ visit_ssbo_intrinsic(ir);
+ return;
+ }
+
entry = get_function_signature(sig);
/* Process in parameters. */
foreach_two_lists(formal_node, &sig->parameters,
src[0] = t->buffers[inst->buffer.index];
if (inst->buffer.reladdr)
src[0] = ureg_src_indirect(src[0], ureg_src(t->address[2]));
- ureg_insn(ureg, inst->op, dst, num_dst, src, num_src);
+ assert(src[0].File != TGSI_FILE_NULL);
+ ureg_memory_insn(ureg, inst->op, dst, num_dst, src, num_src,
+ inst->buffer_access);
+ break;
+
+ case TGSI_OPCODE_STORE:
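+      /* For stores the buffer itself is the destination; apply the
+       * instruction's writemask and any indirect block index to it.
+       */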
+      dst[0] = ureg_writemask(ureg_dst(t->buffers[inst->buffer.index]),
+                              inst->dst[0].writemask);
+ if (inst->buffer.reladdr)
+ dst[0] = ureg_dst_indirect(dst[0], ureg_src(t->address[2]));
+ assert(dst[0].File != TGSI_FILE_NULL);
+ ureg_memory_insn(ureg, inst->op, dst, num_dst, src, num_src,
+ inst->buffer_access);
break;
case TGSI_OPCODE_SCS:
}
}
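+   /* Declare a TGSI buffer resource for each SSBO slot the program uses;
+    * slots below frag_const->MaxAtomicBuffers belong to the atomic counter
+    * buffers declared above.
+    */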
+ for (; i < frag_const->MaxAtomicBuffers + frag_const->MaxShaderStorageBlocks;
+ i++) {
+ if (program->buffers_used & (1 << i)) {
+ t->buffers[i] = ureg_DECL_buffer(ureg, i, false);
+ }
+ }
+
/* Emit each instruction in turn:
*/
foreach_in_list(glsl_to_tgsi_instruction, inst, &program->instructions) {