r600/sfn: Fix null pointer deref in live range evaluation
[mesa.git] / src / gallium / drivers / r600 / evergreen_compute.c
index 7880d0f723b2c36a5a7f4fea45d7d32f4f514df7..98d4b97d7fc00014d5bf78761ae8a00f0a842299 100644 (file)
@@ -84,6 +84,25 @@ writable images will consume TEX slots, VTX slots too because of linear indexing
 
 */
 
+#ifdef HAVE_OPENCL
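+/* Init/teardown helpers for the driver-local r600_shader_binary: zero the
+ * struct on init, free every owned allocation on clean. */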
+static void radeon_shader_binary_init(struct r600_shader_binary *b)
+{
+       memset(b, 0, sizeof(*b));
+}
+
+static void radeon_shader_binary_clean(struct r600_shader_binary *b)
+{
+       if (!b)
+               return;
+       FREE(b->code);
+       FREE(b->config);
+       FREE(b->rodata);
+       FREE(b->global_symbol_offsets);
+       FREE(b->relocs);
+       FREE(b->disasm_string);
+}
+#endif
+
 struct r600_resource *r600_compute_buffer_alloc_vram(struct r600_screen *screen,
                                                     unsigned size)
 {
@@ -122,7 +141,8 @@ static void evergreen_set_rat(struct r600_pipe_compute *pipe,
        rat_templ.u.tex.first_layer = 0;
        rat_templ.u.tex.last_layer = 0;
 
-       /* Add the RAT the list of color buffers */
+       /* Add the RAT to the list of color buffers. Drop the old buffer first. */
+       pipe_surface_reference(&pipe->ctx->framebuffer.state.cbufs[id], NULL);
        pipe->ctx->framebuffer.state.cbufs[id] = pipe->ctx->b.b.create_surface(
                (struct pipe_context *)pipe->ctx,
                (struct pipe_resource *)bo, &rat_templ);
@@ -185,7 +205,7 @@ static void evergreen_cs_set_constant_buffer(struct r600_context *rctx,
 #ifdef HAVE_OPENCL
 static void parse_symbol_table(Elf_Data *symbol_table_data,
                                const GElf_Shdr *symbol_table_header,
-                               struct ac_shader_binary *binary)
+                               struct r600_shader_binary *binary)
 {
        GElf_Sym symbol;
        unsigned i = 0;
@@ -229,7 +249,7 @@ static void parse_symbol_table(Elf_Data *symbol_table_data,
 
 static void parse_relocs(Elf *elf, Elf_Data *relocs, Elf_Data *symbols,
                        unsigned symbol_sh_link,
-                       struct ac_shader_binary *binary)
+                       struct r600_shader_binary *binary)
 {
        unsigned i;
 
@@ -237,12 +257,12 @@ static void parse_relocs(Elf *elf, Elf_Data *relocs, Elf_Data *symbols,
                return;
        }
        binary->relocs = CALLOC(binary->reloc_count,
-                       sizeof(struct ac_shader_reloc));
+                       sizeof(struct r600_shader_reloc));
        for (i = 0; i < binary->reloc_count; i++) {
                GElf_Sym symbol;
                GElf_Rel rel;
                char *symbol_name;
-               struct ac_shader_reloc *reloc = &binary->relocs[i];
+               struct r600_shader_reloc *reloc = &binary->relocs[i];
 
                gelf_getrel(relocs, i, &rel);
                gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &symbol);
@@ -255,7 +275,7 @@ static void parse_relocs(Elf *elf, Elf_Data *relocs, Elf_Data *symbols,
 }
 
 static void r600_elf_read(const char *elf_data, unsigned elf_size,
-                struct ac_shader_binary *binary)
+                struct r600_shader_binary *binary)
 {
        char *elf_buffer;
        Elf *elf;
@@ -334,7 +354,7 @@ static void r600_elf_read(const char *elf_data, unsigned elf_size,
 }
 
 static const unsigned char *r600_shader_binary_config_start(
-       const struct ac_shader_binary *binary,
+       const struct r600_shader_binary *binary,
        uint64_t symbol_offset)
 {
        unsigned i;
@@ -347,7 +367,7 @@ static const unsigned char *r600_shader_binary_config_start(
        return binary->config;
 }
 
-static void r600_shader_binary_read_config(const struct ac_shader_binary *binary,
+static void r600_shader_binary_read_config(const struct r600_shader_binary *binary,
                                           struct r600_bytecode *bc,
                                           uint64_t symbol_offset,
                                           boolean *use_kill)
@@ -383,7 +403,7 @@ static void r600_shader_binary_read_config(const struct ac_shader_binary *binary
 }
 
 static unsigned r600_create_shader(struct r600_bytecode *bc,
-                                  const struct ac_shader_binary *binary,
+                                  const struct r600_shader_binary *binary,
                                   boolean *use_kill)
 
 {
@@ -409,8 +429,7 @@ static void *evergreen_create_compute_state(struct pipe_context *ctx,
        struct r600_context *rctx = (struct r600_context *)ctx;
        struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);
 #ifdef HAVE_OPENCL
-       const struct pipe_llvm_program_header *header;
-       const char *code;
+       const struct pipe_binary_program_header *header;
        void *p;
        boolean use_kill;
 #endif
@@ -422,22 +441,24 @@ static void *evergreen_create_compute_state(struct pipe_context *ctx,
 
        shader->ir_type = cso->ir_type;
 
-       if (shader->ir_type == PIPE_SHADER_IR_TGSI) {
-               shader->sel = r600_create_shader_state_tokens(ctx, cso->prog, PIPE_SHADER_COMPUTE);
+       if (shader->ir_type == PIPE_SHADER_IR_TGSI ||
+           shader->ir_type == PIPE_SHADER_IR_NIR) {
+               shader->sel = r600_create_shader_state_tokens(ctx, cso->prog, cso->ir_type, PIPE_SHADER_COMPUTE);
                return shader;
        }
 #ifdef HAVE_OPENCL
        COMPUTE_DBG(rctx->screen, "*** evergreen_create_compute_state\n");
        header = cso->prog;
-       code = cso->prog + sizeof(struct pipe_llvm_program_header);
        radeon_shader_binary_init(&shader->binary);
-       r600_elf_read(code, header->num_bytes, &shader->binary);
+       r600_elf_read(header->blob, header->num_bytes, &shader->binary);
        r600_create_shader(&shader->bc, &shader->binary, &use_kill);
 
        /* Upload code + ROdata */
        shader->code_bo = r600_compute_buffer_alloc_vram(rctx->screen,
                                                        shader->bc.ndw * 4);
-       p = r600_buffer_map_sync_with_rings(&rctx->b, shader->code_bo, PIPE_TRANSFER_WRITE);
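+       /* Map only long enough to copy the bytecode; it is unmapped right below. */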
+       p = r600_buffer_map_sync_with_rings(
+               &rctx->b, shader->code_bo,
+               PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
        //TODO: use util_memcpy_cpu_to_le32 ?
        memcpy(p, shader->bc.bytecode, shader->bc.ndw * 4);
        rctx->b.ws->buffer_unmap(shader->code_bo->buf);
@@ -456,16 +477,16 @@ static void evergreen_delete_compute_state(struct pipe_context *ctx, void *state
        if (!shader)
                return;
 
-       if (shader->ir_type == PIPE_SHADER_IR_TGSI) {
+       if (shader->ir_type == PIPE_SHADER_IR_TGSI ||
+           shader->ir_type == PIPE_SHADER_IR_NIR) {
                r600_delete_shader_selector(ctx, shader->sel);
        } else {
 #ifdef HAVE_OPENCL
                radeon_shader_binary_clean(&shader->binary);
+               pipe_resource_reference((struct pipe_resource**)&shader->code_bo, NULL);
+               pipe_resource_reference((struct pipe_resource**)&shader->kernel_param, NULL);
 #endif
                r600_destroy_shader(&shader->bc);
-
-               /* TODO destroy shader->code_bo, shader->const_bo
-                * we'll need something like r600_buffer_free */
        }
        FREE(shader);
 }
@@ -481,12 +502,14 @@ static void evergreen_bind_compute_state(struct pipe_context *ctx, void *state)
                return;
        }
 
-       if (cstate->ir_type == PIPE_SHADER_IR_TGSI) {
+       if (cstate->ir_type == PIPE_SHADER_IR_TGSI ||
+           cstate->ir_type == PIPE_SHADER_IR_NIR) {
                bool compute_dirty;
-
-               r600_shader_select(ctx, cstate->sel, &compute_dirty);
+               cstate->sel->ir_type = cstate->ir_type;
+               if (r600_shader_select(ctx, cstate->sel, &compute_dirty))
+                       R600_ERR("Failed to select compute shader\n");
        }
-
+
        rctx->cs_shader_state.shader = (struct r600_pipe_compute *)state;
 }
 
@@ -571,11 +594,13 @@ static void evergreen_compute_upload_input(struct pipe_context *ctx,
 }
 
 static void evergreen_emit_dispatch(struct r600_context *rctx,
-                                   const struct pipe_grid_info *info)
+                                   const struct pipe_grid_info *info,
+                                   uint32_t indirect_grid[3])
 {
        int i;
-       struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+       struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
        struct r600_pipe_compute *shader = rctx->cs_shader_state.shader;
+       bool render_cond_bit = rctx->b.render_cond && !rctx->b.render_cond_force_off;
        unsigned num_waves;
        unsigned num_pipes = rctx->screen->b.info.r600_max_quad_pipes;
        unsigned wave_divisor = (16 * num_pipes);
@@ -583,9 +608,10 @@ static void evergreen_emit_dispatch(struct r600_context *rctx,
        int grid_size = 1;
        unsigned lds_size = shader->local_size / 4;
 
-       if (shader->ir_type != PIPE_SHADER_IR_TGSI)
+       if (shader->ir_type != PIPE_SHADER_IR_TGSI &&
+           shader->ir_type != PIPE_SHADER_IR_NIR)
                lds_size += shader->bc.nlds_dw;
-
+
        /* Calculate group_size/grid_size */
        for (i = 0; i < 3; i++) {
                group_size *= info->block[i];
@@ -630,13 +656,21 @@ static void evergreen_emit_dispatch(struct r600_context *rctx,
        radeon_compute_set_context_reg(cs, R_0288E8_SQ_LDS_ALLOC,
                                        lds_size | (num_waves << 14));
 
-       /* Dispatch packet */
-       radeon_emit(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
-       radeon_emit(cs, info->grid[0]);
-       radeon_emit(cs, info->grid[1]);
-       radeon_emit(cs, info->grid[2]);
-       /* VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN */
-       radeon_emit(cs, 1);
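+       /* For an indirect dispatch the grid size was already read back on the
+        * CPU in compute_emit_cs, so both paths emit a direct dispatch packet. */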
+       if (info->indirect) {
+               radeon_emit(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, render_cond_bit));
+               radeon_emit(cs, indirect_grid[0]);
+               radeon_emit(cs, indirect_grid[1]);
+               radeon_emit(cs, indirect_grid[2]);
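+               /* VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN */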
+               radeon_emit(cs, 1);
+       } else {
+               /* Dispatch packet */
+               radeon_emit(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, render_cond_bit));
+               radeon_emit(cs, info->grid[0]);
+               radeon_emit(cs, info->grid[1]);
+               radeon_emit(cs, info->grid[2]);
+               /* VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN */
+               radeon_emit(cs, 1);
+       }
 
        if (rctx->is_debug)
                eg_trace_emit(rctx);
@@ -644,7 +678,7 @@ static void evergreen_emit_dispatch(struct r600_context *rctx,
 
 static void compute_setup_cbs(struct r600_context *rctx)
 {
-       struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+       struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
        unsigned i;
 
        /* Emit colorbuffers. */
@@ -686,11 +720,12 @@ static void compute_setup_cbs(struct r600_context *rctx)
 static void compute_emit_cs(struct r600_context *rctx,
                            const struct pipe_grid_info *info)
 {
-       struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+       struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
        bool compute_dirty = false;
        struct r600_pipe_shader *current;
        struct r600_shader_atomic combined_atomics[8];
        uint8_t atomic_used_mask;
+       uint32_t indirect_grid[3] = { 0, 0, 0 };
 
        /* make sure that the gfx ring is only one active */
        if (radeon_emitted(rctx->b.dma.cs, 0)) {
@@ -704,9 +739,13 @@ static void compute_emit_cs(struct r600_context *rctx,
                rctx->cmd_buf_is_compute = true;
        }
 
-       r600_need_cs_space(rctx, 0, true);
-       if (rctx->cs_shader_state.shader->ir_type == PIPE_SHADER_IR_TGSI) {
-               r600_shader_select(&rctx->b.b, rctx->cs_shader_state.shader->sel, &compute_dirty);
+       if (rctx->cs_shader_state.shader->ir_type == PIPE_SHADER_IR_TGSI ||
+           rctx->cs_shader_state.shader->ir_type == PIPE_SHADER_IR_NIR) {
+               if (r600_shader_select(&rctx->b.b, rctx->cs_shader_state.shader->sel, &compute_dirty)) {
+                       R600_ERR("Failed to select compute shader\n");
+                       return;
+               }
+
                current = rctx->cs_shader_state.shader->sel->current;
                if (compute_dirty) {
                        rctx->cs_shader_state.atom.num_dw = current->command_buffer.num_dw;
@@ -717,22 +756,36 @@ static void compute_emit_cs(struct r600_context *rctx,
                bool need_buf_const = current->shader.uses_tex_buffers ||
                        current->shader.has_txq_cube_array_z_comp;
 
+               if (info->indirect) {
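+                       /* Read the grid size back on the CPU; it feeds the grid-size
+                        * driver constants and the dispatch packet below. */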
+                       struct r600_resource *indirect_resource = (struct r600_resource *)info->indirect;
+                       unsigned *data = r600_buffer_map_sync_with_rings(&rctx->b, indirect_resource, PIPE_TRANSFER_READ);
+                       unsigned offset = info->indirect_offset / 4;
+                       indirect_grid[0] = data[offset];
+                       indirect_grid[1] = data[offset + 1];
+                       indirect_grid[2] = data[offset + 2];
+               }
                for (int i = 0; i < 3; i++) {
                        rctx->cs_block_grid_sizes[i] = info->block[i];
-                       rctx->cs_block_grid_sizes[i + 4] = info->grid[i];
+                       rctx->cs_block_grid_sizes[i + 4] = info->indirect ? indirect_grid[i] : info->grid[i];
                }
                rctx->cs_block_grid_sizes[3] = rctx->cs_block_grid_sizes[7] = 0;
                rctx->driver_consts[PIPE_SHADER_COMPUTE].cs_block_grid_size_dirty = true;
+
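+               /* Count the atomic buffers up front so enough CS space is reserved. */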
+               evergreen_emit_atomic_buffer_setup_count(rctx, current, combined_atomics, &atomic_used_mask);
+               r600_need_cs_space(rctx, 0, true, util_bitcount(atomic_used_mask));
+
                if (need_buf_const) {
                        eg_setup_buffer_constants(rctx, PIPE_SHADER_COMPUTE);
                }
                r600_update_driver_const_buffers(rctx, true);
 
-               if (evergreen_emit_atomic_buffer_setup(rctx, current, combined_atomics, &atomic_used_mask)) {
+               evergreen_emit_atomic_buffer_setup(rctx, true, combined_atomics, atomic_used_mask);
+               if (atomic_used_mask) {
                        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
                        radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
                }
-       }
+       } else
+               r600_need_cs_space(rctx, 0, true, 0);
 
        /* Initialize all the compute-related registers.
         *
@@ -743,7 +796,8 @@ static void compute_emit_cs(struct r600_context *rctx,
 
        /* emit config state */
        if (rctx->b.chip_class == EVERGREEN) {
-               if (rctx->cs_shader_state.shader->ir_type == PIPE_SHADER_IR_TGSI) {
+               if (rctx->cs_shader_state.shader->ir_type == PIPE_SHADER_IR_TGSI ||
+                   rctx->cs_shader_state.shader->ir_type == PIPE_SHADER_IR_NIR) {
                        radeon_set_config_reg_seq(cs, R_008C04_SQ_GPR_RESOURCE_MGMT_1, 3);
                        radeon_emit(cs, S_008C04_NUM_CLAUSE_TEMP_GPRS(rctx->r6xx_num_clause_temp_gprs));
                        radeon_emit(cs, 0);
@@ -756,7 +810,8 @@ static void compute_emit_cs(struct r600_context *rctx,
        rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
        r600_flush_emit(rctx);
 
-       if (rctx->cs_shader_state.shader->ir_type != PIPE_SHADER_IR_TGSI) {
+       if (rctx->cs_shader_state.shader->ir_type != PIPE_SHADER_IR_TGSI &&
+           rctx->cs_shader_state.shader->ir_type != PIPE_SHADER_IR_NIR) {
 
                compute_setup_cbs(rctx);
 
@@ -771,6 +826,8 @@ static void compute_emit_cs(struct r600_context *rctx,
                                               rat_mask);
        }
 
+       r600_emit_atom(rctx, &rctx->b.render_cond_atom);
+
        /* Emit constant buffer state */
        r600_emit_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_COMPUTE].atom);
 
@@ -790,7 +847,7 @@ static void compute_emit_cs(struct r600_context *rctx,
        r600_emit_atom(rctx, &rctx->cs_shader_state.atom);
 
        /* Emit dispatch state and dispatch packet */
-       evergreen_emit_dispatch(rctx, info);
+       evergreen_emit_dispatch(rctx, info, indirect_grid);
 
        /* XXX evergreen_flush_emit() hardcodes the CP_COHER_SIZE to 0xffffffff
         */
@@ -810,7 +867,8 @@ static void compute_emit_cs(struct r600_context *rctx,
                radeon_emit(cs, PKT3C(PKT3_DEALLOC_STATE, 0, 0));
                radeon_emit(cs, 0);
        }
-       if (rctx->cs_shader_state.shader->ir_type == PIPE_SHADER_IR_TGSI)
+       if (rctx->cs_shader_state.shader->ir_type == PIPE_SHADER_IR_TGSI ||
+           rctx->cs_shader_state.shader->ir_type == PIPE_SHADER_IR_NIR)
                evergreen_emit_atomic_buffer_save(rctx, true, combined_atomics, &atomic_used_mask);
 
 #if 0
@@ -832,12 +890,13 @@ void evergreen_emit_cs_shader(struct r600_context *rctx,
        struct r600_cs_shader_state *state =
                                        (struct r600_cs_shader_state*)atom;
        struct r600_pipe_compute *shader = state->shader;
-       struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+       struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
        uint64_t va;
        struct r600_resource *code_bo;
        unsigned ngpr, nstack;
 
-       if (shader->ir_type == PIPE_SHADER_IR_TGSI) {
+       if (shader->ir_type == PIPE_SHADER_IR_TGSI ||
+           shader->ir_type == PIPE_SHADER_IR_NIR) {
                code_bo = shader->sel->current->bo;
                va = shader->sel->current->bo->gpu_address;
                ngpr = shader->sel->current->shader.bc.ngpr;
@@ -871,7 +930,8 @@ static void evergreen_launch_grid(struct pipe_context *ctx,
        struct r600_pipe_compute *shader = rctx->cs_shader_state.shader;
        boolean use_kill;
 
-       if (shader->ir_type != PIPE_SHADER_IR_TGSI) {
+       if (shader->ir_type != PIPE_SHADER_IR_TGSI &&
+           shader->ir_type != PIPE_SHADER_IR_NIR) {
                rctx->cs_shader_state.pc = info->pc;
                /* Get the config information for this kernel. */
                r600_shader_binary_read_config(&shader->binary, &shader->bc,