--- /dev/null
+/*
+ * Copyright (C) 2019 Rob Clark <robclark@freedesktop.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robclark@freedesktop.org>
+ */
+
+#include "pipe/p_state.h"
+
+#include "freedreno_resource.h"
+
+#include "fd6_compute.h"
+#include "fd6_context.h"
+#include "fd6_emit.h"
+
+struct fd6_compute_stateobj {
+ struct ir3_shader *shader;
+};
+
+
+static void *
+fd6_create_compute_state(struct pipe_context *pctx,
+ const struct pipe_compute_state *cso)
+{
+ struct fd_context *ctx = fd_context(pctx);
+
+ /* req_input_mem will only be non-zero for cl kernels (i.e. clover).
+ * This isn't a perfect test, because it is possible (though uncommon)
+ * for none of the kernel parameters to be a global, but
+ * ctx->set_global_bindings() can't fail, so this is the next best
+ * place to fail if we need a newer version of the kernel driver:
+ */
+ if ((cso->req_input_mem > 0) &&
+ fd_device_version(ctx->dev) < FD_VERSION_BO_IOVA) {
+ return NULL;
+ }
+
+ struct ir3_compiler *compiler = ctx->screen->compiler;
+ struct fd6_compute_stateobj *so = CALLOC_STRUCT(fd6_compute_stateobj);
+ so->shader = ir3_shader_create_compute(compiler, cso, &ctx->debug);
+ return so;
+}
+
+static void
+fd6_delete_compute_state(struct pipe_context *pctx, void *hwcso)
+{
+ struct fd6_compute_stateobj *so = hwcso;
+ ir3_shader_destroy(so->shader);
+ free(so);
+}
+
+/* maybe move to fd6_program? */
+static void
+cs_program_emit(struct fd_ringbuffer *ring, struct ir3_shader_variant *v,
+ const struct pipe_grid_info *info)
+{
+ const struct ir3_info *i = &v->info;
+ enum a3xx_threadsize thrsz = FOUR_QUADS;
+
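+ /* Writing 0xff to HLSQ_UPDATE_CNTL appears to mark all HLSQ state slots
+ * dirty before the compute program is (re)programmed below:
+ */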
+ OUT_PKT4(ring, REG_A6XX_HLSQ_UPDATE_CNTL, 1);
+ OUT_RING(ring, 0xff);
+
+ unsigned constlen = align(v->constlen, 4);
+ OUT_PKT4(ring, REG_A6XX_HLSQ_CS_CNTL, 1);
+ OUT_RING(ring, A6XX_HLSQ_CS_CNTL_CONSTLEN(constlen) |
+ A6XX_HLSQ_CS_CNTL_ENABLED);
+
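+ /* Enable the CS stage and program its resource counts (IBOs covering
+ * SSBOs + images, textures, samplers) and the instruction length:
+ */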
+ OUT_PKT4(ring, REG_A6XX_SP_CS_CONFIG, 2);
+ OUT_RING(ring, A6XX_SP_CS_CONFIG_ENABLED |
+ A6XX_SP_CS_CONFIG_NIBO(v->image_mapping.num_ibo) |
+ A6XX_SP_CS_CONFIG_NTEX(v->num_samp) |
+ A6XX_SP_CS_CONFIG_NSAMP(v->num_samp)); /* SP_CS_CONFIG */
+ OUT_RING(ring, v->instrlen); /* SP_CS_INSTRLEN */
+
+ OUT_PKT4(ring, REG_A6XX_SP_CS_CTRL_REG0, 1);
+ OUT_RING(ring, A6XX_SP_CS_CTRL_REG0_THREADSIZE(thrsz) |
+ A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(i->max_reg + 1) |
+ A6XX_SP_CS_CTRL_REG0_MERGEDREGS |
+ A6XX_SP_CS_CTRL_REG0_BRANCHSTACK(v->branchstack) |
+ COND(v->num_samp > 0, A6XX_SP_CS_CTRL_REG0_PIXLODENABLE));
+
+ OUT_PKT4(ring, REG_A6XX_SP_CS_UNKNOWN_A9B1, 1);
+ OUT_RING(ring, 0x41);
+
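+ /* Look up which registers the variant expects the workgroup-id and
+ * local-invocation-id sysvals in; regid(63, 0) is the "unused" value:
+ */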
+ uint32_t local_invocation_id, work_group_id;
+ local_invocation_id = ir3_find_sysval_regid(v, SYSTEM_VALUE_LOCAL_INVOCATION_ID);
+ work_group_id = ir3_find_sysval_regid(v, SYSTEM_VALUE_WORK_GROUP_ID);
+
+ OUT_PKT4(ring, REG_A6XX_HLSQ_CS_CNTL_0, 2);
+ OUT_RING(ring, A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID(work_group_id) |
+ A6XX_HLSQ_CS_CNTL_0_UNK0(regid(63, 0)) |
+ A6XX_HLSQ_CS_CNTL_0_UNK1(regid(63, 0)) |
+ A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(local_invocation_id));
+ OUT_RING(ring, 0x2fc); /* HLSQ_CS_UNKNOWN_B998 */
+
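+ /* Emit the GPU address of the shader binary and upload its instructions: */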
+ OUT_PKT4(ring, REG_A6XX_SP_CS_OBJ_START_LO, 2);
+ OUT_RELOC(ring, v->bo, 0, 0, 0); /* SP_CS_OBJ_START_LO/HI */
+
+ if (v->instrlen > 0)
+ fd6_emit_shader(ring, v);
+}
+
+static void
+fd6_launch_grid(struct fd_context *ctx, const struct pipe_grid_info *info)
+{
+ struct fd6_compute_stateobj *so = ctx->compute;
+ struct ir3_shader_key key = {};
+ struct ir3_shader_variant *v;
+ struct fd_ringbuffer *ring = ctx->batch->draw;
+ unsigned i, nglobal = 0;
+
+ fd6_emit_restore(ctx->batch, ring);
+
+ v = ir3_shader_variant(so->shader, key, false, &ctx->debug);
+ if (!v)
+ return;
+
+ if (ctx->dirty_shader[PIPE_SHADER_COMPUTE] & FD_DIRTY_SHADER_PROG)
+ cs_program_emit(ring, v, info);
+
+ fd6_emit_cs_state(ctx, ring, v);
+ ir3_emit_cs_consts(v, ring, ctx, info);
+
+ foreach_bit(i, ctx->global_bindings.enabled_mask)
+ nglobal++;
+
+ if (nglobal > 0) {
+ /* global resources don't otherwise get an OUT_RELOC(), since
+ * the raw ptr address is emitted in ir3_emit_cs_consts().
+ * So to make the kernel aware that these buffers are referenced
+ * by the batch, emit dummy relocs as part of a no-op packet
+ * payload:
+ */
+ OUT_PKT7(ring, CP_NOP, 2 * nglobal);
+ foreach_bit(i, ctx->global_bindings.enabled_mask) {
+ struct pipe_resource *prsc = ctx->global_bindings.buf[i];
+ OUT_RELOCW(ring, fd_resource(prsc)->bo, 0, 0, 0);
+ }
+ }
+
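+ /* Switch the CP into compute mode (marker mode 0x8 is believed to
+ * correspond to RM6_COMPUTE):
+ */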
+ OUT_PKT7(ring, CP_SET_MARKER, 1);
+ OUT_RING(ring, A2XX_CP_SET_MARKER_0_MODE(0x8));
+
+ const unsigned *local_size = info->block; // v->shader->nir->info->cs.local_size;
+ const unsigned *num_groups = info->grid;
+ /* for some reason, mesa/st doesn't set info->work_dim, so just assume 3: */
+ const unsigned work_dim = info->work_dim ? info->work_dim : 3;
+ OUT_PKT4(ring, REG_A6XX_HLSQ_CS_NDRANGE_0, 7);
+ OUT_RING(ring, A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM(work_dim) |
+ A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(local_size[0] - 1) |
+ A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(local_size[1] - 1) |
+ A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(local_size[2] - 1));
+ OUT_RING(ring, A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(local_size[0] * num_groups[0]));
+ OUT_RING(ring, 0); /* HLSQ_CS_NDRANGE_2_GLOBALOFF_X */
+ OUT_RING(ring, A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(local_size[1] * num_groups[1]));
+ OUT_RING(ring, 0); /* HLSQ_CS_NDRANGE_4_GLOBALOFF_Y */
+ OUT_RING(ring, A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(local_size[2] * num_groups[2]));
+ OUT_RING(ring, 0); /* HLSQ_CS_NDRANGE_6_GLOBALOFF_Z */
+
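+ /* Kernel group counts are left at 1, i.e. the grid is not split into
+ * multiple kernel groups:
+ */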
+ OUT_PKT4(ring, REG_A6XX_HLSQ_CS_KERNEL_GROUP_X, 3);
+ OUT_RING(ring, 1); /* HLSQ_CS_KERNEL_GROUP_X */
+ OUT_RING(ring, 1); /* HLSQ_CS_KERNEL_GROUP_Y */
+ OUT_RING(ring, 1); /* HLSQ_CS_KERNEL_GROUP_Z */
+
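+ /* For indirect dispatch the CP reads the workgroup counts from a GPU
+ * buffer at indirect_offset; the local size is still encoded in the packet:
+ */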
+ if (info->indirect) {
+ struct fd_resource *rsc = fd_resource(info->indirect);
+
+ OUT_PKT7(ring, CP_EXEC_CS_INDIRECT, 4);
+ OUT_RING(ring, 0x00000000);
+ OUT_RELOC(ring, rsc->bo, info->indirect_offset, 0, 0); /* ADDR_LO/HI */
+ OUT_RING(ring, A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX(local_size[0] - 1) |
+ A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY(local_size[1] - 1) |
+ A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(local_size[2] - 1));
+ } else {
+ OUT_PKT7(ring, CP_EXEC_CS, 4);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, CP_EXEC_CS_1_NGROUPS_X(info->grid[0]));
+ OUT_RING(ring, CP_EXEC_CS_2_NGROUPS_Y(info->grid[1]));
+ OUT_RING(ring, CP_EXEC_CS_3_NGROUPS_Z(info->grid[2]));
+ }
+
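+ /* Conservatively wait for idle after the dispatch before any subsequent
+ * state changes:
+ */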
+ OUT_WFI5(ring);
+}
+
+void
+fd6_compute_init(struct pipe_context *pctx)
+{
+ struct fd_context *ctx = fd_context(pctx);
+ ctx->launch_grid = fd6_launch_grid;
+ pctx->create_compute_state = fd6_create_compute_state;
+ pctx->delete_compute_state = fd6_delete_compute_state;
+}
opcode = CP_LOAD_STATE6_FRAG;
tex_samp_reg = REG_A6XX_SP_CS_TEX_SAMP_LO;
tex_const_reg = REG_A6XX_SP_CS_TEX_CONST_LO;
- tex_count_reg = 0; //REG_A6XX_SP_CS_TEX_COUNT;
+ tex_count_reg = REG_A6XX_SP_CS_TEX_COUNT;
break;
default:
unreachable("bad state block");
fd_ringbuffer_del(state);
}
- if (tex_count_reg) {
- OUT_PKT4(ring, tex_count_reg, 1);
- OUT_RING(ring, num_merged_textures);
- }
+ OUT_PKT4(ring, tex_count_reg, 1);
+ OUT_RING(ring, num_merged_textures);
return needs_border;
}
{
enum fd_dirty_shader_state dirty = ctx->dirty_shader[PIPE_SHADER_COMPUTE];
- if (dirty & FD_DIRTY_SHADER_TEX) {
- bool needs_border = false;
- needs_border |= fd6_emit_textures(ctx->pipe, ring, SB6_CS_TEX,
- &ctx->tex[PIPE_SHADER_COMPUTE], 0, NULL, NULL, NULL);
+ if (dirty & (FD_DIRTY_SHADER_TEX | FD_DIRTY_SHADER_PROG |
+ FD_DIRTY_SHADER_IMAGE | FD_DIRTY_SHADER_SSBO)) {
+ struct fd_texture_stateobj *tex = &ctx->tex[PIPE_SHADER_COMPUTE];
+ struct fd_shaderbuf_stateobj *buf = &ctx->shaderbuf[PIPE_SHADER_COMPUTE];
+ struct fd_shaderimg_stateobj *img = &ctx->shaderimg[PIPE_SHADER_COMPUTE];
+ unsigned bcolor_offset = fd6_border_color_offset(ctx, SB6_CS_TEX, tex);
+
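+ /* Emit CS texture/sampler state; SSBO and image views are folded into
+ * the texture state as well, which is why buf/img are passed in:
+ */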
+ bool needs_border = fd6_emit_textures(ctx->pipe, ring, SB6_CS_TEX, tex,
+ bcolor_offset, cp, buf, img);
if (needs_border)
emit_border_color(ctx, ring);
-#if 0
- OUT_PKT4(ring, REG_A6XX_TPL1_VS_TEX_COUNT, 1);
+ OUT_PKT4(ring, REG_A6XX_SP_VS_TEX_COUNT, 1);
OUT_RING(ring, 0);
- OUT_PKT4(ring, REG_A6XX_TPL1_HS_TEX_COUNT, 1);
+ OUT_PKT4(ring, REG_A6XX_SP_HS_TEX_COUNT, 1);
OUT_RING(ring, 0);
- OUT_PKT4(ring, REG_A6XX_TPL1_DS_TEX_COUNT, 1);
+ OUT_PKT4(ring, REG_A6XX_SP_DS_TEX_COUNT, 1);
OUT_RING(ring, 0);
- OUT_PKT4(ring, REG_A6XX_TPL1_GS_TEX_COUNT, 1);
+ OUT_PKT4(ring, REG_A6XX_SP_GS_TEX_COUNT, 1);
OUT_RING(ring, 0);
- OUT_PKT4(ring, REG_A6XX_TPL1_FS_TEX_COUNT, 1);
+ OUT_PKT4(ring, REG_A6XX_SP_FS_TEX_COUNT, 1);
OUT_RING(ring, 0);
-#endif
}
-#if 0
- OUT_PKT4(ring, REG_A6XX_TPL1_CS_TEX_COUNT, 1);
- OUT_RING(ring, ctx->shaderimg[PIPE_SHADER_COMPUTE].enabled_mask ?
- ~0 : ctx->tex[PIPE_SHADER_COMPUTE].num_textures);
-#endif
+ if (dirty & (FD_DIRTY_SHADER_SSBO | FD_DIRTY_SHADER_IMAGE)) {
+ struct fd_ringbuffer *state =
+ fd6_build_ibo_state(ctx, cp, PIPE_SHADER_COMPUTE);
+ const struct ir3_ibo_mapping *mapping = &cp->image_mapping;
-// if (dirty & FD_DIRTY_SHADER_SSBO)
-// fd6_emit_ssbos(ctx, ring, PIPE_SHADER_COMPUTE);
-//
-// if (dirty & FD_DIRTY_SHADER_IMAGE)
-// fd6_emit_images(ctx, ring, PIPE_SHADER_COMPUTE);
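+ /* Load the SSBO/image (IBO) descriptor table built above into the CS
+ * state block:
+ */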
+ OUT_PKT7(ring, CP_LOAD_STATE6_FRAG, 3);
+ OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(0) |
+ CP_LOAD_STATE6_0_STATE_TYPE(ST6_IBO) |
+ CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
+ CP_LOAD_STATE6_0_STATE_BLOCK(SB6_CS_SHADER) |
+ CP_LOAD_STATE6_0_NUM_UNIT(mapping->num_ibo));
+ OUT_RB(ring, state);
+
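+ /* Also point SP_CS_IBO at the same descriptor buffer and program the
+ * IBO count:
+ */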
+ OUT_PKT4(ring, REG_A6XX_SP_CS_IBO_LO, 2);
+ OUT_RB(ring, state);
+
+ OUT_PKT4(ring, REG_A6XX_SP_CS_IBO_COUNT, 1);
+ OUT_RING(ring, mapping->num_ibo);
+
+ fd_ringbuffer_del(state);
+ }
}