/* Upload the uniforms to the indirect CL first */
struct v3d_cl_reloc fs_uniforms =
- v3d_write_uniforms(v3d, v3d->prog.fs,
+ v3d_write_uniforms(v3d, job, v3d->prog.fs,
PIPE_SHADER_FRAGMENT);
struct v3d_cl_reloc vs_uniforms =
- v3d_write_uniforms(v3d, v3d->prog.vs,
+ v3d_write_uniforms(v3d, job, v3d->prog.vs,
PIPE_SHADER_VERTEX);
struct v3d_cl_reloc cs_uniforms =
- v3d_write_uniforms(v3d, v3d->prog.cs,
+ v3d_write_uniforms(v3d, job, v3d->prog.cs,
PIPE_SHADER_VERTEX);
/* Update the cache dirty flag based on the shader progs data */
v3d_flush(pctx);
}
+#if V3D_VERSION >= 41
+#define V3D_CSD_CFG012_WG_COUNT_SHIFT 16
+#define V3D_CSD_CFG012_WG_OFFSET_SHIFT 0
+/* Allow this dispatch to start while the last one is still running. */
+#define V3D_CSD_CFG3_OVERLAP_WITH_PREV (1 << 26)
+/* Maximum supergroup ID. 6 bits. */
+#define V3D_CSD_CFG3_MAX_SG_ID_SHIFT 20
+/* Batches per supergroup minus 1. 8 bits. */
+#define V3D_CSD_CFG3_BATCHES_PER_SG_M1_SHIFT 12
+/* Workgroups per supergroup. 0 means 16. */
+#define V3D_CSD_CFG3_WGS_PER_SG_SHIFT 8
+#define V3D_CSD_CFG3_WG_SIZE_SHIFT 0
+
+#define V3D_CSD_CFG5_PROPAGATE_NANS (1 << 2)
+#define V3D_CSD_CFG5_SINGLE_SEG (1 << 1)
+#define V3D_CSD_CFG5_THREADING (1 << 0)
+
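+/* Illustrative CFG3 packing: for a hypothetical 8x8x1 workgroup
+ * (wg_size = 64) dispatched at one workgroup per supergroup, the
+ * fields above combine as
+ *
+ *   (1 << V3D_CSD_CFG3_WGS_PER_SG_SHIFT) |
+ *   ((DIV_ROUND_UP(1 * 64, 16) - 1) << V3D_CSD_CFG3_BATCHES_PER_SG_M1_SHIFT) |
+ *   ((64 & 0xff) << V3D_CSD_CFG3_WG_SIZE_SHIFT)
+ *
+ * == 0x3140: one 64-invocation workgroup per supergroup, run as 4
+ * batches of 16.
+ */
+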
+static void
+v3d_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info)
+{
+ struct v3d_context *v3d = v3d_context(pctx);
+ struct v3d_screen *screen = v3d->screen;
+
+ v3d_predraw_check_stage_inputs(pctx, PIPE_SHADER_COMPUTE);
+
+ v3d_update_compiled_cs(v3d);
+
+ if (!v3d->prog.compute->resource) {
+ static bool warned = false;
+ if (!warned) {
+ fprintf(stderr,
+ "Compute shader failed to compile. "
+ "Expect corruption.\n");
+ warned = true;
+ }
+ return;
+ }
+
+ /* Some of the units of scale:
+ *
+ * - Batches of 16 work items (shader invocations) that will be queued
+ * to run on a QPU at once.
+ *
+ * - Workgroups composed of work items based on the shader's layout
+ * declaration.
+ *
+ * - Supergroups of 1-16 workgroups. There can only be 16 supergroups
+ * running at a time on the core, so we want to keep them large to
+ * keep the QPUs busy, but a whole supergroup will sync at a barrier
+ * so we want to keep them small if one is present.
+ */
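+
+ /* Worked example: an 8x8x1 local size gives wg_size = 64, i.e.
+ * DIV_ROUND_UP(64, 16) = 4 batches per workgroup. At 4 WGs per SG
+ * a supergroup spans 256 invocations; a barrier would then sync
+ * all 256, which is why small supergroups win when a barrier is
+ * present.
+ */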
+ struct drm_v3d_submit_csd submit = { 0 };
+ struct v3d_job *job = v3d_job_create(v3d);
+
+ /* Set up the actual number of workgroups, synchronously mapping the
+ * indirect buffer if necessary to get the dimensions.
+ */
+ if (info->indirect) {
+ struct pipe_transfer *transfer;
+ uint32_t *map = pipe_buffer_map_range(pctx, info->indirect,
+ info->indirect_offset,
+ 3 * sizeof(uint32_t),
+ PIPE_TRANSFER_READ,
+ &transfer);
+ memcpy(v3d->compute_num_workgroups, map, 3 * sizeof(uint32_t));
+ pipe_buffer_unmap(pctx, transfer);
+
+ if (v3d->compute_num_workgroups[0] == 0 ||
+ v3d->compute_num_workgroups[1] == 0 ||
+ v3d->compute_num_workgroups[2] == 0) {
+ /* Nothing to dispatch, so skip the launch (CSD can't
+ * handle 0 workgroups). Free the job created above so
+ * the early return doesn't leak it.
+ */
+ v3d_job_free(v3d, job);
+ return;
+ }
+ } else {
+ v3d->compute_num_workgroups[0] = info->grid[0];
+ v3d->compute_num_workgroups[1] = info->grid[1];
+ v3d->compute_num_workgroups[2] = info->grid[2];
+ }
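+
+ /* Note that cfg[0..2] take literal workgroup counts, so there is
+ * no GPU-side indirect dispatch here: the counts have to be read
+ * back on the CPU (above), which can stall if the GPU just wrote
+ * them.
+ */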
+
+ for (int i = 0; i < 3; i++) {
+ submit.cfg[i] |= (v3d->compute_num_workgroups[i] <<
+ V3D_CSD_CFG012_WG_COUNT_SHIFT);
+ }
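+
+ /* CFG0..2 each hold a 16-bit workgroup count in the high half and
+ * a workgroup offset in the low half (left at 0 here), so e.g.
+ * 256 workgroups in X gives cfg[0] = 256 << 16 = 0x01000000.
+ */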
+
+ perf_debug("CSD only using single WG per SG currently, "
+ "should increase that when possible.");
+ int wgs_per_sg = 1;
+ int wg_size = info->block[0] * info->block[1] * info->block[2];
+ submit.cfg[3] |= wgs_per_sg << V3D_CSD_CFG3_WGS_PER_SG_SHIFT;
+ submit.cfg[3] |= ((DIV_ROUND_UP(wgs_per_sg * wg_size, 16) - 1) <<
+ V3D_CSD_CFG3_BATCHES_PER_SG_M1_SHIFT);
+ submit.cfg[3] |= (wg_size & 0xff) << V3D_CSD_CFG3_WG_SIZE_SHIFT;
+
+ int batches_per_wg = DIV_ROUND_UP(wg_size, 16);
+ /* Number of batches the dispatch will invoke (minus 1). */
+ submit.cfg[4] = batches_per_wg * (v3d->compute_num_workgroups[0] *
+ v3d->compute_num_workgroups[1] *
+ v3d->compute_num_workgroups[2]) - 1;
+
+ /* Make sure we didn't accidentally underflow. */
+ assert(submit.cfg[4] != ~0);
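+
+ /* For example, a 2x3x4 grid of 64-invocation workgroups gives
+ * batches_per_wg = 4 and cfg[4] = 4 * 24 - 1 = 95; a 0-batch
+ * dispatch would underflow to ~0, which the assert catches.
+ */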
+
+ v3d_job_add_bo(job, v3d_resource(v3d->prog.compute->resource)->bo);
+ submit.cfg[5] = (v3d_resource(v3d->prog.compute->resource)->bo->offset +
+ v3d->prog.compute->offset);
+ submit.cfg[5] |= V3D_CSD_CFG5_PROPAGATE_NANS;
+ if (v3d->prog.compute->prog_data.base->single_seg)
+ submit.cfg[5] |= V3D_CSD_CFG5_SINGLE_SEG;
+ if (v3d->prog.compute->prog_data.base->threads == 4)
+ submit.cfg[5] |= V3D_CSD_CFG5_THREADING;
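+
+ /* The flag bits share CFG5 with the shader's start address, which
+ * is assumed to be at least 8-byte aligned (QPU instructions are
+ * 64 bits wide), leaving the low bits free for the flags.
+ */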
+
+ if (v3d->prog.compute->prog_data.compute->shared_size) {
+ v3d->compute_shared_memory =
+ v3d_bo_alloc(v3d->screen,
+ v3d->prog.compute->prog_data.compute->shared_size *
+ wgs_per_sg,
+ "shared_vars");
+ }
+
+ struct v3d_cl_reloc uniforms = v3d_write_uniforms(v3d, job,
+ v3d->prog.compute,
+ PIPE_SHADER_COMPUTE);
+ v3d_job_add_bo(job, uniforms.bo);
+ submit.cfg[6] = uniforms.bo->offset + uniforms.offset;
+
+ /* Pull some job state that was stored in a SUBMIT_CL struct out to
+ * our SUBMIT_CSD struct.
+ */
+ submit.bo_handles = job->submit.bo_handles;
+ submit.bo_handle_count = job->submit.bo_handle_count;
+
+ /* Serialize this with the rest of our command stream. */
+ submit.in_sync = v3d->out_sync;
+ submit.out_sync = v3d->out_sync;
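+
+ /* Reusing the context's syncobj as both in_sync and out_sync makes
+ * this dispatch wait for the previously submitted job and lets the
+ * next submission wait for us in turn.
+ */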
+
+ if (!(V3D_DEBUG & V3D_DEBUG_NORAST)) {
+ int ret = v3d_ioctl(screen->fd, DRM_IOCTL_V3D_SUBMIT_CSD,
+ &submit);
+ static bool warned = false;
+ if (ret && !warned) {
+ fprintf(stderr, "CSD submit call returned %s. "
+ "Expect corruption.\n", strerror(errno));
+ warned = true;
+ }
+ }
+
+ v3d_job_free(v3d, job);
+
+ /* Mark SSBOs (and, below, shader images) as being written. We
+ * don't actually know which ones are read vs. written, so just
+ * assume the worst.
+ */
+ foreach_bit(i, v3d->ssbo[PIPE_SHADER_COMPUTE].enabled_mask) {
+ struct v3d_resource *rsc = v3d_resource(
+ v3d->ssbo[PIPE_SHADER_COMPUTE].sb[i].buffer);
+ rsc->writes++; /* XXX */
+ }
+
+ foreach_bit(i, v3d->shaderimg[PIPE_SHADER_COMPUTE].enabled_mask) {
+ struct v3d_resource *rsc = v3d_resource(
+ v3d->shaderimg[PIPE_SHADER_COMPUTE].si[i].base.resource);
+ rsc->writes++;
+ }
+
+ v3d_bo_unreference(&uniforms.bo);
+ v3d_bo_unreference(&v3d->compute_shared_memory);
+}
+#endif
+
/**
* Implements gallium's clear() hook (glClear()) by drawing a pair of triangles.
*/
pctx->clear = v3d_clear;
pctx->clear_render_target = v3d_clear_render_target;
pctx->clear_depth_stencil = v3d_clear_depth_stencil;
+#if V3D_VERSION >= 41
+ if (v3d_context(pctx)->screen->has_csd)
+ pctx->launch_grid = v3d_launch_grid;
+#endif
}
(0 << V3D_CTL_0_L2TCACTL_L2TFLM_LSB));
}
+/* Flushes dirty texture cachelines from the L1 write combiner */
+static void
+v3d_flush_l1td(struct v3d_hw *v3d)
+{
+ V3D_WRITE(V3D_CTL_0_L2TCACTL,
+ V3D_CTL_0_L2TCACTL_TMUWCF_SET);
+
+ assert(!(V3D_READ(V3D_CTL_0_L2TCACTL) & V3D_CTL_0_L2TCACTL_L2TFLS_SET));
+}
+
+/* Flushes dirty texture L2 cachelines */
+static void
+v3d_flush_l2t(struct v3d_hw *v3d)
+{
+ V3D_WRITE(V3D_CTL_0_L2TFLSTA, 0);
+ V3D_WRITE(V3D_CTL_0_L2TFLEND, ~0);
+ V3D_WRITE(V3D_CTL_0_L2TCACTL,
+ V3D_CTL_0_L2TCACTL_L2TFLS_SET |
+ (2 << V3D_CTL_0_L2TCACTL_L2TFLM_LSB));
+
+ assert(!(V3D_READ(V3D_CTL_0_L2TCACTL) & V3D_CTL_0_L2TCACTL_L2TFLS_SET));
+}
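+
+/* Mode 2 in L2TFLM appears to select a clean (writeback) of dirty lines,
+ * unlike the mode-0 invalidate used above; the simulated flush completes
+ * before the register write returns, hence the assert rather than a poll.
+ */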
+
/* Invalidates the slice caches. These are read-only caches. */
static void
v3d_invalidate_slices(struct v3d_hw *v3d)
}
}
+static UNUSED void
+v3d_flush_caches(struct v3d_hw *v3d)
+{
+ v3d_flush_l1td(v3d);
+ v3d_flush_l2t(v3d);
+}
+
int
v3dX(simulator_submit_tfu_ioctl)(struct v3d_hw *v3d,
struct drm_v3d_submit_tfu *args)
return 0;
}
+#if V3D_VERSION >= 41
+int
+v3dX(simulator_submit_csd_ioctl)(struct v3d_hw *v3d,
+ struct drm_v3d_submit_csd *args,
+ uint32_t gmp_ofs)
+{
+ g_gmp_ofs = gmp_ofs;
+ v3d_reload_gmp(v3d);
+
+ v3d_invalidate_caches(v3d);
+
+ V3D_WRITE(V3D_CSD_0_QUEUED_CFG1, args->cfg[1]);
+ V3D_WRITE(V3D_CSD_0_QUEUED_CFG2, args->cfg[2]);
+ V3D_WRITE(V3D_CSD_0_QUEUED_CFG3, args->cfg[3]);
+ V3D_WRITE(V3D_CSD_0_QUEUED_CFG4, args->cfg[4]);
+ V3D_WRITE(V3D_CSD_0_QUEUED_CFG5, args->cfg[5]);
+ V3D_WRITE(V3D_CSD_0_QUEUED_CFG6, args->cfg[6]);
+ /* CFG0 kicks off the job */
+ V3D_WRITE(V3D_CSD_0_QUEUED_CFG0, args->cfg[0]);
+
+ while (V3D_READ(V3D_CSD_0_STATUS) &
+ (V3D_CSD_0_STATUS_HAVE_CURRENT_DISPATCH_SET |
+ V3D_CSD_0_STATUS_HAVE_QUEUED_DISPATCH_SET)) {
+ v3d_hw_tick(v3d);
+ }
+
+ v3d_flush_caches(v3d);
+
+ return 0;
+}
+#endif
+
int
v3dX(simulator_get_param_ioctl)(struct v3d_hw *v3d,
struct drm_v3d_get_param *args)
case DRM_V3D_PARAM_SUPPORTS_TFU:
args->value = 1;
return 0;
+ case DRM_V3D_PARAM_SUPPORTS_CSD:
+ args->value = V3D_VERSION >= 41;
+ return 0;
}
if (args->param < ARRAY_SIZE(reg_map) && reg_map[args->param]) {