- uint32_t width = vc4->framebuffer.width;
- uint32_t height = vc4->framebuffer.height;
- uint32_t tilew = align(width, 64) / 64;
- uint32_t tileh = align(height, 64) / 64;
-
- uint32_t tile_alloc_size = 32 * tilew * tileh * 16;
- uint32_t tile_state_size = 48 * tilew * tileh;
- if (!vc4->tile_alloc || vc4->tile_alloc->size < tile_alloc_size) {
- vc4_bo_unreference(&vc4->tile_alloc);
- vc4->tile_alloc = vc4_bo_alloc(vc4->screen, tile_alloc_size,
- "tile_alloc");
- }
- if (!vc4->tile_state || vc4->tile_state->size < tile_state_size) {
- vc4_bo_unreference(&vc4->tile_state);
- vc4->tile_state = vc4_bo_alloc(vc4->screen, tile_state_size,
- "tile_state");
- }
-
- // Tile state data is 48 bytes per tile, I think it can be thrown away
- // as soon as binning is finished.
- cl_start_reloc(&vc4->bcl, 2);
- cl_u8(&vc4->bcl, VC4_PACKET_TILE_BINNING_MODE_CONFIG);
- cl_reloc(vc4, &vc4->bcl, vc4->tile_alloc, 0);
- cl_u32(&vc4->bcl, vc4->tile_alloc->size);
- cl_reloc(vc4, &vc4->bcl, vc4->tile_state, 0);
- cl_u8(&vc4->bcl, tilew);
- cl_u8(&vc4->bcl, tileh);
- cl_u8(&vc4->bcl, VC4_BIN_CONFIG_AUTO_INIT_TSDA);
-
- cl_u8(&vc4->bcl, VC4_PACKET_START_TILE_BINNING);
-
- cl_u8(&vc4->bcl, VC4_PACKET_PRIMITIVE_LIST_FORMAT);
- cl_u8(&vc4->bcl, 0x12); // 16 bit triangle
-
- vc4->needs_flush = true;
- vc4->draw_call_queued = true;
+ vc4_get_draw_cl_space(job, 0);
+
+ cl_emit(&job->bcl, TILE_BINNING_MODE_CONFIGURATION, bin) {
+ bin.width_in_tiles = job->draw_tiles_x;
+ bin.height_in_tiles = job->draw_tiles_y;
+ bin.multisample_mode_4x = job->msaa;
+ }
+
+ /* START_TILE_BINNING resets the statechange counters in the hardware,
+ * which are what is used when a primitive is binned to a tile to
+ * figure out what new state packets need to be written to that tile's
+ * command list.
+ */
+ cl_emit(&job->bcl, START_TILE_BINNING, start);
+
+ /* Reset the current compressed primitives format. This gets modified
+ * by VC4_PACKET_GL_INDEXED_PRIMITIVE and
+ * VC4_PACKET_GL_ARRAY_PRIMITIVE, so it needs to be reset at the start
+ * of every tile.
+ */
+ cl_emit(&job->bcl, PRIMITIVE_LIST_FORMAT, list) {
+ list.data_type = _16_BIT_INDEX;
+ list.primitive_type = TRIANGLES_LIST;
+ }
+
+ job->needs_flush = true;
+ job->draw_width = vc4->framebuffer.width;
+ job->draw_height = vc4->framebuffer.height;
+}
+
+/* Prepares the textures bound to one shader stage before a draw reads
+ * from them.
+ *
+ * For each bound sampler view: if the backing resource is a shadow of
+ * another resource (rsc->shadow_parent), the shadow copy is refreshed
+ * first (presumably re-derived from the parent for baselevel handling —
+ * see vc4_update_shadow_baselevel_texture); then any queued job that
+ * writes the texture is flushed so the draw samples current contents.
+ */
+static void
+vc4_predraw_check_textures(struct pipe_context *pctx,
+                           struct vc4_texture_stateobj *stage_tex)
+{
+ struct vc4_context *vc4 = vc4_context(pctx);
+
+ for (int i = 0; i < stage_tex->num_textures; i++) {
+ struct pipe_sampler_view *view = stage_tex->textures[i];
+ if (!view)
+ continue;
+ struct vc4_resource *rsc = vc4_resource(view->texture);
+ /* Shadow resources must be brought up to date before sampling. */
+ if (rsc->shadow_parent)
+ vc4_update_shadow_baselevel_texture(pctx, view);
+
+ vc4_flush_jobs_writing_resource(vc4, view->texture);
+ }
+}
+
+/* Emits the full GL shader state for a draw:
+ *
+ *  - the shader record (FS/VS/CS code addresses, varying count, and
+ *    per-stage attribute layout),
+ *  - one attribute record per vertex element (or one dummy record when
+ *    there are no elements, to satisfy the simulator),
+ *  - the GL_SHADER_STATE packet in the binner CL pointing at the record,
+ *  - the uniform streams for FS, VS, and CS.
+ *
+ * As a side effect it records vc4->last_index_bias (the index bias baked
+ * into the attribute addresses — explicitly not dirty-tracked, per the
+ * comment below) and vc4->max_index (the largest vertex index that stays
+ * in bounds of every bound vertex buffer), and bumps the job's shader
+ * record count.
+ */
+static void
+vc4_emit_gl_shader_state(struct vc4_context *vc4,
+                         const struct pipe_draw_info *info,
+                         uint32_t extra_index_bias)
+{
+ struct vc4_job *job = vc4->job;
+ /* VC4_DIRTY_VTXSTATE */
+ struct vc4_vertex_stateobj *vtx = vc4->vtx;
+ /* VC4_DIRTY_VTXBUF */
+ struct vc4_vertexbuf_stateobj *vertexbuf = &vc4->vertexbuf;
+
+ /* The simulator throws a fit if VS or CS don't read an attribute, so
+ * we emit a dummy read.
+ */
+ uint32_t num_elements_emit = MAX2(vtx->num_elements, 1);
+
+ /* Emit the shader record. */
+ cl_start_shader_reloc(&job->shader_rec, 3 + num_elements_emit);
+
+ cl_emit(&job->shader_rec, SHADER_RECORD, rec) {
+ rec.enable_clipping = true;
+
+ /* VC4_DIRTY_COMPILED_FS */
+ rec.fragment_shader_is_single_threaded =
+ !vc4->prog.fs->fs_threaded;
+
+ /* VC4_DIRTY_PRIM_MODE | VC4_DIRTY_RASTERIZER */
+ rec.point_size_included_in_shaded_vertex_data =
+ (info->mode == PIPE_PRIM_POINTS &&
+ vc4->rasterizer->base.point_size_per_vertex);
+
+ /* VC4_DIRTY_COMPILED_FS */
+ rec.fragment_shader_number_of_varyings =
+ vc4->prog.fs->num_inputs;
+ rec.fragment_shader_code_address =
+ cl_address(vc4->prog.fs->bo, 0);
+
+ rec.coordinate_shader_attribute_array_select_bits =
+ vc4->prog.cs->vattrs_live;
+ rec.coordinate_shader_total_attributes_size =
+ vc4->prog.cs->vattr_offsets[8];
+ rec.coordinate_shader_code_address =
+ cl_address(vc4->prog.cs->bo, 0);
+
+ rec.vertex_shader_attribute_array_select_bits =
+ vc4->prog.vs->vattrs_live;
+ rec.vertex_shader_total_attributes_size =
+ vc4->prog.vs->vattr_offsets[8];
+ rec.vertex_shader_code_address =
+ cl_address(vc4->prog.vs->bo, 0);
+ };
+
+ /* Emit one attribute record per vertex element, folding the index
+ * bias into each attribute's start address, and track the largest
+ * index that still fits inside every bound vertex buffer.
+ */
+ uint32_t max_index = 0xffff;
+ for (int i = 0; i < vtx->num_elements; i++) {
+ struct pipe_vertex_element *elem = &vtx->pipe[i];
+ struct pipe_vertex_buffer *vb =
+ &vertexbuf->vb[elem->vertex_buffer_index];
+ struct vc4_resource *rsc = vc4_resource(vb->buffer.resource);
+ /* not vc4->dirty tracked: vc4->last_index_bias */
+ uint32_t offset = (vb->buffer_offset +
+ elem->src_offset +
+ vb->stride * (info->index_bias +
+ extra_index_bias));
+ uint32_t vb_size = rsc->bo->size - offset;
+ uint32_t elem_size =
+ util_format_get_blocksize(elem->src_format);
+
+ cl_emit(&job->shader_rec, ATTRIBUTE_RECORD, attr) {
+ attr.address = cl_address(rsc->bo, offset);
+ attr.number_of_bytes_minus_1 = elem_size - 1;
+ attr.stride = vb->stride;
+ attr.coordinate_shader_vpm_offset =
+ vc4->prog.cs->vattr_offsets[i];
+ attr.vertex_shader_vpm_offset =
+ vc4->prog.vs->vattr_offsets[i];
+ }
+
+ if (vb->stride > 0) {
+ max_index = MIN2(max_index,
+ (vb_size - elem_size) / vb->stride);
+ }
+ }
+
+ /* No vertex elements bound: emit a dummy attribute record backed by
+ * a scratch BO so the CS/VS dummy read has somewhere valid to land.
+ * NOTE(review): the scratch bo's reference appears to be handed off
+ * via cl_address with no vc4_bo_unreference here — confirm ownership
+ * transfer, otherwise this leaks a BO reference per draw.
+ */
+ if (vtx->num_elements == 0) {
+ assert(num_elements_emit == 1);
+ struct vc4_bo *bo = vc4_bo_alloc(vc4->screen, 4096, "scratch VBO");
+
+ cl_emit(&job->shader_rec, ATTRIBUTE_RECORD, attr) {
+ attr.address = cl_address(bo, 0);
+ attr.number_of_bytes_minus_1 = 16 - 1;
+ attr.stride = 0;
+ attr.coordinate_shader_vpm_offset = 0;
+ attr.vertex_shader_vpm_offset = 0;
+ }
+ }
+
+ cl_emit(&job->bcl, GL_SHADER_STATE, shader_state) {
+ /* Note that number of attributes == 0 in the packet means 8
+ * attributes. This field also contains the offset into
+ * shader_rec.
+ */
+ assert(vtx->num_elements <= 8);
+ shader_state.number_of_attribute_arrays =
+ num_elements_emit & 0x7;
+ }
+
+ /* Uniform streams for all three stages; VS and CS share the vertex
+ * constant buffers and vertex-stage textures.
+ */
+ vc4_write_uniforms(vc4, vc4->prog.fs,
+ &vc4->constbuf[PIPE_SHADER_FRAGMENT],
+ &vc4->fragtex);
+ vc4_write_uniforms(vc4, vc4->prog.vs,
+ &vc4->constbuf[PIPE_SHADER_VERTEX],
+ &vc4->verttex);
+ vc4_write_uniforms(vc4, vc4->prog.cs,
+ &vc4->constbuf[PIPE_SHADER_VERTEX],
+ &vc4->verttex);
+
+ vc4->last_index_bias = info->index_bias + extra_index_bias;
+ vc4->max_index = max_index;
+ job->shader_rec_count++;
+}
+
+/**
+ * HW-2116 workaround: Flush the batch before triggering the hardware state
+ * counter wraparound behavior.
+ *
+ * State updates are tracked by a global counter which increments at the first
+ * state update after a draw or a START_BINNING. Tiles can then have their
+ * state updated at draw time with a set of cheap checks for whether the
+ * state's copy of the global counter matches the global counter the last time
+ * that state was written to the tile.
+ *
+ * The state counters are relatively small and wrap around quickly, so you
+ * could get false negatives for needing to update a particular state in the
+ * tile. To avoid this, the hardware attempts to write all of the state in
+ * the tile at wraparound time. This apparently is broken, so we just flush
+ * everything before that behavior is triggered. A batch flush is sufficient
+ * to get our current contents drawn and reset the counters to 0.
+ *
+ * Note that we can't just use VC4_PACKET_FLUSH_ALL, because that caps the
+ * tiles with VC4_PACKET_RETURN_FROM_LIST.
+ */
+static void
+vc4_hw_2116_workaround(struct pipe_context *pctx, int vert_count)
+{
+ struct vc4_context *vc4 = vc4_context(pctx);
+ struct vc4_job *job = vc4_get_job_for_fbo(vc4);
+
+ if (job->draw_calls_queued + vert_count / 65535 >= VC4_HW_2116_COUNT) {
+ perf_debug("Flushing batch due to HW-2116 workaround "
+ "(too many draw calls per scene\n");
+ vc4_job_submit(vc4, job);
+ }