freedreno: clear last_fence after resource tracking
[mesa.git] / src / gallium / drivers / freedreno / freedreno_draw.c
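
Clearing ctx->last_fence must happen after the batch dependency tracking
(resource_read()/resource_written()), since that tracking can itself trigger a
flush that re-populates last_fence. This change moves the
fd_fence_ref(&ctx->last_fence, NULL) calls in fd_draw_vbo() and fd_clear()
below the dependency-tracking blocks.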
/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "util/u_draw.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_prim.h"
#include "util/format/u_format.h"
#include "util/u_helpers.h"

#include "freedreno_blitter.h"
#include "freedreno_draw.h"
#include "freedreno_context.h"
#include "freedreno_fence.h"
#include "freedreno_state.h"
#include "freedreno_resource.h"
#include "freedreno_query_acc.h"
#include "freedreno_query_hw.h"
#include "freedreno_util.h"

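/* NULL-safe wrappers to record a batch's read or write dependency on a
 * resource.  This feeds the batch-cache dependency tracking, which is
 * what decides when dependent batches need to be flushed.
 */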
static void
resource_read(struct fd_batch *batch, struct pipe_resource *prsc)
{
	if (!prsc)
		return;
	fd_batch_resource_read(batch, fd_resource(prsc));
}

static void
resource_written(struct fd_batch *batch, struct pipe_resource *prsc)
{
	if (!prsc)
		return;
	fd_batch_resource_write(batch, fd_resource(prsc));
}

static void
fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_batch *batch = fd_context_batch(ctx);
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	unsigned i, prims, buffers = 0, restore_buffers = 0;

	/* for debugging problems with indirect draw, it is convenient
	 * to be able to emulate it, to determine if the game is feeding
	 * us bogus data:
	 */
	if (info->indirect && (fd_mesa_debug & FD_DBG_NOINDR)) {
		util_draw_indirect(pctx, info);
		return;
	}

	if (!info->count_from_stream_output && !info->indirect &&
			!info->primitive_restart &&
			!u_trim_pipe_prim(info->mode, (unsigned*)&info->count))
		return;

	/* TODO: push down the region versions into the tiles */
	if (!fd_render_condition_check(pctx))
		return;

	/* emulate unsupported primitives: */
	if (!fd_supported_prim(ctx, info->mode)) {
		if (ctx->streamout.num_targets > 0)
			debug_error("stream-out with emulated prims");
		util_primconvert_save_rasterizer_state(ctx->primconvert, ctx->rasterizer);
		util_primconvert_draw_vbo(ctx->primconvert, info);
		return;
	}

	/* Upload a user index buffer. */
	struct pipe_resource *indexbuf = NULL;
	unsigned index_offset = 0;
	struct pipe_draw_info new_info;
	if (info->index_size) {
		if (info->has_user_indices) {
			if (!util_upload_index_buffer(pctx, info, &indexbuf, &index_offset, 4))
				return;
			new_info = *info;
			new_info.index.resource = indexbuf;
			new_info.has_user_indices = false;
			info = &new_info;
		} else {
			indexbuf = info->index.resource;
		}
	}

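	/* If we are inside a blit that discards the previous surface contents,
	 * any rendering accumulated so far can be dropped: reset the batch and
	 * mark all state dirty so it is re-emitted:
	 */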
	if (ctx->in_discard_blit) {
		fd_batch_reset(batch);
		fd_context_all_dirty(ctx);
	}

	batch->blit = ctx->in_discard_blit;
	batch->back_blit = ctx->in_shadow;

	/* NOTE: needs to be before resource_written(batch->query_buf), otherwise
	 * query_buf may not be created yet.
	 */
	fd_batch_set_stage(batch, FD_STAGE_DRAW);

	/*
	 * Figure out the buffers/features we need:
	 */

	fd_screen_lock(ctx->screen);

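	/* For each enabled attachment: if it already contains valid data it may
	 * need to be restored (mem2gmem) before drawing, otherwise treat it as
	 * invalidated so the restore can be skipped:
	 */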
	if (ctx->dirty & (FD_DIRTY_FRAMEBUFFER | FD_DIRTY_ZSA)) {
		if (fd_depth_enabled(ctx)) {
			if (fd_resource(pfb->zsbuf->texture)->valid) {
				restore_buffers |= FD_BUFFER_DEPTH;
			} else {
				batch->invalidated |= FD_BUFFER_DEPTH;
			}
			batch->gmem_reason |= FD_GMEM_DEPTH_ENABLED;
			if (fd_depth_write_enabled(ctx)) {
				buffers |= FD_BUFFER_DEPTH;
				resource_written(batch, pfb->zsbuf->texture);
			} else {
				resource_read(batch, pfb->zsbuf->texture);
			}
		}

		if (fd_stencil_enabled(ctx)) {
			if (fd_resource(pfb->zsbuf->texture)->valid) {
				restore_buffers |= FD_BUFFER_STENCIL;
			} else {
				batch->invalidated |= FD_BUFFER_STENCIL;
			}
			batch->gmem_reason |= FD_GMEM_STENCIL_ENABLED;
			buffers |= FD_BUFFER_STENCIL;
			resource_written(batch, pfb->zsbuf->texture);
		}
	}

	if (fd_logicop_enabled(ctx))
		batch->gmem_reason |= FD_GMEM_LOGICOP_ENABLED;

	for (i = 0; i < pfb->nr_cbufs; i++) {
		struct pipe_resource *surf;

		if (!pfb->cbufs[i])
			continue;

		surf = pfb->cbufs[i]->texture;

		if (fd_resource(surf)->valid) {
			restore_buffers |= PIPE_CLEAR_COLOR0 << i;
		} else {
			batch->invalidated |= PIPE_CLEAR_COLOR0 << i;
		}

		buffers |= PIPE_CLEAR_COLOR0 << i;

		if (fd_blend_enabled(ctx, i))
			batch->gmem_reason |= FD_GMEM_BLEND_ENABLED;

		if (ctx->dirty & FD_DIRTY_FRAMEBUFFER)
			resource_written(batch, pfb->cbufs[i]->texture);
	}

	/* Mark SSBOs */
	if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_SSBO) {
		const struct fd_shaderbuf_stateobj *so = &ctx->shaderbuf[PIPE_SHADER_FRAGMENT];

		foreach_bit (i, so->enabled_mask & so->writable_mask)
			resource_written(batch, so->sb[i].buffer);

		foreach_bit (i, so->enabled_mask & ~so->writable_mask)
			resource_read(batch, so->sb[i].buffer);
	}

	if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_IMAGE) {
		foreach_bit (i, ctx->shaderimg[PIPE_SHADER_FRAGMENT].enabled_mask) {
			struct pipe_image_view *img =
				&ctx->shaderimg[PIPE_SHADER_FRAGMENT].si[i];
			if (img->access & PIPE_IMAGE_ACCESS_WRITE)
				resource_written(batch, img->resource);
			else
				resource_read(batch, img->resource);
		}
	}

	if (ctx->dirty_shader[PIPE_SHADER_VERTEX] & FD_DIRTY_SHADER_CONST) {
		foreach_bit (i, ctx->constbuf[PIPE_SHADER_VERTEX].enabled_mask)
			resource_read(batch, ctx->constbuf[PIPE_SHADER_VERTEX].cb[i].buffer);
	}

	if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_CONST) {
		foreach_bit (i, ctx->constbuf[PIPE_SHADER_FRAGMENT].enabled_mask)
			resource_read(batch, ctx->constbuf[PIPE_SHADER_FRAGMENT].cb[i].buffer);
	}

	/* Mark VBOs as being read */
	if (ctx->dirty & FD_DIRTY_VTXBUF) {
		foreach_bit (i, ctx->vtx.vertexbuf.enabled_mask) {
			assert(!ctx->vtx.vertexbuf.vb[i].is_user_buffer);
			resource_read(batch, ctx->vtx.vertexbuf.vb[i].buffer.resource);
		}
	}

	/* Mark index buffer as being read */
	resource_read(batch, indexbuf);

	/* Mark indirect draw buffer as being read */
	if (info->indirect)
		resource_read(batch, info->indirect->buffer);

	/* Mark textures as being read */
	if (ctx->dirty_shader[PIPE_SHADER_VERTEX] & FD_DIRTY_SHADER_TEX) {
		foreach_bit (i, ctx->tex[PIPE_SHADER_VERTEX].valid_textures)
			resource_read(batch, ctx->tex[PIPE_SHADER_VERTEX].textures[i]->texture);
	}

	if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_TEX) {
		foreach_bit (i, ctx->tex[PIPE_SHADER_FRAGMENT].valid_textures)
			resource_read(batch, ctx->tex[PIPE_SHADER_FRAGMENT].textures[i]->texture);
	}

	/* Mark streamout buffers as being written.. */
	if (ctx->dirty & FD_DIRTY_STREAMOUT) {
		for (i = 0; i < ctx->streamout.num_targets; i++)
			if (ctx->streamout.targets[i])
				resource_written(batch, ctx->streamout.targets[i]->buffer);
	}

	resource_written(batch, batch->query_buf);

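	/* Active accumulating queries also land results in their own buffers: */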
	list_for_each_entry(struct fd_acc_query, aq, &ctx->acc_active_queries, node)
		resource_written(batch, aq->prsc);

	fd_screen_unlock(ctx->screen);

	batch->num_draws++;

	/* Counting prims in sw doesn't work for GS and tessellation. For older
	 * gens we don't have those stages and don't have the hw counters enabled,
	 * so keep the count accurate for non-patch geometry.
	 */
	if (info->mode != PIPE_PRIM_PATCHES)
		prims = u_reduced_prims_for_vertices(info->mode, info->count);
	else
		prims = 0;

	ctx->stats.draw_calls++;

	/* TODO prims_emitted should be clipped when the stream-out buffer is
	 * not large enough.  See max_tf_vtx().. probably need to move that
	 * into common code.  Although a bit more annoying since a2xx doesn't
	 * use ir3 so no common way to get at the pipe_stream_output_info
	 * which is needed for this calculation.
	 */
	if (ctx->streamout.num_targets > 0)
		ctx->stats.prims_emitted += prims;
	ctx->stats.prims_generated += prims;

	/* any buffers that haven't been cleared yet, we need to restore: */
	batch->restore |= restore_buffers & (FD_BUFFER_ALL & ~batch->invalidated);
	/* and any buffers used, need to be resolved: */
	batch->resolve |= buffers;

	/* Clearing last_fence must come after the batch dependency tracking
	 * (resource_read()/resource_written()), as that can trigger a flush,
	 * re-populating last_fence
	 */
	fd_fence_ref(&ctx->last_fence, NULL);

	DBG("%p: %x %ux%u num_draws=%u (%s/%s)", batch, buffers,
		pfb->width, pfb->height, batch->num_draws,
		util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
		util_format_short_name(pipe_surface_format(pfb->zsbuf)));

	if (ctx->draw_vbo(ctx, info, index_offset))
		batch->needs_flush = true;

	batch->num_vertices += info->count * info->instance_count;

	for (i = 0; i < ctx->streamout.num_targets; i++)
		ctx->streamout.offsets[i] += info->count;

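	/* FD_DBG_DDRAW: mark all state dirty after each draw, forcing everything
	 * to be re-emitted (useful for flushing out dirty-bit tracking bugs):
	 */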
	if (fd_mesa_debug & FD_DBG_DDRAW)
		fd_context_all_dirty(ctx);

	fd_batch_check_size(batch);

	if (info == &new_info)
		pipe_resource_reference(&indexbuf, NULL);
}

static void
fd_clear(struct pipe_context *pctx, unsigned buffers,
		const struct pipe_scissor_state *scissor_state,
		const union pipe_color_union *color, double depth, unsigned stencil)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_batch *batch = fd_context_batch(ctx);
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	unsigned cleared_buffers;
	int i;

	/* TODO: push down the region versions into the tiles */
	if (!fd_render_condition_check(pctx))
		return;

	if (ctx->in_discard_blit) {
		fd_batch_reset(batch);
		fd_context_all_dirty(ctx);
	}

	/* pctx->clear() is only for full-surface clears, so scissor is
	 * equivalent to having GL_SCISSOR_TEST disabled:
	 */
	batch->max_scissor.minx = 0;
	batch->max_scissor.miny = 0;
	batch->max_scissor.maxx = pfb->width;
	batch->max_scissor.maxy = pfb->height;

	/* for bookkeeping about which buffers have been cleared (and thus
	 * can fully or partially skip mem2gmem) we need to ignore buffers
	 * that have already had a draw, in case apps do silly things like
	 * clear after draw (ie. if you only clear the color buffer, but
	 * something like alpha-test causes side effects from the draw in
	 * the depth buffer, etc)
	 */
	cleared_buffers = buffers & (FD_BUFFER_ALL & ~batch->restore);
	batch->cleared |= buffers;
	batch->invalidated |= cleared_buffers;

	batch->resolve |= buffers;
	batch->needs_flush = true;

	fd_screen_lock(ctx->screen);

	if (buffers & PIPE_CLEAR_COLOR)
		for (i = 0; i < pfb->nr_cbufs; i++)
			if (buffers & (PIPE_CLEAR_COLOR0 << i))
				resource_written(batch, pfb->cbufs[i]->texture);

	if (buffers & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)) {
		resource_written(batch, pfb->zsbuf->texture);
		batch->gmem_reason |= FD_GMEM_CLEARS_DEPTH_STENCIL;
	}

	resource_written(batch, batch->query_buf);

	list_for_each_entry(struct fd_acc_query, aq, &ctx->acc_active_queries, node)
		resource_written(batch, aq->prsc);

	fd_screen_unlock(ctx->screen);

	/* Clearing last_fence must come after the batch dependency tracking
	 * (resource_read()/resource_written()), as that can trigger a flush,
	 * re-populating last_fence
	 */
	fd_fence_ref(&ctx->last_fence, NULL);

	DBG("%p: %x %ux%u depth=%f, stencil=%u (%s/%s)", batch, buffers,
		pfb->width, pfb->height, depth, stencil,
		util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
		util_format_short_name(pipe_surface_format(pfb->zsbuf)));

	/* if the per-gen backend doesn't implement ctx->clear(), fall back
	 * to a generic blitter clear:
	 */
	bool fallback = true;

	if (ctx->clear) {
		fd_batch_set_stage(batch, FD_STAGE_CLEAR);

		if (ctx->clear(ctx, buffers, color, depth, stencil)) {
			if (fd_mesa_debug & FD_DBG_DCLEAR)
				fd_context_all_dirty(ctx);

			fallback = false;
		}
	}

	if (fallback) {
		fd_blitter_clear(pctx, buffers, color, depth, stencil);
	}

	fd_batch_check_size(batch);
}

static void
fd_clear_render_target(struct pipe_context *pctx, struct pipe_surface *ps,
		const union pipe_color_union *color,
		unsigned x, unsigned y, unsigned w, unsigned h,
		bool render_condition_enabled)
{
	DBG("TODO: x=%u, y=%u, w=%u, h=%u", x, y, w, h);
}

static void
fd_clear_depth_stencil(struct pipe_context *pctx, struct pipe_surface *ps,
		unsigned buffers, double depth, unsigned stencil,
		unsigned x, unsigned y, unsigned w, unsigned h,
		bool render_condition_enabled)
{
	DBG("TODO: buffers=%u, depth=%f, stencil=%u, x=%u, y=%u, w=%u, h=%u",
		buffers, depth, stencil, x, y, w, h);
}

static void
fd_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info)
{
	struct fd_context *ctx = fd_context(pctx);
	const struct fd_shaderbuf_stateobj *so = &ctx->shaderbuf[PIPE_SHADER_COMPUTE];
	struct fd_batch *batch, *save_batch = NULL;
	unsigned i;

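	/* Compute work goes in its own batch: allocate one, save the current
	 * rendering batch, and restore it once the grid has been flushed:
	 */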
	batch = fd_bc_alloc_batch(&ctx->screen->batch_cache, ctx, true);
	fd_batch_reference(&save_batch, ctx->batch);
	fd_batch_reference(&ctx->batch, batch);
	fd_context_all_dirty(ctx);

	fd_screen_lock(ctx->screen);

	/* Mark SSBOs */
	foreach_bit (i, so->enabled_mask & so->writable_mask)
		resource_written(batch, so->sb[i].buffer);

	foreach_bit (i, so->enabled_mask & ~so->writable_mask)
		resource_read(batch, so->sb[i].buffer);

	foreach_bit (i, ctx->shaderimg[PIPE_SHADER_COMPUTE].enabled_mask) {
		struct pipe_image_view *img =
			&ctx->shaderimg[PIPE_SHADER_COMPUTE].si[i];
		if (img->access & PIPE_IMAGE_ACCESS_WRITE)
			resource_written(batch, img->resource);
		else
			resource_read(batch, img->resource);
	}

	/* UBOs are read */
	foreach_bit (i, ctx->constbuf[PIPE_SHADER_COMPUTE].enabled_mask)
		resource_read(batch, ctx->constbuf[PIPE_SHADER_COMPUTE].cb[i].buffer);

	/* Mark textures as being read */
	foreach_bit (i, ctx->tex[PIPE_SHADER_COMPUTE].valid_textures)
		resource_read(batch, ctx->tex[PIPE_SHADER_COMPUTE].textures[i]->texture);

	/* For global buffers, we don't really know if read or written, so assume
	 * the worst:
	 */
	foreach_bit (i, ctx->global_bindings.enabled_mask)
		resource_written(batch, ctx->global_bindings.buf[i]);

	if (info->indirect)
		resource_read(batch, info->indirect);

	fd_screen_unlock(ctx->screen);

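	/* the per-gen backend emits the actual dispatch: */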
	batch->needs_flush = true;
	ctx->launch_grid(ctx, info);

	fd_batch_flush(batch);

	fd_batch_reference(&ctx->batch, save_batch);
	fd_context_all_dirty(ctx);
	fd_batch_reference(&save_batch, NULL);
	fd_batch_reference(&batch, NULL);
}

void
fd_draw_init(struct pipe_context *pctx)
{
	pctx->draw_vbo = fd_draw_vbo;
	pctx->clear = fd_clear;
	pctx->clear_render_target = fd_clear_render_target;
	pctx->clear_depth_stencil = fd_clear_depth_stencil;

	if (has_compute(fd_screen(pctx->screen))) {
		pctx->launch_grid = fd_launch_grid;
	}
}