freedreno: allocate batches from the cache in launch_grid
mesa.git: src/gallium/drivers/freedreno/freedreno_draw.c
/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "util/u_draw.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_prim.h"
#include "util/u_format.h"
#include "util/u_helpers.h"

#include "freedreno_draw.h"
#include "freedreno_context.h"
#include "freedreno_state.h"
#include "freedreno_resource.h"
#include "freedreno_query_acc.h"
#include "freedreno_query_hw.h"
#include "freedreno_util.h"

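/* Track a resource as read from or written to by the current batch; this
 * builds the dependency information used to order batch flushes.
 */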
static void
resource_read(struct fd_batch *batch, struct pipe_resource *prsc)
{
	if (!prsc)
		return;
	fd_batch_resource_used(batch, fd_resource(prsc), false);
}

static void
resource_written(struct fd_batch *batch, struct pipe_resource *prsc)
{
	if (!prsc)
		return;
	fd_batch_resource_used(batch, fd_resource(prsc), true);
}

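/* pipe_context::draw_vbo implementation.  The generation-independent
 * bookkeeping lives here; the per-gen backend is invoked via ctx->draw_vbo.
 */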
static void
fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_batch *batch = fd_context_batch(ctx);
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);
	unsigned i, prims, buffers = 0, restore_buffers = 0;

	/* for debugging problems with indirect draw, it is convenient
	 * to be able to emulate it, to determine if game is feeding us
	 * bogus data:
	 */
	if (info->indirect && (fd_mesa_debug & FD_DBG_NOINDR)) {
		util_draw_indirect(pctx, info);
		return;
	}

	if (!info->count_from_stream_output && !info->indirect &&
			!info->primitive_restart &&
			!u_trim_pipe_prim(info->mode, (unsigned*)&info->count))
		return;

	/* if we supported transform feedback, we'd have to disable this: */
	if (((scissor->maxx - scissor->minx) *
			(scissor->maxy - scissor->miny)) == 0) {
		return;
	}

	/* TODO: push down the region versions into the tiles */
	if (!fd_render_condition_check(pctx))
		return;

	/* emulate unsupported primitives: */
	if (!fd_supported_prim(ctx, info->mode)) {
		if (ctx->streamout.num_targets > 0)
			debug_error("stream-out with emulated prims");
		util_primconvert_save_rasterizer_state(ctx->primconvert, ctx->rasterizer);
		util_primconvert_draw_vbo(ctx->primconvert, info);
		return;
	}

	/* Upload a user index buffer. */
	struct pipe_resource *indexbuf = NULL;
	unsigned index_offset = 0;
	struct pipe_draw_info new_info;
	if (info->index_size) {
		if (info->has_user_indices) {
			if (!util_upload_index_buffer(pctx, info, &indexbuf, &index_offset))
				return;
			new_info = *info;
			new_info.index.resource = indexbuf;
			new_info.has_user_indices = false;
			info = &new_info;
		} else {
			indexbuf = info->index.resource;
		}
	}

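	/* an internal blit discards the in-progress batch state and re-emits
	 * everything:
	 */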
	if (ctx->in_blit) {
		fd_batch_reset(batch);
		fd_context_all_dirty(ctx);
	}

	batch->blit = ctx->in_blit;
	batch->back_blit = ctx->in_shadow;

	/* NOTE: needs to be before resource_written(batch->query_buf), otherwise
	 * query_buf may not be created yet.
	 */
	fd_batch_set_stage(batch, FD_STAGE_DRAW);

	/*
	 * Figure out the buffers/features we need:
	 */

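	/* screen->lock guards the batch cache and the cross-context resource
	 * tracking updated by resource_read()/resource_written():
	 */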
	mtx_lock(&ctx->screen->lock);

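	/* For each buffer used by the draw: if it already holds valid contents
	 * it may need to be restored (mem2gmem) first; if not, it is marked
	 * invalidated so the restore can be skipped:
	 */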
	if (fd_depth_enabled(ctx)) {
		if (fd_resource(pfb->zsbuf->texture)->valid) {
			restore_buffers |= FD_BUFFER_DEPTH;
		} else {
			batch->invalidated |= FD_BUFFER_DEPTH;
		}
		buffers |= FD_BUFFER_DEPTH;
		resource_written(batch, pfb->zsbuf->texture);
		batch->gmem_reason |= FD_GMEM_DEPTH_ENABLED;
	}

	if (fd_stencil_enabled(ctx)) {
		if (fd_resource(pfb->zsbuf->texture)->valid) {
			restore_buffers |= FD_BUFFER_STENCIL;
		} else {
			batch->invalidated |= FD_BUFFER_STENCIL;
		}
		buffers |= FD_BUFFER_STENCIL;
		resource_written(batch, pfb->zsbuf->texture);
		batch->gmem_reason |= FD_GMEM_STENCIL_ENABLED;
	}

	if (fd_logicop_enabled(ctx))
		batch->gmem_reason |= FD_GMEM_LOGICOP_ENABLED;

	for (i = 0; i < pfb->nr_cbufs; i++) {
		struct pipe_resource *surf;

		if (!pfb->cbufs[i])
			continue;

		surf = pfb->cbufs[i]->texture;

		if (fd_resource(surf)->valid) {
			restore_buffers |= PIPE_CLEAR_COLOR0 << i;
		} else {
			batch->invalidated |= PIPE_CLEAR_COLOR0 << i;
		}

		resource_written(batch, surf);

		buffers |= PIPE_CLEAR_COLOR0 << i;

		if (fd_blend_enabled(ctx, i))
			batch->gmem_reason |= FD_GMEM_BLEND_ENABLED;
	}

	/* Mark SSBOs as being written.. we don't actually know which ones are
	 * read vs written, so just assume the worst
	 */
	foreach_bit(i, ctx->shaderbuf[PIPE_SHADER_FRAGMENT].enabled_mask)
		resource_written(batch, ctx->shaderbuf[PIPE_SHADER_FRAGMENT].sb[i].buffer);

	foreach_bit(i, ctx->shaderimg[PIPE_SHADER_FRAGMENT].enabled_mask) {
		struct pipe_image_view *img =
			&ctx->shaderimg[PIPE_SHADER_FRAGMENT].si[i];
		if (img->access & PIPE_IMAGE_ACCESS_WRITE)
			resource_written(batch, img->resource);
		else
			resource_read(batch, img->resource);
	}

	foreach_bit(i, ctx->constbuf[PIPE_SHADER_VERTEX].enabled_mask)
		resource_read(batch, ctx->constbuf[PIPE_SHADER_VERTEX].cb[i].buffer);
	foreach_bit(i, ctx->constbuf[PIPE_SHADER_FRAGMENT].enabled_mask)
		resource_read(batch, ctx->constbuf[PIPE_SHADER_FRAGMENT].cb[i].buffer);

	/* Mark VBOs as being read */
	foreach_bit(i, ctx->vtx.vertexbuf.enabled_mask) {
		assert(!ctx->vtx.vertexbuf.vb[i].is_user_buffer);
		resource_read(batch, ctx->vtx.vertexbuf.vb[i].buffer.resource);
	}

	/* Mark index buffer as being read */
	resource_read(batch, indexbuf);

	/* Mark indirect draw buffer as being read */
	if (info->indirect)
		resource_read(batch, info->indirect->buffer);

	/* Mark textures as being read */
	foreach_bit(i, ctx->tex[PIPE_SHADER_VERTEX].valid_textures)
		resource_read(batch, ctx->tex[PIPE_SHADER_VERTEX].textures[i]->texture);
	foreach_bit(i, ctx->tex[PIPE_SHADER_FRAGMENT].valid_textures)
		resource_read(batch, ctx->tex[PIPE_SHADER_FRAGMENT].textures[i]->texture);

	/* Mark streamout buffers as being written.. */
	for (i = 0; i < ctx->streamout.num_targets; i++)
		if (ctx->streamout.targets[i])
			resource_written(batch, ctx->streamout.targets[i]->buffer);

	resource_written(batch, batch->query_buf);

	list_for_each_entry(struct fd_acc_query, aq, &ctx->acc_active_queries, node)
		resource_written(batch, aq->prsc);

	mtx_unlock(&ctx->screen->lock);

	batch->num_draws++;

	prims = u_reduced_prims_for_vertices(info->mode, info->count);

	ctx->stats.draw_calls++;

	/* TODO prims_emitted should be clipped when the stream-out buffer is
	 * not large enough.  See max_tf_vtx()..  probably need to move that
	 * into common code.  Although a bit more annoying since a2xx doesn't
	 * use ir3 so no common way to get at the pipe_stream_output_info
	 * which is needed for this calculation.
	 */
	if (ctx->streamout.num_targets > 0)
		ctx->stats.prims_emitted += prims;
	ctx->stats.prims_generated += prims;

	/* any buffers that haven't been cleared yet, we need to restore: */
	batch->restore |= restore_buffers & (FD_BUFFER_ALL & ~batch->invalidated);
	/* and any buffers used, need to be resolved: */
	batch->resolve |= buffers;

	DBG("%p: %x %ux%u num_draws=%u (%s/%s)", batch, buffers,
		pfb->width, pfb->height, batch->num_draws,
		util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
		util_format_short_name(pipe_surface_format(pfb->zsbuf)));

	if (ctx->draw_vbo(ctx, info, index_offset))
		batch->needs_flush = true;

	for (i = 0; i < ctx->streamout.num_targets; i++)
		ctx->streamout.offsets[i] += info->count;

	if (fd_mesa_debug & FD_DBG_DDRAW)
		fd_context_all_dirty(ctx);

	fd_batch_check_size(batch);

	if (info == &new_info)
		pipe_resource_reference(&indexbuf, NULL);
}


/* Generic clear implementation (partially) using u_blitter: */
static void
fd_blitter_clear(struct pipe_context *pctx, unsigned buffers,
		const union pipe_color_union *color, double depth, unsigned stencil)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
	struct blitter_context *blitter = ctx->blitter;

	fd_blitter_pipe_begin(ctx, false, true, FD_STAGE_CLEAR);

	util_blitter_common_clear_setup(blitter, pfb->width, pfb->height,
			buffers, NULL, NULL);

	struct pipe_stencil_ref sr = {
		.ref_value = { stencil & 0xff }
	};
	pctx->set_stencil_ref(pctx, &sr);

	struct pipe_constant_buffer cb = {
		.buffer_size = 16,
		.user_buffer = &color->ui,
	};
	pctx->set_constant_buffer(pctx, PIPE_SHADER_FRAGMENT, 0, &cb);

	if (!ctx->clear_rs_state) {
		const struct pipe_rasterizer_state tmpl = {
			.cull_face = PIPE_FACE_NONE,
			.half_pixel_center = 1,
			.bottom_edge_rule = 1,
			.flatshade = 1,
			.depth_clip_near = 1,
			.depth_clip_far = 1,
		};
		ctx->clear_rs_state = pctx->create_rasterizer_state(pctx, &tmpl);
	}
	pctx->bind_rasterizer_state(pctx, ctx->clear_rs_state);

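	/* A viewport that maps the clear quad over the whole surface; the
	 * clear value becomes the quad's window-space z (assuming unit z
	 * from the solid vertex buffer):
	 */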
	struct pipe_viewport_state vp = {
		.scale = { 0.5f * pfb->width, -0.5f * pfb->height, depth },
		.translate = { 0.5f * pfb->width, 0.5f * pfb->height, 0.0f },
	};
	pctx->set_viewport_states(pctx, 0, 1, &vp);

	pctx->bind_vertex_elements_state(pctx, ctx->solid_vbuf_state.vtx);
	pctx->set_vertex_buffers(pctx, blitter->vb_slot, 1,
			&ctx->solid_vbuf_state.vertexbuf.vb[0]);
	pctx->set_stream_output_targets(pctx, 0, NULL, NULL);
	pctx->bind_vs_state(pctx, ctx->solid_prog.vp);
	pctx->bind_fs_state(pctx, ctx->solid_prog.fp);

	struct pipe_draw_info info = {
		.mode = PIPE_PRIM_MAX, /* maps to DI_PT_RECTLIST */
		.count = 2,
		.max_index = 1,
		.instance_count = 1,
	};
	ctx->draw_vbo(ctx, &info, 0);

	util_blitter_restore_constant_buffer_state(blitter);
	util_blitter_restore_vertex_states(blitter);
	util_blitter_restore_fragment_states(blitter);
	util_blitter_restore_textures(blitter);
	util_blitter_restore_fb_state(blitter);
	util_blitter_restore_render_cond(blitter);
	util_blitter_unset_running_flag(blitter);

	fd_blitter_pipe_end(ctx);
}

static void
fd_clear(struct pipe_context *pctx, unsigned buffers,
		const union pipe_color_union *color, double depth, unsigned stencil)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_batch *batch = fd_context_batch(ctx);
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	unsigned cleared_buffers;
	int i;

	/* TODO: push down the region versions into the tiles */
	if (!fd_render_condition_check(pctx))
		return;

	if (ctx->in_blit) {
		fd_batch_reset(batch);
		fd_context_all_dirty(ctx);
	}

	/* pctx->clear() is only for full-surface clears, so scissor is
	 * equivalent to having GL_SCISSOR_TEST disabled:
	 */
	batch->max_scissor.minx = 0;
	batch->max_scissor.miny = 0;
	batch->max_scissor.maxx = pfb->width;
	batch->max_scissor.maxy = pfb->height;

	/* for bookkeeping about which buffers have been cleared (and thus
	 * can fully or partially skip mem2gmem) we need to ignore buffers
	 * that have already had a draw, in case apps do silly things like
	 * clear after draw (ie. if you only clear the color buffer, but
	 * something like alpha-test causes side effects from the draw in
	 * the depth buffer, etc)
	 */
	cleared_buffers = buffers & (FD_BUFFER_ALL & ~batch->restore);
	batch->cleared |= cleared_buffers;
	batch->invalidated |= cleared_buffers;

	batch->resolve |= buffers;
	batch->needs_flush = true;

	mtx_lock(&ctx->screen->lock);

	if (buffers & PIPE_CLEAR_COLOR)
		for (i = 0; i < pfb->nr_cbufs; i++)
			if (buffers & (PIPE_CLEAR_COLOR0 << i))
				resource_written(batch, pfb->cbufs[i]->texture);

	if (buffers & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)) {
		resource_written(batch, pfb->zsbuf->texture);
		batch->gmem_reason |= FD_GMEM_CLEARS_DEPTH_STENCIL;
	}

	resource_written(batch, batch->query_buf);

	list_for_each_entry(struct fd_acc_query, aq, &ctx->acc_active_queries, node)
		resource_written(batch, aq->prsc);

	mtx_unlock(&ctx->screen->lock);

	DBG("%p: %x %ux%u depth=%f, stencil=%u (%s/%s)", batch, buffers,
		pfb->width, pfb->height, depth, stencil,
		util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
		util_format_short_name(pipe_surface_format(pfb->zsbuf)));

	/* if the per-gen backend doesn't implement ctx->clear(), fall back to
	 * the generic blitter clear:
	 */
	bool fallback = true;

	if (ctx->clear) {
		fd_batch_set_stage(batch, FD_STAGE_CLEAR);

		if (ctx->clear(ctx, buffers, color, depth, stencil)) {
			if (fd_mesa_debug & FD_DBG_DCLEAR)
				fd_context_all_dirty(ctx);

			fallback = false;
		}
	}

	if (fallback) {
		fd_blitter_clear(pctx, buffers, color, depth, stencil);
	}

	fd_batch_check_size(batch);
}

static void
fd_clear_render_target(struct pipe_context *pctx, struct pipe_surface *ps,
		const union pipe_color_union *color,
		unsigned x, unsigned y, unsigned w, unsigned h,
		bool render_condition_enabled)
{
	DBG("TODO: x=%u, y=%u, w=%u, h=%u", x, y, w, h);
}

static void
fd_clear_depth_stencil(struct pipe_context *pctx, struct pipe_surface *ps,
		unsigned buffers, double depth, unsigned stencil,
		unsigned x, unsigned y, unsigned w, unsigned h,
		bool render_condition_enabled)
{
	DBG("TODO: buffers=%u, depth=%f, stencil=%u, x=%u, y=%u, w=%u, h=%u",
		buffers, depth, stencil, x, y, w, h);
}

static void
fd_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_batch *batch, *save_batch = NULL;
	unsigned i;

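	/* Compute work gets its own batch, allocated from the batch cache
	 * rather than reusing ctx->batch, and swapped in for the duration
	 * of the grid launch:
	 */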
	batch = fd_bc_alloc_batch(&ctx->screen->batch_cache, ctx, true);
	fd_batch_reference(&save_batch, ctx->batch);
	fd_batch_reference(&ctx->batch, batch);

	mtx_lock(&ctx->screen->lock);

	/* Mark SSBOs as being written.. we don't actually know which ones are
	 * read vs written, so just assume the worst
	 */
	foreach_bit(i, ctx->shaderbuf[PIPE_SHADER_COMPUTE].enabled_mask)
		resource_written(batch, ctx->shaderbuf[PIPE_SHADER_COMPUTE].sb[i].buffer);

	foreach_bit(i, ctx->shaderimg[PIPE_SHADER_COMPUTE].enabled_mask) {
		struct pipe_image_view *img =
			&ctx->shaderimg[PIPE_SHADER_COMPUTE].si[i];
		if (img->access & PIPE_IMAGE_ACCESS_WRITE)
			resource_written(batch, img->resource);
		else
			resource_read(batch, img->resource);
	}

	/* UBOs are read */
	foreach_bit(i, ctx->constbuf[PIPE_SHADER_COMPUTE].enabled_mask)
		resource_read(batch, ctx->constbuf[PIPE_SHADER_COMPUTE].cb[i].buffer);

	/* Mark textures as being read */
	foreach_bit(i, ctx->tex[PIPE_SHADER_COMPUTE].valid_textures)
		resource_read(batch, ctx->tex[PIPE_SHADER_COMPUTE].textures[i]->texture);

	/* For global buffers, we don't really know if read or written, so assume
	 * the worst:
	 */
	foreach_bit(i, ctx->global_bindings.enabled_mask)
		resource_written(batch, ctx->global_bindings.buf[i]);

	if (info->indirect)
		resource_read(batch, info->indirect);

	mtx_unlock(&ctx->screen->lock);

	batch->needs_flush = true;
	ctx->launch_grid(ctx, info);

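	/* flush the compute batch immediately, then restore the previous
	 * rendering batch:
	 */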
	fd_batch_flush(batch, false, false);

	fd_batch_reference(&ctx->batch, save_batch);
	fd_batch_reference(&save_batch, NULL);
	fd_batch_reference(&batch, NULL);
}

void
fd_draw_init(struct pipe_context *pctx)
{
	pctx->draw_vbo = fd_draw_vbo;
	pctx->clear = fd_clear;
	pctx->clear_render_target = fd_clear_render_target;
	pctx->clear_depth_stencil = fd_clear_depth_stencil;

	if (has_compute(fd_screen(pctx->screen))) {
		pctx->launch_grid = fd_launch_grid;
	}
}