/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "util/u_draw.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_prim.h"
#include "util/u_format.h"
#include "util/u_helpers.h"

#include "freedreno_draw.h"
#include "freedreno_context.h"
#include "freedreno_fence.h"
#include "freedreno_state.h"
#include "freedreno_resource.h"
#include "freedreno_query_acc.h"
#include "freedreno_query_hw.h"
#include "freedreno_util.h"

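/* Dependency-tracking helpers: record that the current batch reads or
 * writes a resource, so inter-batch dependencies are tracked correctly.
 * A NULL resource is accepted so callers don't have to check:
 */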
static void
resource_read(struct fd_batch *batch, struct pipe_resource *prsc)
{
	if (!prsc)
		return;
	fd_batch_resource_used(batch, fd_resource(prsc), false);
}

static void
resource_written(struct fd_batch *batch, struct pipe_resource *prsc)
{
	if (!prsc)
		return;
	fd_batch_resource_used(batch, fd_resource(prsc), true);
}

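/* The generic draw_vbo entry point: filter out draws we can skip, track
 * which buffers/resources the draw touches, then hand off to the
 * per-generation backend via ctx->draw_vbo():
 */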
static void
fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_batch *batch = fd_context_batch(ctx);
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);
	unsigned i, prims, buffers = 0, restore_buffers = 0;

	/* for debugging problems with indirect draw, it is convenient
	 * to be able to emulate it, to determine if the game is feeding
	 * us bogus data:
	 */
	if (info->indirect && (fd_mesa_debug & FD_DBG_NOINDR)) {
		util_draw_indirect(pctx, info);
		return;
	}

	if (!info->count_from_stream_output && !info->indirect &&
	    !info->primitive_restart &&
	    !u_trim_pipe_prim(info->mode, (unsigned*)&info->count))
		return;

	/* if we supported transform feedback, we'd have to disable this: */
	if (((scissor->maxx - scissor->minx) *
			(scissor->maxy - scissor->miny)) == 0) {
		return;
	}

	/* TODO: push down the region versions into the tiles */
	if (!fd_render_condition_check(pctx))
		return;

	/* emulate unsupported primitives: */
	if (!fd_supported_prim(ctx, info->mode)) {
		if (ctx->streamout.num_targets > 0)
			debug_error("stream-out with emulated prims");
		util_primconvert_save_rasterizer_state(ctx->primconvert, ctx->rasterizer);
		util_primconvert_draw_vbo(ctx->primconvert, info);
		return;
	}

	fd_fence_ref(pctx->screen, &ctx->last_fence, NULL);

	/* Upload a user index buffer. */
	struct pipe_resource *indexbuf = NULL;
	unsigned index_offset = 0;
	struct pipe_draw_info new_info;
	if (info->index_size) {
		if (info->has_user_indices) {
			if (!util_upload_index_buffer(pctx, info, &indexbuf, &index_offset))
				return;
			new_info = *info;
			new_info.index.resource = indexbuf;
			new_info.has_user_indices = false;
			info = &new_info;
		} else {
			indexbuf = info->index.resource;
		}
	}

	if (ctx->in_blit) {
		fd_batch_reset(batch);
		fd_context_all_dirty(ctx);
	}

	batch->blit = ctx->in_blit;
	batch->back_blit = ctx->in_shadow;

	/* NOTE: needs to be before resource_written(batch->query_buf), otherwise
	 * query_buf may not be created yet.
	 */
	fd_batch_set_stage(batch, FD_STAGE_DRAW);

	/*
	 * Figure out the buffers/features we need:
	 */

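	/* resource tracking is done under the screen lock, since batches and
	 * resources can be shared screen-wide:
	 */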
	mtx_lock(&ctx->screen->lock);

	if (ctx->dirty & (FD_DIRTY_FRAMEBUFFER | FD_DIRTY_ZSA)) {
		if (fd_depth_enabled(ctx)) {
			if (fd_resource(pfb->zsbuf->texture)->valid) {
				restore_buffers |= FD_BUFFER_DEPTH;
			} else {
				batch->invalidated |= FD_BUFFER_DEPTH;
			}
			batch->gmem_reason |= FD_GMEM_DEPTH_ENABLED;
			if (fd_depth_write_enabled(ctx)) {
				buffers |= FD_BUFFER_DEPTH;
				resource_written(batch, pfb->zsbuf->texture);
			} else {
				resource_read(batch, pfb->zsbuf->texture);
			}
		}

		if (fd_stencil_enabled(ctx)) {
			if (fd_resource(pfb->zsbuf->texture)->valid) {
				restore_buffers |= FD_BUFFER_STENCIL;
			} else {
				batch->invalidated |= FD_BUFFER_STENCIL;
			}
			batch->gmem_reason |= FD_GMEM_STENCIL_ENABLED;
			buffers |= FD_BUFFER_STENCIL;
			resource_written(batch, pfb->zsbuf->texture);
		}
	}

	if (ctx->dirty & FD_DIRTY_FRAMEBUFFER) {
		for (i = 0; i < pfb->nr_cbufs; i++) {
			if (!pfb->cbufs[i])
				continue;

			resource_written(batch, pfb->cbufs[i]->texture);
		}
	}

	if (fd_logicop_enabled(ctx))
		batch->gmem_reason |= FD_GMEM_LOGICOP_ENABLED;

	for (i = 0; i < pfb->nr_cbufs; i++) {
		struct pipe_resource *surf;

		if (!pfb->cbufs[i])
			continue;

		surf = pfb->cbufs[i]->texture;

		if (fd_resource(surf)->valid) {
			restore_buffers |= PIPE_CLEAR_COLOR0 << i;
		} else {
			batch->invalidated |= PIPE_CLEAR_COLOR0 << i;
		}

		buffers |= PIPE_CLEAR_COLOR0 << i;

		if (fd_blend_enabled(ctx, i))
			batch->gmem_reason |= FD_GMEM_BLEND_ENABLED;
	}

	/* Mark SSBOs as being written.. we don't actually know which ones are
	 * read vs written, so just assume the worst
	 */
	if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_SSBO) {
		foreach_bit(i, ctx->shaderbuf[PIPE_SHADER_FRAGMENT].enabled_mask)
			resource_written(batch, ctx->shaderbuf[PIPE_SHADER_FRAGMENT].sb[i].buffer);
	}

	if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_IMAGE) {
		foreach_bit(i, ctx->shaderimg[PIPE_SHADER_FRAGMENT].enabled_mask) {
			struct pipe_image_view *img =
					&ctx->shaderimg[PIPE_SHADER_FRAGMENT].si[i];
			if (img->access & PIPE_IMAGE_ACCESS_WRITE)
				resource_written(batch, img->resource);
			else
				resource_read(batch, img->resource);
		}
	}

	if (ctx->dirty_shader[PIPE_SHADER_VERTEX] & FD_DIRTY_SHADER_CONST) {
		foreach_bit(i, ctx->constbuf[PIPE_SHADER_VERTEX].enabled_mask)
			resource_read(batch, ctx->constbuf[PIPE_SHADER_VERTEX].cb[i].buffer);
	}

	if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_CONST) {
		foreach_bit(i, ctx->constbuf[PIPE_SHADER_FRAGMENT].enabled_mask)
			resource_read(batch, ctx->constbuf[PIPE_SHADER_FRAGMENT].cb[i].buffer);
	}

	/* Mark VBOs as being read */
	if (ctx->dirty & FD_DIRTY_VTXBUF) {
		foreach_bit(i, ctx->vtx.vertexbuf.enabled_mask) {
			assert(!ctx->vtx.vertexbuf.vb[i].is_user_buffer);
			resource_read(batch, ctx->vtx.vertexbuf.vb[i].buffer.resource);
		}
	}

	/* Mark index buffer as being read */
	resource_read(batch, indexbuf);

	/* Mark indirect draw buffer as being read */
	if (info->indirect)
		resource_read(batch, info->indirect->buffer);

	/* Mark textures as being read */
	if (ctx->dirty_shader[PIPE_SHADER_VERTEX] & FD_DIRTY_SHADER_TEX) {
		foreach_bit(i, ctx->tex[PIPE_SHADER_VERTEX].valid_textures)
			resource_read(batch, ctx->tex[PIPE_SHADER_VERTEX].textures[i]->texture);
	}

	if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_TEX) {
		foreach_bit(i, ctx->tex[PIPE_SHADER_FRAGMENT].valid_textures)
			resource_read(batch, ctx->tex[PIPE_SHADER_FRAGMENT].textures[i]->texture);
	}

	/* Mark streamout buffers as being written.. */
	if (ctx->dirty & FD_DIRTY_STREAMOUT) {
		for (i = 0; i < ctx->streamout.num_targets; i++)
			if (ctx->streamout.targets[i])
				resource_written(batch, ctx->streamout.targets[i]->buffer);
	}

	resource_written(batch, batch->query_buf);

	list_for_each_entry(struct fd_acc_query, aq, &ctx->acc_active_queries, node)
		resource_written(batch, aq->prsc);

	mtx_unlock(&ctx->screen->lock);

	batch->num_draws++;

	prims = u_reduced_prims_for_vertices(info->mode, info->count);

	ctx->stats.draw_calls++;

	/* TODO: prims_emitted should be clipped when the stream-out buffer is
	 * not large enough.  See max_tf_vtx()..  that probably needs to move
	 * into common code, which is a bit more annoying since a2xx doesn't
	 * use ir3, so there is no common way to get at the
	 * pipe_stream_output_info needed for this calculation.
	 */
	if (ctx->streamout.num_targets > 0)
		ctx->stats.prims_emitted += prims;
	ctx->stats.prims_generated += prims;

	/* any buffers that haven't been cleared yet, we need to restore: */
	batch->restore |= restore_buffers & (FD_BUFFER_ALL & ~batch->invalidated);
	/* and any buffers used, need to be resolved: */
	batch->resolve |= buffers;

	DBG("%p: %x %ux%u num_draws=%u (%s/%s)", batch, buffers,
		pfb->width, pfb->height, batch->num_draws,
		util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
		util_format_short_name(pipe_surface_format(pfb->zsbuf)));

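	/* hand off to the per-generation backend to emit the actual draw;
	 * a true return means the batch now has work that needs flushing:
	 */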
	if (ctx->draw_vbo(ctx, info, index_offset))
		batch->needs_flush = true;

	batch->num_vertices += info->count * info->instance_count;

	for (i = 0; i < ctx->streamout.num_targets; i++)
		ctx->streamout.offsets[i] += info->count;

	if (fd_mesa_debug & FD_DBG_DDRAW)
		fd_context_all_dirty(ctx);

	fd_batch_check_size(batch);

	if (info == &new_info)
		pipe_resource_reference(&indexbuf, NULL);
}

/* Generic clear implementation (partially) using u_blitter: */
static void
fd_blitter_clear(struct pipe_context *pctx, unsigned buffers,
		const union pipe_color_union *color, double depth, unsigned stencil)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
	struct blitter_context *blitter = ctx->blitter;

	fd_blitter_pipe_begin(ctx, false, true, FD_STAGE_CLEAR);

	util_blitter_common_clear_setup(blitter, pfb->width, pfb->height,
			buffers, NULL, NULL);

	struct pipe_stencil_ref sr = {
		.ref_value = { stencil & 0xff }
	};
	pctx->set_stencil_ref(pctx, &sr);

	struct pipe_constant_buffer cb = {
		.buffer_size = 16,
		.user_buffer = &color->ui,
	};
	pctx->set_constant_buffer(pctx, PIPE_SHADER_FRAGMENT, 0, &cb);

	if (!ctx->clear_rs_state) {
		const struct pipe_rasterizer_state tmpl = {
			.cull_face = PIPE_FACE_NONE,
			.half_pixel_center = 1,
			.bottom_edge_rule = 1,
			.flatshade = 1,
			.depth_clip_near = 1,
			.depth_clip_far = 1,
		};
		ctx->clear_rs_state = pctx->create_rasterizer_state(pctx, &tmpl);
	}
	pctx->bind_rasterizer_state(pctx, ctx->clear_rs_state);

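	/* viewport mapping the full surface; note that the clear depth value
	 * is folded into the viewport z scale:
	 */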
	struct pipe_viewport_state vp = {
		.scale = { 0.5f * pfb->width, -0.5f * pfb->height, depth },
		.translate = { 0.5f * pfb->width, 0.5f * pfb->height, 0.0f },
	};
	pctx->set_viewport_states(pctx, 0, 1, &vp);

	pctx->bind_vertex_elements_state(pctx, ctx->solid_vbuf_state.vtx);
	pctx->set_vertex_buffers(pctx, blitter->vb_slot, 1,
			&ctx->solid_vbuf_state.vertexbuf.vb[0]);
	pctx->set_stream_output_targets(pctx, 0, NULL, NULL);
	pctx->bind_vs_state(pctx, ctx->solid_prog.vp);
	pctx->bind_fs_state(pctx, ctx->solid_prog.fp);

	struct pipe_draw_info info = {
		.mode = PIPE_PRIM_MAX, /* maps to DI_PT_RECTLIST */
		.count = 2,
		.max_index = 1,
		.instance_count = 1,
	};
	ctx->draw_vbo(ctx, &info, 0);

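	/* restore the states that were saved on the way into the blit: */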
	util_blitter_restore_constant_buffer_state(blitter);
	util_blitter_restore_vertex_states(blitter);
	util_blitter_restore_fragment_states(blitter);
	util_blitter_restore_textures(blitter);
	util_blitter_restore_fb_state(blitter);
	util_blitter_restore_render_cond(blitter);
	util_blitter_unset_running_flag(blitter);

	fd_blitter_pipe_end(ctx);
}

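/* The pctx->clear() entry point: mark the cleared buffers, then try the
 * per-generation fast-clear, falling back to the u_blitter path above
 * when that isn't available:
 */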
static void
fd_clear(struct pipe_context *pctx, unsigned buffers,
		const union pipe_color_union *color, double depth, unsigned stencil)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_batch *batch = fd_context_batch(ctx);
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	unsigned cleared_buffers;
	int i;

	/* TODO: push down the region versions into the tiles */
	if (!fd_render_condition_check(pctx))
		return;

	fd_fence_ref(pctx->screen, &ctx->last_fence, NULL);

	if (ctx->in_blit) {
		fd_batch_reset(batch);
		fd_context_all_dirty(ctx);
	}

	/* pctx->clear() is only for full-surface clears, so the effective
	 * scissor is the whole surface, equivalent to having GL_SCISSOR_TEST
	 * disabled:
	 */
	batch->max_scissor.minx = 0;
	batch->max_scissor.miny = 0;
	batch->max_scissor.maxx = pfb->width;
	batch->max_scissor.maxy = pfb->height;

	/* for bookkeeping about which buffers have been cleared (and thus
	 * can fully or partially skip mem2gmem) we need to ignore buffers
	 * that have already had a draw, in case apps do silly things like
	 * clear after draw (ie. if you only clear the color buffer, but
	 * something like alpha-test causes side effects from the draw in
	 * the depth buffer, etc)
	 */
	cleared_buffers = buffers & (FD_BUFFER_ALL & ~batch->restore);
	batch->cleared |= cleared_buffers;
	batch->invalidated |= cleared_buffers;

	batch->resolve |= buffers;
	batch->needs_flush = true;

	mtx_lock(&ctx->screen->lock);

	if (buffers & PIPE_CLEAR_COLOR)
		for (i = 0; i < pfb->nr_cbufs; i++)
			if (buffers & (PIPE_CLEAR_COLOR0 << i))
				resource_written(batch, pfb->cbufs[i]->texture);

	if (buffers & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)) {
		resource_written(batch, pfb->zsbuf->texture);
		batch->gmem_reason |= FD_GMEM_CLEARS_DEPTH_STENCIL;
	}

	resource_written(batch, batch->query_buf);

	list_for_each_entry(struct fd_acc_query, aq, &ctx->acc_active_queries, node)
		resource_written(batch, aq->prsc);

	mtx_unlock(&ctx->screen->lock);

	DBG("%p: %x %ux%u depth=%f, stencil=%u (%s/%s)", batch, buffers,
		pfb->width, pfb->height, depth, stencil,
		util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
		util_format_short_name(pipe_surface_format(pfb->zsbuf)));

	/* if the per-gen backend doesn't implement ctx->clear(), fall back
	 * to the generic blitter clear:
	 */
	bool fallback = true;

	if (ctx->clear) {
		fd_batch_set_stage(batch, FD_STAGE_CLEAR);

		if (ctx->clear(ctx, buffers, color, depth, stencil)) {
			if (fd_mesa_debug & FD_DBG_DCLEAR)
				fd_context_all_dirty(ctx);

			fallback = false;
		}
	}

	if (fallback) {
		fd_blitter_clear(pctx, buffers, color, depth, stencil);
	}

	fd_batch_check_size(batch);
}

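/* Partial (scissored) clears of render targets and depth/stencil are not
 * implemented yet; the stubs just log what was requested:
 */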
static void
fd_clear_render_target(struct pipe_context *pctx, struct pipe_surface *ps,
		const union pipe_color_union *color,
		unsigned x, unsigned y, unsigned w, unsigned h,
		bool render_condition_enabled)
{
	DBG("TODO: x=%u, y=%u, w=%u, h=%u", x, y, w, h);
}

static void
fd_clear_depth_stencil(struct pipe_context *pctx, struct pipe_surface *ps,
		unsigned buffers, double depth, unsigned stencil,
		unsigned x, unsigned y, unsigned w, unsigned h,
		bool render_condition_enabled)
{
	DBG("TODO: buffers=%u, depth=%f, stencil=%u, x=%u, y=%u, w=%u, h=%u",
		buffers, depth, stencil, x, y, w, h);
}

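/* Compute dispatch runs in a batch of its own, flushed immediately, so it
 * doesn't get entangled with whatever render batch the context has open:
 */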
static void
fd_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_batch *batch, *save_batch = NULL;
	unsigned i;

	batch = fd_bc_alloc_batch(&ctx->screen->batch_cache, ctx, true);
	fd_batch_reference(&save_batch, ctx->batch);
	fd_batch_reference(&ctx->batch, batch);
	fd_context_all_dirty(ctx);

	mtx_lock(&ctx->screen->lock);

	/* Mark SSBOs as being written.. we don't actually know which ones are
	 * read vs written, so just assume the worst
	 */
	foreach_bit(i, ctx->shaderbuf[PIPE_SHADER_COMPUTE].enabled_mask)
		resource_written(batch, ctx->shaderbuf[PIPE_SHADER_COMPUTE].sb[i].buffer);

	foreach_bit(i, ctx->shaderimg[PIPE_SHADER_COMPUTE].enabled_mask) {
		struct pipe_image_view *img =
				&ctx->shaderimg[PIPE_SHADER_COMPUTE].si[i];
		if (img->access & PIPE_IMAGE_ACCESS_WRITE)
			resource_written(batch, img->resource);
		else
			resource_read(batch, img->resource);
	}

	/* UBOs are read */
	foreach_bit(i, ctx->constbuf[PIPE_SHADER_COMPUTE].enabled_mask)
		resource_read(batch, ctx->constbuf[PIPE_SHADER_COMPUTE].cb[i].buffer);

	/* Mark textures as being read */
	foreach_bit(i, ctx->tex[PIPE_SHADER_COMPUTE].valid_textures)
		resource_read(batch, ctx->tex[PIPE_SHADER_COMPUTE].textures[i]->texture);

	/* For global buffers, we don't really know if read or written, so assume
	 * the worst:
	 */
	foreach_bit(i, ctx->global_bindings.enabled_mask)
		resource_written(batch, ctx->global_bindings.buf[i]);

	if (info->indirect)
		resource_read(batch, info->indirect);

	mtx_unlock(&ctx->screen->lock);

	batch->needs_flush = true;
	ctx->launch_grid(ctx, info);

	fd_batch_flush(batch, false, false);

	fd_batch_reference(&ctx->batch, save_batch);
	fd_context_all_dirty(ctx);
	fd_batch_reference(&save_batch, NULL);
	fd_batch_reference(&batch, NULL);
}

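/* Hook the common draw/clear paths into the pipe_context vfuncs.  The
 * per-generation backends plug into the fd_context vfuncs used above
 * (ctx->draw_vbo, ctx->clear, ctx->launch_grid) from their own init,
 * roughly like this sketch (fdN_* names are placeholders, modeled on
 * the existing per-gen backends):
 *
 *    void fdN_draw_init(struct pipe_context *pctx)
 *    {
 *       struct fd_context *ctx = fd_context(pctx);
 *       ctx->draw_vbo = fdN_draw_vbo;
 *       ctx->clear = fdN_clear;
 *    }
 */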
void
fd_draw_init(struct pipe_context *pctx)
{
	pctx->draw_vbo = fd_draw_vbo;
	pctx->clear = fd_clear;
	pctx->clear_render_target = fd_clear_render_target;
	pctx->clear_depth_stencil = fd_clear_depth_stencil;

	if (has_compute(fd_screen(pctx->screen))) {
		pctx->launch_grid = fd_launch_grid;
	}
}