[mesa.git] / src/gallium/drivers/freedreno/freedreno_draw.c (commit bcd21a232d24cfecb82000b4c94bd130067839cb)
/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_prim.h"
#include "util/u_format.h"
#include "util/u_helpers.h"

#include "freedreno_draw.h"
#include "freedreno_context.h"
#include "freedreno_state.h"
#include "freedreno_resource.h"
#include "freedreno_query_acc.h"
#include "freedreno_query_hw.h"
#include "freedreno_util.h"

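/* Track a resource as read or written by the current batch, so that
 * inter-batch dependencies get recorded and batches are flushed in the
 * right order.  A write additionally marks the resource as having valid
 * contents, which is what later decides whether a buffer needs to be
 * restored (mem2gmem) before rendering on top of it.
 */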
static void
resource_read(struct fd_batch *batch, struct pipe_resource *prsc)
{
	if (!prsc)
		return;
	fd_batch_resource_used(batch, fd_resource(prsc), false);
}

static void
resource_written(struct fd_batch *batch, struct pipe_resource *prsc)
{
	if (!prsc)
		return;
	fd_resource(prsc)->valid = true;
	fd_batch_resource_used(batch, fd_resource(prsc), true);
}

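/* Hardware-independent draw front-end: trims/normalizes the draw info,
 * figures out which GMEM buffers the draw touches, records all resource
 * reads/writes against the current batch, and then hands off to the
 * per-generation ctx->draw_vbo() backend for command-stream emission.
 */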
static void
fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_batch *batch = ctx->batch;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);
	unsigned i, prims, buffers = 0, restore_buffers = 0;

	if (!info->count_from_stream_output && !info->indirect &&
	    !info->primitive_restart &&
	    !u_trim_pipe_prim(info->mode, (unsigned*)&info->count))
		return;

	/* if we supported transform feedback, we'd have to disable this: */
	if (((scissor->maxx - scissor->minx) *
			(scissor->maxy - scissor->miny)) == 0) {
		return;
	}

	/* TODO: push down the region versions into the tiles */
	if (!fd_render_condition_check(pctx))
		return;

	/* emulate unsupported primitives: */
	if (!fd_supported_prim(ctx, info->mode)) {
		if (ctx->streamout.num_targets > 0)
			debug_error("stream-out with emulated prims");
		util_primconvert_save_rasterizer_state(ctx->primconvert, ctx->rasterizer);
		util_primconvert_draw_vbo(ctx->primconvert, info);
		return;
	}

	/* Upload a user index buffer. */
	struct pipe_resource *indexbuf = NULL;
	unsigned index_offset = 0;
	struct pipe_draw_info new_info;
	if (info->index_size) {
		if (info->has_user_indices) {
			if (!util_upload_index_buffer(pctx, info, &indexbuf, &index_offset))
				return;
			new_info = *info;
			new_info.index.resource = indexbuf;
			new_info.has_user_indices = false;
			info = &new_info;
		} else {
			indexbuf = info->index.resource;
		}
	}
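
	/* draws coming from the internal blitter path start with a fresh
	 * batch, and all state is marked dirty so it gets re-emitted:
	 */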
	if (ctx->in_blit) {
		fd_batch_reset(batch);
		fd_context_all_dirty(ctx);
	}

	batch->blit = ctx->in_blit;
	batch->back_blit = ctx->in_shadow;

	/* NOTE: needs to be before resource_written(batch->query_buf), otherwise
	 * query_buf may not be created yet.
	 */
	fd_batch_set_stage(batch, FD_STAGE_DRAW);

	/*
	 * Figure out the buffers/features we need:
	 */

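	/* the screen lock serializes batch/resource dependency tracking
	 * against other contexts sharing the same screen:
	 */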
	mtx_lock(&ctx->screen->lock);

	if (fd_depth_enabled(ctx)) {
		if (fd_resource(pfb->zsbuf->texture)->valid)
			restore_buffers |= FD_BUFFER_DEPTH;
		buffers |= FD_BUFFER_DEPTH;
		resource_written(batch, pfb->zsbuf->texture);
		batch->gmem_reason |= FD_GMEM_DEPTH_ENABLED;
	}

	if (fd_stencil_enabled(ctx)) {
		if (fd_resource(pfb->zsbuf->texture)->valid)
			restore_buffers |= FD_BUFFER_STENCIL;
		buffers |= FD_BUFFER_STENCIL;
		resource_written(batch, pfb->zsbuf->texture);
		batch->gmem_reason |= FD_GMEM_STENCIL_ENABLED;
	}

	if (fd_logicop_enabled(ctx))
		batch->gmem_reason |= FD_GMEM_LOGICOP_ENABLED;

	for (i = 0; i < pfb->nr_cbufs; i++) {
		struct pipe_resource *surf;

		if (!pfb->cbufs[i])
			continue;

		surf = pfb->cbufs[i]->texture;

		resource_written(batch, surf);

		if (fd_resource(surf)->valid)
			restore_buffers |= PIPE_CLEAR_COLOR0 << i;

		buffers |= PIPE_CLEAR_COLOR0 << i;

		if (surf->nr_samples > 1)
			batch->gmem_reason |= FD_GMEM_MSAA_ENABLED;

		if (fd_blend_enabled(ctx, i))
			batch->gmem_reason |= FD_GMEM_BLEND_ENABLED;
	}

	/* Mark SSBOs as being written.. we don't actually know which ones are
	 * read vs written, so just assume the worst
	 */
	foreach_bit(i, ctx->shaderbuf[PIPE_SHADER_FRAGMENT].enabled_mask)
		resource_written(batch, ctx->shaderbuf[PIPE_SHADER_FRAGMENT].sb[i].buffer);

	foreach_bit(i, ctx->shaderimg[PIPE_SHADER_FRAGMENT].enabled_mask) {
		struct pipe_image_view *img =
			&ctx->shaderimg[PIPE_SHADER_FRAGMENT].si[i];
		if (img->access & PIPE_IMAGE_ACCESS_WRITE)
			resource_written(batch, img->resource);
		else
			resource_read(batch, img->resource);
	}

	foreach_bit(i, ctx->constbuf[PIPE_SHADER_VERTEX].enabled_mask)
		resource_read(batch, ctx->constbuf[PIPE_SHADER_VERTEX].cb[i].buffer);
	foreach_bit(i, ctx->constbuf[PIPE_SHADER_FRAGMENT].enabled_mask)
		resource_read(batch, ctx->constbuf[PIPE_SHADER_FRAGMENT].cb[i].buffer);

	/* Mark VBOs as being read */
	foreach_bit(i, ctx->vtx.vertexbuf.enabled_mask) {
		assert(!ctx->vtx.vertexbuf.vb[i].is_user_buffer);
		resource_read(batch, ctx->vtx.vertexbuf.vb[i].buffer.resource);
	}

	/* Mark index buffer as being read */
	resource_read(batch, indexbuf);

	/* Mark textures as being read */
	foreach_bit(i, ctx->tex[PIPE_SHADER_VERTEX].valid_textures)
		resource_read(batch, ctx->tex[PIPE_SHADER_VERTEX].textures[i]->texture);
	foreach_bit(i, ctx->tex[PIPE_SHADER_FRAGMENT].valid_textures)
		resource_read(batch, ctx->tex[PIPE_SHADER_FRAGMENT].textures[i]->texture);

	/* Mark streamout buffers as being written.. */
	for (i = 0; i < ctx->streamout.num_targets; i++)
		if (ctx->streamout.targets[i])
			resource_written(batch, ctx->streamout.targets[i]->buffer);

	resource_written(batch, batch->query_buf);

	list_for_each_entry(struct fd_acc_query, aq, &ctx->acc_active_queries, node)
		resource_written(batch, aq->prsc);

	mtx_unlock(&ctx->screen->lock);

	batch->num_draws++;

	prims = u_reduced_prims_for_vertices(info->mode, info->count);

	ctx->stats.draw_calls++;

	/* TODO prims_emitted should be clipped when the stream-out buffer is
	 * not large enough.  See max_tf_vtx().. probably need to move that
	 * into common code.  Although a bit more annoying since a2xx doesn't
	 * use ir3 so no common way to get at the pipe_stream_output_info
	 * which is needed for this calculation.
	 */
	if (ctx->streamout.num_targets > 0)
		ctx->stats.prims_emitted += prims;
	ctx->stats.prims_generated += prims;

	/* any buffers that haven't been cleared yet, we need to restore
	 * (mem2gmem) before drawing on top of them: */
	batch->restore |= restore_buffers & (FD_BUFFER_ALL & ~batch->cleared);
	/* and any buffers written by the batch need to be resolved back to
	 * system memory (gmem2mem) at flush time: */
	batch->resolve |= buffers;

	DBG("%p: %x %ux%u num_draws=%u (%s/%s)", batch, buffers,
		pfb->width, pfb->height, batch->num_draws,
		util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
		util_format_short_name(pipe_surface_format(pfb->zsbuf)));

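	/* hand off to the generation specific backend; if it emitted the
	 * draw, the batch now has commands that will eventually need to be
	 * flushed:
	 */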
	if (ctx->draw_vbo(ctx, info, index_offset))
		batch->needs_flush = true;

	for (i = 0; i < ctx->streamout.num_targets; i++)
		ctx->streamout.offsets[i] += info->count;

	if (fd_mesa_debug & FD_DBG_DDRAW)
		fd_context_all_dirty(ctx);

	fd_batch_check_size(batch);

	if (info == &new_info)
		pipe_resource_reference(&indexbuf, NULL);
}

/* Generic clear implementation (partially) using u_blitter: */
static void
fd_blitter_clear(struct pipe_context *pctx, unsigned buffers,
		const union pipe_color_union *color, double depth, unsigned stencil)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
	struct blitter_context *blitter = ctx->blitter;

	fd_blitter_pipe_begin(ctx, false, true, FD_STAGE_CLEAR);

	util_blitter_common_clear_setup(blitter, pfb->width, pfb->height,
			buffers, NULL, NULL);

	struct pipe_stencil_ref sr = {
		.ref_value = { stencil & 0xff }
	};
	pctx->set_stencil_ref(pctx, &sr);

	struct pipe_constant_buffer cb = {
		.buffer_size = 16,
		.user_buffer = &color->ui,
	};
	pctx->set_constant_buffer(pctx, PIPE_SHADER_FRAGMENT, 0, &cb);

	if (!ctx->clear_rs_state) {
		const struct pipe_rasterizer_state tmpl = {
			.cull_face = PIPE_FACE_NONE,
			.half_pixel_center = 1,
			.bottom_edge_rule = 1,
			.flatshade = 1,
			.depth_clip = 1,
		};
		ctx->clear_rs_state = pctx->create_rasterizer_state(pctx, &tmpl);
	}
	pctx->bind_rasterizer_state(pctx, ctx->clear_rs_state);

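	/* viewport covering the whole render target, with the Z scale set to
	 * the requested clear depth (with the solid quad's vertices presumably
	 * emitted at z == 1.0, this places the quad at the clear depth):
	 */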
	struct pipe_viewport_state vp = {
		.scale     = { 0.5f * pfb->width, -0.5f * pfb->height, depth },
		.translate = { 0.5f * pfb->width, 0.5f * pfb->height, 0.0f },
	};
	pctx->set_viewport_states(pctx, 0, 1, &vp);

	pctx->bind_vertex_elements_state(pctx, ctx->solid_vbuf_state.vtx);
	pctx->set_vertex_buffers(pctx, blitter->vb_slot, 1,
			&ctx->solid_vbuf_state.vertexbuf.vb[0]);
	pctx->set_stream_output_targets(pctx, 0, NULL, NULL);
	pctx->bind_vs_state(pctx, ctx->solid_prog.vp);
	pctx->bind_fs_state(pctx, ctx->solid_prog.fp);

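	/* emit a single screen-aligned rectangle; PIPE_PRIM_MAX is re-purposed
	 * by the backends to select the hardware RECTLIST primitive:
	 */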
	struct pipe_draw_info info = {
		.mode = PIPE_PRIM_MAX, /* maps to DI_PT_RECTLIST */
		.count = 2,
		.max_index = 1,
		.instance_count = 1,
	};
	ctx->draw_vbo(ctx, &info, 0);

	util_blitter_restore_constant_buffer_state(blitter);
	util_blitter_restore_vertex_states(blitter);
	util_blitter_restore_fragment_states(blitter);
	util_blitter_restore_textures(blitter);
	util_blitter_restore_fb_state(blitter);
	util_blitter_restore_render_cond(blitter);
	util_blitter_unset_running_flag(blitter);

	fd_blitter_pipe_end(ctx);
}

/* TODO figure out how to make better use of existing state mechanism
 * for clear (and possibly gmem->mem / mem->gmem) so we can (a) keep
 * track of what state actually changes, and (b) reduce the code in
 * the a2xx/a3xx parts.
 */

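/* Clear entry point: tries the generation specific fast-clear first, and
 * falls back to the u_blitter based clear above if the backend can't
 * handle this particular combination of buffers:
 */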
static void
fd_clear(struct pipe_context *pctx, unsigned buffers,
		const union pipe_color_union *color, double depth, unsigned stencil)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_batch *batch = ctx->batch;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);
	unsigned cleared_buffers;
	int i;

	/* TODO: push down the region versions into the tiles */
	if (!fd_render_condition_check(pctx))
		return;

	if (ctx->in_blit) {
		fd_batch_reset(batch);
		fd_context_all_dirty(ctx);
	}

	/* for bookkeeping about which buffers have been cleared (and thus
	 * can fully or partially skip mem2gmem) we need to ignore buffers
	 * that have already had a draw, in case apps do silly things like
	 * clear after draw (ie. if you only clear the color buffer, but
	 * something like alpha-test causes side effects from the draw in
	 * the depth buffer, etc)
	 */
	cleared_buffers = buffers & (FD_BUFFER_ALL & ~batch->restore);

	/* do we have full-screen scissor? */
	if (!memcmp(scissor, &ctx->disabled_scissor, sizeof(*scissor))) {
		batch->cleared |= cleared_buffers;
	} else {
		batch->partial_cleared |= cleared_buffers;
		if (cleared_buffers & PIPE_CLEAR_COLOR)
			batch->cleared_scissor.color = *scissor;
		if (cleared_buffers & PIPE_CLEAR_DEPTH)
			batch->cleared_scissor.depth = *scissor;
		if (cleared_buffers & PIPE_CLEAR_STENCIL)
			batch->cleared_scissor.stencil = *scissor;
	}
	batch->resolve |= buffers;
	batch->needs_flush = true;

	mtx_lock(&ctx->screen->lock);

	if (buffers & PIPE_CLEAR_COLOR)
		for (i = 0; i < pfb->nr_cbufs; i++)
			if (buffers & (PIPE_CLEAR_COLOR0 << i))
				resource_written(batch, pfb->cbufs[i]->texture);

	if (buffers & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)) {
		resource_written(batch, pfb->zsbuf->texture);
		batch->gmem_reason |= FD_GMEM_CLEARS_DEPTH_STENCIL;
	}

	resource_written(batch, batch->query_buf);

	list_for_each_entry(struct fd_acc_query, aq, &ctx->acc_active_queries, node)
		resource_written(batch, aq->prsc);

	mtx_unlock(&ctx->screen->lock);

	DBG("%p: %x %ux%u depth=%f, stencil=%u (%s/%s)", batch, buffers,
		pfb->width, pfb->height, depth, stencil,
		util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
		util_format_short_name(pipe_surface_format(pfb->zsbuf)));

	/* if the per-gen backend doesn't implement ctx->clear(), fall back
	 * to the generic blitter clear:
	 */
	bool fallback = true;

	if (ctx->clear) {
		fd_batch_set_stage(batch, FD_STAGE_CLEAR);

		if (ctx->clear(ctx, buffers, color, depth, stencil)) {
			if (fd_mesa_debug & FD_DBG_DCLEAR)
				fd_context_all_dirty(ctx);

			fallback = false;
		}
	}

	if (fallback) {
		fd_blitter_clear(pctx, buffers, color, depth, stencil);
	}
}

static void
fd_clear_render_target(struct pipe_context *pctx, struct pipe_surface *ps,
		const union pipe_color_union *color,
		unsigned x, unsigned y, unsigned w, unsigned h,
		bool render_condition_enabled)
{
	DBG("TODO: x=%u, y=%u, w=%u, h=%u", x, y, w, h);
}

static void
fd_clear_depth_stencil(struct pipe_context *pctx, struct pipe_surface *ps,
		unsigned buffers, double depth, unsigned stencil,
		unsigned x, unsigned y, unsigned w, unsigned h,
		bool render_condition_enabled)
{
	DBG("TODO: buffers=%u, depth=%f, stencil=%u, x=%u, y=%u, w=%u, h=%u",
			buffers, depth, stencil, x, y, w, h);
}

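/* Compute dispatch runs in its own batch (compute doesn't target the
 * framebuffer), which is temporarily swapped in as the context's current
 * batch and flushed once the grid has been launched:
 */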
static void
fd_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_batch *batch, *save_batch = NULL;
	unsigned i;

	batch = fd_batch_create(ctx);
	fd_batch_reference(&save_batch, ctx->batch);
	fd_batch_reference(&ctx->batch, batch);

	mtx_lock(&ctx->screen->lock);

	/* Mark SSBOs as being written.. we don't actually know which ones are
	 * read vs written, so just assume the worst
	 */
	foreach_bit(i, ctx->shaderbuf[PIPE_SHADER_COMPUTE].enabled_mask)
		resource_written(batch, ctx->shaderbuf[PIPE_SHADER_COMPUTE].sb[i].buffer);

	foreach_bit(i, ctx->shaderimg[PIPE_SHADER_COMPUTE].enabled_mask) {
		struct pipe_image_view *img =
			&ctx->shaderimg[PIPE_SHADER_COMPUTE].si[i];
		if (img->access & PIPE_IMAGE_ACCESS_WRITE)
			resource_written(batch, img->resource);
		else
			resource_read(batch, img->resource);
	}

	/* UBOs are read */
	foreach_bit(i, ctx->constbuf[PIPE_SHADER_COMPUTE].enabled_mask)
		resource_read(batch, ctx->constbuf[PIPE_SHADER_COMPUTE].cb[i].buffer);

	/* Mark textures as being read */
	foreach_bit(i, ctx->tex[PIPE_SHADER_COMPUTE].valid_textures)
		resource_read(batch, ctx->tex[PIPE_SHADER_COMPUTE].textures[i]->texture);

	mtx_unlock(&ctx->screen->lock);

	ctx->launch_grid(ctx, info);

	fd_gmem_flush_compute(batch);

	fd_batch_reference(&ctx->batch, save_batch);
	fd_batch_reference(&save_batch, NULL);
}

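/* Hook up the hw-independent entry points; launch_grid is only exposed
 * when the screen reports compute support:
 */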
void
fd_draw_init(struct pipe_context *pctx)
{
	pctx->draw_vbo = fd_draw_vbo;
	pctx->clear = fd_clear;
	pctx->clear_render_target = fd_clear_render_target;
	pctx->clear_depth_stencil = fd_clear_depth_stencil;

	if (has_compute(fd_screen(pctx->screen))) {
		pctx->launch_grid = fd_launch_grid;
	}
}