freedreno: Remove the Emacs mode lines
[mesa.git] / src/gallium/drivers/freedreno/freedreno_draw.c
/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "util/u_draw.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_prim.h"
#include "util/u_format.h"
#include "util/u_helpers.h"

#include "freedreno_draw.h"
#include "freedreno_context.h"
#include "freedreno_state.h"
#include "freedreno_resource.h"
#include "freedreno_query_acc.h"
#include "freedreno_query_hw.h"
#include "freedreno_util.h"

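/* Batch dependency tracking: every resource referenced by a draw or clear
 * is marked against the current batch, so the batch cache can order
 * flushes correctly (a batch that reads a resource depends on the batch
 * that last wrote it).  The bool argument to fd_batch_resource_used()
 * indicates a write.
 */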
static void
resource_read(struct fd_batch *batch, struct pipe_resource *prsc)
{
	if (!prsc)
		return;
	fd_batch_resource_used(batch, fd_resource(prsc), false);
}

static void
resource_written(struct fd_batch *batch, struct pipe_resource *prsc)
{
	if (!prsc)
		return;
	fd_batch_resource_used(batch, fd_resource(prsc), true);
}

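/* Generic draw_vbo entry point: handles the bookkeeping that is common
 * across generations (resource tracking, primitive emulation, stats),
 * then hands the actual command-stream emission off to the per-gen
 * backend via ctx->draw_vbo().
 */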
static void
fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_batch *batch = fd_context_batch(ctx);
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);
	unsigned i, prims, buffers = 0, restore_buffers = 0;

	/* for debugging problems with indirect draw, it is convenient
	 * to be able to emulate it, to determine if the game is feeding
	 * us bogus data:
	 */
	if (info->indirect && (fd_mesa_debug & FD_DBG_NOINDR)) {
		util_draw_indirect(pctx, info);
		return;
	}

	if (!info->count_from_stream_output && !info->indirect &&
	    !info->primitive_restart &&
	    !u_trim_pipe_prim(info->mode, (unsigned*)&info->count))
		return;

	/* if we supported transform feedback, we'd have to disable this: */
	if (((scissor->maxx - scissor->minx) *
			(scissor->maxy - scissor->miny)) == 0) {
		return;
	}

	/* TODO: push down the region versions into the tiles */
	if (!fd_render_condition_check(pctx))
		return;

	/* emulate unsupported primitives: */
	if (!fd_supported_prim(ctx, info->mode)) {
		if (ctx->streamout.num_targets > 0)
			debug_error("stream-out with emulated prims");
		util_primconvert_save_rasterizer_state(ctx->primconvert, ctx->rasterizer);
		util_primconvert_draw_vbo(ctx->primconvert, info);
		return;
	}

	/* Upload a user index buffer. */
	struct pipe_resource *indexbuf = NULL;
	unsigned index_offset = 0;
	struct pipe_draw_info new_info;
	if (info->index_size) {
		if (info->has_user_indices) {
			if (!util_upload_index_buffer(pctx, info, &indexbuf, &index_offset))
				return;
			new_info = *info;
			new_info.index.resource = indexbuf;
			new_info.has_user_indices = false;
			info = &new_info;
		} else {
			indexbuf = info->index.resource;
		}
	}

	if (ctx->in_blit) {
		fd_batch_reset(batch);
		fd_context_all_dirty(ctx);
	}

	batch->blit = ctx->in_blit;
	batch->back_blit = ctx->in_shadow;

	/* NOTE: needs to be before resource_written(batch->query_buf), otherwise
	 * query_buf may not be created yet.
	 */
	fd_batch_set_stage(batch, FD_STAGE_DRAW);

	/*
	 * Figure out the buffers/features we need:
	 */

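	/* The screen lock protects the resource tracking / batch-cache
	 * state, which can also be touched by other contexts sharing
	 * this screen:
	 */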
	mtx_lock(&ctx->screen->lock);

	if (fd_depth_enabled(ctx)) {
		if (fd_resource(pfb->zsbuf->texture)->valid) {
			restore_buffers |= FD_BUFFER_DEPTH;
		} else {
			batch->invalidated |= FD_BUFFER_DEPTH;
		}
		buffers |= FD_BUFFER_DEPTH;
		resource_written(batch, pfb->zsbuf->texture);
		batch->gmem_reason |= FD_GMEM_DEPTH_ENABLED;
	}

	if (fd_stencil_enabled(ctx)) {
		if (fd_resource(pfb->zsbuf->texture)->valid) {
			restore_buffers |= FD_BUFFER_STENCIL;
		} else {
			batch->invalidated |= FD_BUFFER_STENCIL;
		}
		buffers |= FD_BUFFER_STENCIL;
		resource_written(batch, pfb->zsbuf->texture);
		batch->gmem_reason |= FD_GMEM_STENCIL_ENABLED;
	}

	if (fd_logicop_enabled(ctx))
		batch->gmem_reason |= FD_GMEM_LOGICOP_ENABLED;

	for (i = 0; i < pfb->nr_cbufs; i++) {
		struct pipe_resource *surf;

		if (!pfb->cbufs[i])
			continue;

		surf = pfb->cbufs[i]->texture;

		if (fd_resource(surf)->valid) {
			restore_buffers |= PIPE_CLEAR_COLOR0 << i;
		} else {
			batch->invalidated |= PIPE_CLEAR_COLOR0 << i;
		}

		resource_written(batch, surf);

		buffers |= PIPE_CLEAR_COLOR0 << i;

		if (fd_blend_enabled(ctx, i))
			batch->gmem_reason |= FD_GMEM_BLEND_ENABLED;
	}

	/* Mark SSBOs as being written.. we don't actually know which ones are
	 * read vs written, so just assume the worst
	 */
	foreach_bit(i, ctx->shaderbuf[PIPE_SHADER_FRAGMENT].enabled_mask)
		resource_written(batch, ctx->shaderbuf[PIPE_SHADER_FRAGMENT].sb[i].buffer);

	foreach_bit(i, ctx->shaderimg[PIPE_SHADER_FRAGMENT].enabled_mask) {
		struct pipe_image_view *img =
			&ctx->shaderimg[PIPE_SHADER_FRAGMENT].si[i];
		if (img->access & PIPE_IMAGE_ACCESS_WRITE)
			resource_written(batch, img->resource);
		else
			resource_read(batch, img->resource);
	}

	foreach_bit(i, ctx->constbuf[PIPE_SHADER_VERTEX].enabled_mask)
		resource_read(batch, ctx->constbuf[PIPE_SHADER_VERTEX].cb[i].buffer);
	foreach_bit(i, ctx->constbuf[PIPE_SHADER_FRAGMENT].enabled_mask)
		resource_read(batch, ctx->constbuf[PIPE_SHADER_FRAGMENT].cb[i].buffer);

	/* Mark VBOs as being read */
	foreach_bit(i, ctx->vtx.vertexbuf.enabled_mask) {
		assert(!ctx->vtx.vertexbuf.vb[i].is_user_buffer);
		resource_read(batch, ctx->vtx.vertexbuf.vb[i].buffer.resource);
	}

	/* Mark index buffer as being read */
	resource_read(batch, indexbuf);

	/* Mark indirect draw buffer as being read */
	if (info->indirect)
		resource_read(batch, info->indirect->buffer);

	/* Mark textures as being read */
	foreach_bit(i, ctx->tex[PIPE_SHADER_VERTEX].valid_textures)
		resource_read(batch, ctx->tex[PIPE_SHADER_VERTEX].textures[i]->texture);
	foreach_bit(i, ctx->tex[PIPE_SHADER_FRAGMENT].valid_textures)
		resource_read(batch, ctx->tex[PIPE_SHADER_FRAGMENT].textures[i]->texture);

	/* Mark streamout buffers as being written.. */
	for (i = 0; i < ctx->streamout.num_targets; i++)
		if (ctx->streamout.targets[i])
			resource_written(batch, ctx->streamout.targets[i]->buffer);

	resource_written(batch, batch->query_buf);

	list_for_each_entry(struct fd_acc_query, aq, &ctx->acc_active_queries, node)
		resource_written(batch, aq->prsc);

	mtx_unlock(&ctx->screen->lock);

	batch->num_draws++;

	prims = u_reduced_prims_for_vertices(info->mode, info->count);

	ctx->stats.draw_calls++;

	/* TODO: prims_emitted should be clipped when the stream-out buffer is
	 * not large enough.  See max_tf_vtx()..  probably need to move that
	 * into common code.  That is a bit more annoying since a2xx doesn't
	 * use ir3, so there is no common way to get at the
	 * pipe_stream_output_info which is needed for this calculation.
	 */
	if (ctx->streamout.num_targets > 0)
		ctx->stats.prims_emitted += prims;
	ctx->stats.prims_generated += prims;

	/* any buffers that haven't been cleared yet need to be restored: */
	batch->restore |= restore_buffers & (FD_BUFFER_ALL & ~batch->invalidated);
	/* and any buffers used need to be resolved: */
	batch->resolve |= buffers;

	DBG("%p: %x %ux%u num_draws=%u (%s/%s)", batch, buffers,
		pfb->width, pfb->height, batch->num_draws,
		util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
		util_format_short_name(pipe_surface_format(pfb->zsbuf)));

	if (ctx->draw_vbo(ctx, info, index_offset))
		batch->needs_flush = true;

	for (i = 0; i < ctx->streamout.num_targets; i++)
		ctx->streamout.offsets[i] += info->count;

	if (fd_mesa_debug & FD_DBG_DDRAW)
		fd_context_all_dirty(ctx);

	fd_batch_check_size(batch);

	if (info == &new_info)
		pipe_resource_reference(&indexbuf, NULL);
}

/* Generic clear implementation (partially) using u_blitter: */
static void
fd_blitter_clear(struct pipe_context *pctx, unsigned buffers,
		const union pipe_color_union *color, double depth, unsigned stencil)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
	struct blitter_context *blitter = ctx->blitter;

	fd_blitter_pipe_begin(ctx, false, true, FD_STAGE_CLEAR);

	util_blitter_common_clear_setup(blitter, pfb->width, pfb->height,
			buffers, NULL, NULL);

	struct pipe_stencil_ref sr = {
		.ref_value = { stencil & 0xff }
	};
	pctx->set_stencil_ref(pctx, &sr);

	struct pipe_constant_buffer cb = {
		.buffer_size = 16,
		.user_buffer = &color->ui,
	};
	pctx->set_constant_buffer(pctx, PIPE_SHADER_FRAGMENT, 0, &cb);

	if (!ctx->clear_rs_state) {
		const struct pipe_rasterizer_state tmpl = {
			.cull_face = PIPE_FACE_NONE,
			.half_pixel_center = 1,
			.bottom_edge_rule = 1,
			.flatshade = 1,
			.depth_clip_near = 1,
			.depth_clip_far = 1,
		};
		ctx->clear_rs_state = pctx->create_rasterizer_state(pctx, &tmpl);
	}
	pctx->bind_rasterizer_state(pctx, ctx->clear_rs_state);

	struct pipe_viewport_state vp = {
		.scale = { 0.5f * pfb->width, -0.5f * pfb->height, depth },
		.translate = { 0.5f * pfb->width, 0.5f * pfb->height, 0.0f },
	};
	pctx->set_viewport_states(pctx, 0, 1, &vp);

	pctx->bind_vertex_elements_state(pctx, ctx->solid_vbuf_state.vtx);
	pctx->set_vertex_buffers(pctx, blitter->vb_slot, 1,
			&ctx->solid_vbuf_state.vertexbuf.vb[0]);
	pctx->set_stream_output_targets(pctx, 0, NULL, NULL);
	pctx->bind_vs_state(pctx, ctx->solid_prog.vp);
	pctx->bind_fs_state(pctx, ctx->solid_prog.fp);

	struct pipe_draw_info info = {
		.mode = PIPE_PRIM_MAX, /* maps to DI_PT_RECTLIST */
		.count = 2,
		.max_index = 1,
		.instance_count = 1,
	};
	ctx->draw_vbo(ctx, &info, 0);

	util_blitter_restore_constant_buffer_state(blitter);
	util_blitter_restore_vertex_states(blitter);
	util_blitter_restore_fragment_states(blitter);
	util_blitter_restore_textures(blitter);
	util_blitter_restore_fb_state(blitter);
	util_blitter_restore_render_cond(blitter);
	util_blitter_unset_running_flag(blitter);

	fd_blitter_pipe_end(ctx);
}

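/* Generic clear entry point: pctx->clear() only does full-surface
 * clears, which lets us track cleared buffers per-batch and skip the
 * mem2gmem restore for them.  The per-gen ctx->clear() fast path is
 * tried first, falling back to fd_blitter_clear() above.
 */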
static void
fd_clear(struct pipe_context *pctx, unsigned buffers,
		const union pipe_color_union *color, double depth, unsigned stencil)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_batch *batch = fd_context_batch(ctx);
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	unsigned cleared_buffers;
	int i;

	/* TODO: push down the region versions into the tiles */
	if (!fd_render_condition_check(pctx))
		return;

	if (ctx->in_blit) {
		fd_batch_reset(batch);
		fd_context_all_dirty(ctx);
	}

	/* pctx->clear() is only for full-surface clears, so the tracked
	 * scissor is expanded to the full surface, equivalent to having
	 * GL_SCISSOR_TEST disabled:
	 */
	batch->max_scissor.minx = 0;
	batch->max_scissor.miny = 0;
	batch->max_scissor.maxx = pfb->width;
	batch->max_scissor.maxy = pfb->height;

	/* for bookkeeping about which buffers have been cleared (and thus
	 * can fully or partially skip mem2gmem) we need to ignore buffers
	 * that have already had a draw, in case apps do silly things like
	 * clear after draw (ie. if you only clear the color buffer, but
	 * something like alpha-test causes side effects from the draw in
	 * the depth buffer, etc)
	 */
	cleared_buffers = buffers & (FD_BUFFER_ALL & ~batch->restore);
	batch->cleared |= cleared_buffers;
	batch->invalidated |= cleared_buffers;

	batch->resolve |= buffers;
	batch->needs_flush = true;

	mtx_lock(&ctx->screen->lock);

	if (buffers & PIPE_CLEAR_COLOR)
		for (i = 0; i < pfb->nr_cbufs; i++)
			if (buffers & (PIPE_CLEAR_COLOR0 << i))
				resource_written(batch, pfb->cbufs[i]->texture);

	if (buffers & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)) {
		resource_written(batch, pfb->zsbuf->texture);
		batch->gmem_reason |= FD_GMEM_CLEARS_DEPTH_STENCIL;
	}

	resource_written(batch, batch->query_buf);

	list_for_each_entry(struct fd_acc_query, aq, &ctx->acc_active_queries, node)
		resource_written(batch, aq->prsc);

	mtx_unlock(&ctx->screen->lock);

	DBG("%p: %x %ux%u depth=%f, stencil=%u (%s/%s)", batch, buffers,
		pfb->width, pfb->height, depth, stencil,
		util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
		util_format_short_name(pipe_surface_format(pfb->zsbuf)));

	/* if the per-gen backend doesn't implement ctx->clear(), fall
	 * back to the generic blitter clear:
	 */
	bool fallback = true;

	if (ctx->clear) {
		fd_batch_set_stage(batch, FD_STAGE_CLEAR);

		if (ctx->clear(ctx, buffers, color, depth, stencil)) {
			if (fd_mesa_debug & FD_DBG_DCLEAR)
				fd_context_all_dirty(ctx);

			fallback = false;
		}
	}

	if (fallback) {
		fd_blitter_clear(pctx, buffers, color, depth, stencil);
	}

	fd_batch_check_size(batch);
}

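/* Scissored clears of individual render-target / depth-stencil surfaces
 * are not implemented yet; for now these hooks just log the request:
 */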
static void
fd_clear_render_target(struct pipe_context *pctx, struct pipe_surface *ps,
		const union pipe_color_union *color,
		unsigned x, unsigned y, unsigned w, unsigned h,
		bool render_condition_enabled)
{
	DBG("TODO: x=%u, y=%u, w=%u, h=%u", x, y, w, h);
}

static void
fd_clear_depth_stencil(struct pipe_context *pctx, struct pipe_surface *ps,
		unsigned buffers, double depth, unsigned stencil,
		unsigned x, unsigned y, unsigned w, unsigned h,
		bool render_condition_enabled)
{
	DBG("TODO: buffers=%u, depth=%f, stencil=%u, x=%u, y=%u, w=%u, h=%u",
		buffers, depth, stencil, x, y, w, h);
}

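/* Compute grids get their own batch, swapped in for the duration of the
 * launch and flushed immediately afterwards, presumably so the compute
 * work doesn't get entangled with the current render batch:
 */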
static void
fd_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_batch *batch, *save_batch = NULL;
	unsigned i;

	batch = fd_bc_alloc_batch(&ctx->screen->batch_cache, ctx, true);
	fd_batch_reference(&save_batch, ctx->batch);
	fd_batch_reference(&ctx->batch, batch);

	mtx_lock(&ctx->screen->lock);

	/* Mark SSBOs as being written.. we don't actually know which ones are
	 * read vs written, so just assume the worst
	 */
	foreach_bit(i, ctx->shaderbuf[PIPE_SHADER_COMPUTE].enabled_mask)
		resource_written(batch, ctx->shaderbuf[PIPE_SHADER_COMPUTE].sb[i].buffer);

	foreach_bit(i, ctx->shaderimg[PIPE_SHADER_COMPUTE].enabled_mask) {
		struct pipe_image_view *img =
			&ctx->shaderimg[PIPE_SHADER_COMPUTE].si[i];
		if (img->access & PIPE_IMAGE_ACCESS_WRITE)
			resource_written(batch, img->resource);
		else
			resource_read(batch, img->resource);
	}

	/* UBOs are read */
	foreach_bit(i, ctx->constbuf[PIPE_SHADER_COMPUTE].enabled_mask)
		resource_read(batch, ctx->constbuf[PIPE_SHADER_COMPUTE].cb[i].buffer);

	/* Mark textures as being read */
	foreach_bit(i, ctx->tex[PIPE_SHADER_COMPUTE].valid_textures)
		resource_read(batch, ctx->tex[PIPE_SHADER_COMPUTE].textures[i]->texture);

	/* For global buffers, we don't really know if read or written, so assume
	 * the worst:
	 */
	foreach_bit(i, ctx->global_bindings.enabled_mask)
		resource_written(batch, ctx->global_bindings.buf[i]);

	if (info->indirect)
		resource_read(batch, info->indirect);

	mtx_unlock(&ctx->screen->lock);

	batch->needs_flush = true;
	ctx->launch_grid(ctx, info);

	fd_batch_flush(batch, false, false);

	fd_batch_reference(&ctx->batch, save_batch);
	fd_batch_reference(&save_batch, NULL);
	fd_batch_reference(&batch, NULL);
}

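/* Called at context creation to install the generic draw/clear entry
 * points; launch_grid is only wired up when the screen supports compute:
 */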
void
fd_draw_init(struct pipe_context *pctx)
{
	pctx->draw_vbo = fd_draw_vbo;
	pctx->clear = fd_clear;
	pctx->clear_render_target = fd_clear_render_target;
	pctx->clear_depth_stencil = fd_clear_depth_stencil;

	if (has_compute(fd_screen(pctx->screen))) {
		pctx->launch_grid = fd_launch_grid;
	}
}