freedreno: core SSBO support
[mesa.git] src/gallium/drivers/freedreno/freedreno_draw.c
/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_prim.h"
#include "util/u_format.h"
#include "util/u_helpers.h"

#include "freedreno_draw.h"
#include "freedreno_context.h"
#include "freedreno_state.h"
#include "freedreno_resource.h"
#include "freedreno_query_acc.h"
#include "freedreno_query_hw.h"
#include "freedreno_util.h"

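/* Helpers to record resources read or written by the current batch, used
 * for inter-batch dependency tracking and flush ordering:
 */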
static void
resource_read(struct fd_batch *batch, struct pipe_resource *prsc)
{
	if (!prsc)
		return;
	fd_batch_resource_used(batch, fd_resource(prsc), false);
}

static void
resource_written(struct fd_batch *batch, struct pipe_resource *prsc)
{
	if (!prsc)
		return;
	fd_batch_resource_used(batch, fd_resource(prsc), true);
}

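/* The pipe_context::draw_vbo() entrypoint: marks all resources referenced by
 * the draw on the current batch, updates stats, and hands the draw off to the
 * per-generation backend via ctx->draw_vbo():
 */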
static void
fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_batch *batch = ctx->batch;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);
	unsigned i, prims, buffers = 0;

	if (!info->count_from_stream_output && !info->indirect &&
			!info->primitive_restart &&
			!u_trim_pipe_prim(info->mode, (unsigned*)&info->count))
		return;

	/* if we supported transform feedback, we'd have to disable this: */
	if (((scissor->maxx - scissor->minx) *
			(scissor->maxy - scissor->miny)) == 0) {
		return;
	}

	/* TODO: push down the region versions into the tiles */
	if (!fd_render_condition_check(pctx))
		return;

	/* emulate unsupported primitives: */
	if (!fd_supported_prim(ctx, info->mode)) {
		if (ctx->streamout.num_targets > 0)
			debug_error("stream-out with emulated prims");
		util_primconvert_save_index_buffer(ctx->primconvert, &ctx->indexbuf);
		util_primconvert_save_rasterizer_state(ctx->primconvert, ctx->rasterizer);
		util_primconvert_draw_vbo(ctx->primconvert, info);
		return;
	}

	/* Upload a user index buffer. */
	struct pipe_index_buffer ibuffer_saved = {};
	if (info->indexed && ctx->indexbuf.user_buffer &&
			!util_save_and_upload_index_buffer(pctx, info, &ctx->indexbuf,
				&ibuffer_saved)) {
		return;
	}

	if (ctx->in_blit) {
		fd_batch_reset(batch);
		fd_context_all_dirty(ctx);
	}

	batch->blit = ctx->in_blit;
	batch->back_blit = ctx->in_shadow;

	/* NOTE: needs to be before resource_written(batch->query_buf), otherwise
	 * query_buf may not be created yet.
	 */
	fd_batch_set_stage(batch, FD_STAGE_DRAW);

	/*
	 * Figure out the buffers/features we need:
	 */

	mtx_lock(&ctx->screen->lock);

	if (fd_depth_enabled(ctx)) {
		buffers |= FD_BUFFER_DEPTH;
		resource_written(batch, pfb->zsbuf->texture);
		batch->gmem_reason |= FD_GMEM_DEPTH_ENABLED;
	}

	if (fd_stencil_enabled(ctx)) {
		buffers |= FD_BUFFER_STENCIL;
		resource_written(batch, pfb->zsbuf->texture);
		batch->gmem_reason |= FD_GMEM_STENCIL_ENABLED;
	}

	if (fd_logicop_enabled(ctx))
		batch->gmem_reason |= FD_GMEM_LOGICOP_ENABLED;

	for (i = 0; i < pfb->nr_cbufs; i++) {
		struct pipe_resource *surf;

		if (!pfb->cbufs[i])
			continue;

		surf = pfb->cbufs[i]->texture;

		resource_written(batch, surf);
		buffers |= PIPE_CLEAR_COLOR0 << i;

		if (surf->nr_samples > 1)
			batch->gmem_reason |= FD_GMEM_MSAA_ENABLED;

		if (fd_blend_enabled(ctx, i))
			batch->gmem_reason |= FD_GMEM_BLEND_ENABLED;
	}

	/* Mark SSBOs as being written.. we don't actually know which ones are
	 * read vs written, so just assume the worst
	 */
	foreach_bit(i, ctx->shaderbuf[PIPE_SHADER_FRAGMENT].enabled_mask)
		resource_written(batch, ctx->shaderbuf[PIPE_SHADER_FRAGMENT].sb[i].buffer);

	foreach_bit(i, ctx->constbuf[PIPE_SHADER_VERTEX].enabled_mask)
		resource_read(batch, ctx->constbuf[PIPE_SHADER_VERTEX].cb[i].buffer);
	foreach_bit(i, ctx->constbuf[PIPE_SHADER_FRAGMENT].enabled_mask)
		resource_read(batch, ctx->constbuf[PIPE_SHADER_FRAGMENT].cb[i].buffer);

	/* Mark VBOs as being read */
	foreach_bit(i, ctx->vtx.vertexbuf.enabled_mask) {
		assert(!ctx->vtx.vertexbuf.vb[i].user_buffer);
		resource_read(batch, ctx->vtx.vertexbuf.vb[i].buffer);
	}

	/* Mark index buffer as being read */
	resource_read(batch, ctx->indexbuf.buffer);

	/* Mark textures as being read */
	foreach_bit(i, ctx->tex[PIPE_SHADER_VERTEX].valid_textures)
		resource_read(batch, ctx->tex[PIPE_SHADER_VERTEX].textures[i]->texture);
	foreach_bit(i, ctx->tex[PIPE_SHADER_FRAGMENT].valid_textures)
		resource_read(batch, ctx->tex[PIPE_SHADER_FRAGMENT].textures[i]->texture);

	/* Mark streamout buffers as being written.. */
	for (i = 0; i < ctx->streamout.num_targets; i++)
		if (ctx->streamout.targets[i])
			resource_written(batch, ctx->streamout.targets[i]->buffer);

	resource_written(batch, batch->query_buf);

	list_for_each_entry(struct fd_acc_query, aq, &ctx->acc_active_queries, node)
		resource_written(batch, aq->prsc);

	mtx_unlock(&ctx->screen->lock);

	batch->num_draws++;

	prims = u_reduced_prims_for_vertices(info->mode, info->count);

	ctx->stats.draw_calls++;

	/* TODO prims_emitted should be clipped when the stream-out buffer is
	 * not large enough.  See max_tf_vtx().. probably need to move that
	 * into common code.  Although a bit more annoying since a2xx doesn't
	 * use ir3 so no common way to get at the pipe_stream_output_info
	 * which is needed for this calculation.
	 */
	if (ctx->streamout.num_targets > 0)
		ctx->stats.prims_emitted += prims;
	ctx->stats.prims_generated += prims;

	/* any buffers that haven't been cleared yet, we need to restore: */
	batch->restore |= buffers & (FD_BUFFER_ALL & ~batch->cleared);
	/* and any buffers used, need to be resolved: */
	batch->resolve |= buffers;

	DBG("%p: %x %ux%u num_draws=%u (%s/%s)", batch, buffers,
		pfb->width, pfb->height, batch->num_draws,
		util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
		util_format_short_name(pipe_surface_format(pfb->zsbuf)));

	if (ctx->draw_vbo(ctx, info))
		batch->needs_flush = true;

	for (i = 0; i < ctx->streamout.num_targets; i++)
		ctx->streamout.offsets[i] += info->count;

	if (fd_mesa_debug & FD_DBG_DDRAW)
		fd_context_all_dirty(ctx);

	fd_batch_check_size(batch);

	if (info->indexed && ibuffer_saved.user_buffer)
		pctx->set_index_buffer(pctx, &ibuffer_saved);
}

/* Generic clear implementation (partially) using u_blitter: */
static void
fd_blitter_clear(struct pipe_context *pctx, unsigned buffers,
		const union pipe_color_union *color, double depth, unsigned stencil)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
	struct blitter_context *blitter = ctx->blitter;

	fd_blitter_pipe_begin(ctx, false, true, FD_STAGE_CLEAR);

	util_blitter_common_clear_setup(blitter, pfb->width, pfb->height,
			buffers, NULL, NULL);

	struct pipe_stencil_ref sr = {
		.ref_value = { stencil & 0xff }
	};
	pctx->set_stencil_ref(pctx, &sr);

	struct pipe_constant_buffer cb = {
		.buffer_size = 16,
		.user_buffer = &color->ui,
	};
	pctx->set_constant_buffer(pctx, PIPE_SHADER_FRAGMENT, 0, &cb);

	if (!ctx->clear_rs_state) {
		const struct pipe_rasterizer_state tmpl = {
			.cull_face = PIPE_FACE_NONE,
			.half_pixel_center = 1,
			.bottom_edge_rule = 1,
			.flatshade = 1,
			.depth_clip = 1,
		};
		ctx->clear_rs_state = pctx->create_rasterizer_state(pctx, &tmpl);
	}
	pctx->bind_rasterizer_state(pctx, ctx->clear_rs_state);

	struct pipe_viewport_state vp = {
		.scale     = { 0.5f * pfb->width, -0.5f * pfb->height, depth },
		.translate = { 0.5f * pfb->width,  0.5f * pfb->height, 0.0f },
	};
	pctx->set_viewport_states(pctx, 0, 1, &vp);

	pctx->bind_vertex_elements_state(pctx, ctx->solid_vbuf_state.vtx);
	pctx->set_vertex_buffers(pctx, blitter->vb_slot, 1,
			&ctx->solid_vbuf_state.vertexbuf.vb[0]);
	pctx->set_stream_output_targets(pctx, 0, NULL, NULL);
	pctx->bind_vs_state(pctx, ctx->solid_prog.vp);
	pctx->bind_fs_state(pctx, ctx->solid_prog.fp);

	struct pipe_draw_info info = {
		.mode = PIPE_PRIM_MAX, /* maps to DI_PT_RECTLIST */
		.count = 2,
		.max_index = 1,
		.instance_count = 1,
	};
	ctx->draw_vbo(ctx, &info);

	util_blitter_restore_constant_buffer_state(blitter);
	util_blitter_restore_vertex_states(blitter);
	util_blitter_restore_fragment_states(blitter);
	util_blitter_restore_textures(blitter);
	util_blitter_restore_fb_state(blitter);
	util_blitter_restore_render_cond(blitter);
	util_blitter_unset_running_flag(blitter);

	fd_blitter_pipe_end(ctx);
}

/* TODO figure out how to make better use of existing state mechanism
 * for clear (and possibly gmem->mem / mem->gmem) so we can (a) keep
 * track of what state really actually changes, and (b) reduce the code
 * in the a2xx/a3xx parts.
 */

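/* The pipe_context::clear() entrypoint: records which buffers are cleared on
 * the current batch (so mem2gmem can be fully or partially skipped), marks the
 * written resources, and then either calls into the per-generation backend
 * clear or falls back to the blitter-based clear above:
 */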
static void
fd_clear(struct pipe_context *pctx, unsigned buffers,
		const union pipe_color_union *color, double depth, unsigned stencil)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_batch *batch = ctx->batch;
	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
	struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);
	unsigned cleared_buffers;
	int i;

	/* TODO: push down the region versions into the tiles */
	if (!fd_render_condition_check(pctx))
		return;

	if (ctx->in_blit) {
		fd_batch_reset(batch);
		fd_context_all_dirty(ctx);
	}

	/* for bookkeeping about which buffers have been cleared (and thus
	 * can fully or partially skip mem2gmem) we need to ignore buffers
	 * that have already had a draw, in case apps do silly things like
	 * clear after draw (ie. if you only clear the color buffer, but
	 * something like alpha-test causes side effects from the draw in
	 * the depth buffer, etc)
	 */
	cleared_buffers = buffers & (FD_BUFFER_ALL & ~batch->restore);

	/* do we have full-screen scissor? */
	if (!memcmp(scissor, &ctx->disabled_scissor, sizeof(*scissor))) {
		batch->cleared |= cleared_buffers;
	} else {
		batch->partial_cleared |= cleared_buffers;
		if (cleared_buffers & PIPE_CLEAR_COLOR)
			batch->cleared_scissor.color = *scissor;
		if (cleared_buffers & PIPE_CLEAR_DEPTH)
			batch->cleared_scissor.depth = *scissor;
		if (cleared_buffers & PIPE_CLEAR_STENCIL)
			batch->cleared_scissor.stencil = *scissor;
	}
	batch->resolve |= buffers;
	batch->needs_flush = true;

	mtx_lock(&ctx->screen->lock);

	if (buffers & PIPE_CLEAR_COLOR)
		for (i = 0; i < pfb->nr_cbufs; i++)
			if (buffers & (PIPE_CLEAR_COLOR0 << i))
				resource_written(batch, pfb->cbufs[i]->texture);

	if (buffers & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)) {
		resource_written(batch, pfb->zsbuf->texture);
		batch->gmem_reason |= FD_GMEM_CLEARS_DEPTH_STENCIL;
	}

	resource_written(batch, batch->query_buf);

	list_for_each_entry(struct fd_acc_query, aq, &ctx->acc_active_queries, node)
		resource_written(batch, aq->prsc);

	mtx_unlock(&ctx->screen->lock);

	DBG("%p: %x %ux%u depth=%f, stencil=%u (%s/%s)", batch, buffers,
		pfb->width, pfb->height, depth, stencil,
		util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
		util_format_short_name(pipe_surface_format(pfb->zsbuf)));

	/* if the per-gen backend doesn't implement ctx->clear(), fall back to
	 * the generic blitter clear:
	 */
	if (!ctx->clear) {
		fd_blitter_clear(pctx, buffers, color, depth, stencil);
		return;
	}

	fd_batch_set_stage(batch, FD_STAGE_CLEAR);

	ctx->clear(ctx, buffers, color, depth, stencil);

	if (fd_mesa_debug & FD_DBG_DCLEAR)
		fd_context_all_dirty(ctx);
}

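/* Scissored clear_render_target()/clear_depth_stencil() are not implemented
 * yet; for now they just log the request:
 */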
static void
fd_clear_render_target(struct pipe_context *pctx, struct pipe_surface *ps,
		const union pipe_color_union *color,
		unsigned x, unsigned y, unsigned w, unsigned h,
		bool render_condition_enabled)
{
	DBG("TODO: x=%u, y=%u, w=%u, h=%u", x, y, w, h);
}

static void
fd_clear_depth_stencil(struct pipe_context *pctx, struct pipe_surface *ps,
		unsigned buffers, double depth, unsigned stencil,
		unsigned x, unsigned y, unsigned w, unsigned h,
		bool render_condition_enabled)
{
	DBG("TODO: buffers=%u, depth=%f, stencil=%u, x=%u, y=%u, w=%u, h=%u",
		buffers, depth, stencil, x, y, w, h);
}

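/* Hook up the draw/clear entrypoints on the pipe_context: */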
void
fd_draw_init(struct pipe_context *pctx)
{
	pctx->draw_vbo = fd_draw_vbo;
	pctx->clear = fd_clear;
	pctx->clear_render_target = fd_clear_render_target;
	pctx->clear_depth_stencil = fd_clear_depth_stencil;
}