/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
29 #include "pipe/p_state.h"
30 #include "util/u_draw.h"
31 #include "util/u_string.h"
32 #include "util/u_memory.h"
33 #include "util/u_prim.h"
34 #include "util/u_format.h"
35 #include "util/u_helpers.h"
37 #include "freedreno_draw.h"
38 #include "freedreno_context.h"
39 #include "freedreno_state.h"
40 #include "freedreno_resource.h"
41 #include "freedreno_query_acc.h"
42 #include "freedreno_query_hw.h"
43 #include "freedreno_util.h"
46 resource_read(struct fd_batch
*batch
, struct pipe_resource
*prsc
)
50 fd_batch_resource_used(batch
, fd_resource(prsc
), false);
54 resource_written(struct fd_batch
*batch
, struct pipe_resource
*prsc
)
58 fd_batch_resource_used(batch
, fd_resource(prsc
), true);
62 fd_draw_vbo(struct pipe_context
*pctx
, const struct pipe_draw_info
*info
)
64 struct fd_context
*ctx
= fd_context(pctx
);
65 struct fd_batch
*batch
= fd_context_batch(ctx
);
66 struct pipe_framebuffer_state
*pfb
= &batch
->framebuffer
;
67 struct pipe_scissor_state
*scissor
= fd_context_get_scissor(ctx
);
68 unsigned i
, prims
, buffers
= 0, restore_buffers
= 0;
70 /* for debugging problems with indirect draw, it is convenient
71 * to be able to emulate it, to determine if game is feeding us
74 if (info
->indirect
&& (fd_mesa_debug
& FD_DBG_NOINDR
)) {
75 util_draw_indirect(pctx
, info
);
79 if (!info
->count_from_stream_output
&& !info
->indirect
&&
80 !info
->primitive_restart
&&
81 !u_trim_pipe_prim(info
->mode
, (unsigned*)&info
->count
))
84 /* if we supported transform feedback, we'd have to disable this: */
85 if (((scissor
->maxx
- scissor
->minx
) *
86 (scissor
->maxy
- scissor
->miny
)) == 0) {
90 /* TODO: push down the region versions into the tiles */
91 if (!fd_render_condition_check(pctx
))
94 /* emulate unsupported primitives: */
95 if (!fd_supported_prim(ctx
, info
->mode
)) {
96 if (ctx
->streamout
.num_targets
> 0)
97 debug_error("stream-out with emulated prims");
98 util_primconvert_save_rasterizer_state(ctx
->primconvert
, ctx
->rasterizer
);
99 util_primconvert_draw_vbo(ctx
->primconvert
, info
);
103 /* Upload a user index buffer. */
104 struct pipe_resource
*indexbuf
= NULL
;
105 unsigned index_offset
= 0;
106 struct pipe_draw_info new_info
;
107 if (info
->index_size
) {
108 if (info
->has_user_indices
) {
109 if (!util_upload_index_buffer(pctx
, info
, &indexbuf
, &index_offset
))
112 new_info
.index
.resource
= indexbuf
;
113 new_info
.has_user_indices
= false;
116 indexbuf
= info
->index
.resource
;
121 fd_batch_reset(batch
);
122 fd_context_all_dirty(ctx
);
125 batch
->blit
= ctx
->in_blit
;
126 batch
->back_blit
= ctx
->in_shadow
;
128 /* NOTE: needs to be before resource_written(batch->query_buf), otherwise
129 * query_buf may not be created yet.
131 fd_batch_set_stage(batch
, FD_STAGE_DRAW
);
134 * Figure out the buffers/features we need:
137 mtx_lock(&ctx
->screen
->lock
);
139 if (fd_depth_enabled(ctx
)) {
140 if (fd_resource(pfb
->zsbuf
->texture
)->valid
)
141 restore_buffers
|= FD_BUFFER_DEPTH
;
142 buffers
|= FD_BUFFER_DEPTH
;
143 resource_written(batch
, pfb
->zsbuf
->texture
);
144 batch
->gmem_reason
|= FD_GMEM_DEPTH_ENABLED
;
147 if (fd_stencil_enabled(ctx
)) {
148 if (fd_resource(pfb
->zsbuf
->texture
)->valid
)
149 restore_buffers
|= FD_BUFFER_STENCIL
;
150 buffers
|= FD_BUFFER_STENCIL
;
151 resource_written(batch
, pfb
->zsbuf
->texture
);
152 batch
->gmem_reason
|= FD_GMEM_STENCIL_ENABLED
;
155 if (fd_logicop_enabled(ctx
))
156 batch
->gmem_reason
|= FD_GMEM_LOGICOP_ENABLED
;
158 for (i
= 0; i
< pfb
->nr_cbufs
; i
++) {
159 struct pipe_resource
*surf
;
164 surf
= pfb
->cbufs
[i
]->texture
;
166 resource_written(batch
, surf
);
168 if (fd_resource(surf
)->valid
)
169 restore_buffers
|= PIPE_CLEAR_COLOR0
<< i
;
171 buffers
|= PIPE_CLEAR_COLOR0
<< i
;
173 if (fd_blend_enabled(ctx
, i
))
174 batch
->gmem_reason
|= FD_GMEM_BLEND_ENABLED
;
177 /* Mark SSBOs as being written.. we don't actually know which ones are
178 * read vs written, so just assume the worst
180 foreach_bit(i
, ctx
->shaderbuf
[PIPE_SHADER_FRAGMENT
].enabled_mask
)
181 resource_written(batch
, ctx
->shaderbuf
[PIPE_SHADER_FRAGMENT
].sb
[i
].buffer
);
183 foreach_bit(i
, ctx
->shaderimg
[PIPE_SHADER_FRAGMENT
].enabled_mask
) {
184 struct pipe_image_view
*img
=
185 &ctx
->shaderimg
[PIPE_SHADER_FRAGMENT
].si
[i
];
186 if (img
->access
& PIPE_IMAGE_ACCESS_WRITE
)
187 resource_written(batch
, img
->resource
);
189 resource_read(batch
, img
->resource
);
192 foreach_bit(i
, ctx
->constbuf
[PIPE_SHADER_VERTEX
].enabled_mask
)
193 resource_read(batch
, ctx
->constbuf
[PIPE_SHADER_VERTEX
].cb
[i
].buffer
);
194 foreach_bit(i
, ctx
->constbuf
[PIPE_SHADER_FRAGMENT
].enabled_mask
)
195 resource_read(batch
, ctx
->constbuf
[PIPE_SHADER_FRAGMENT
].cb
[i
].buffer
);
197 /* Mark VBOs as being read */
198 foreach_bit(i
, ctx
->vtx
.vertexbuf
.enabled_mask
) {
199 assert(!ctx
->vtx
.vertexbuf
.vb
[i
].is_user_buffer
);
200 resource_read(batch
, ctx
->vtx
.vertexbuf
.vb
[i
].buffer
.resource
);
203 /* Mark index buffer as being read */
204 resource_read(batch
, indexbuf
);
206 /* Mark indirect draw buffer as being read */
208 resource_read(batch
, info
->indirect
->buffer
);
210 /* Mark textures as being read */
211 foreach_bit(i
, ctx
->tex
[PIPE_SHADER_VERTEX
].valid_textures
)
212 resource_read(batch
, ctx
->tex
[PIPE_SHADER_VERTEX
].textures
[i
]->texture
);
213 foreach_bit(i
, ctx
->tex
[PIPE_SHADER_FRAGMENT
].valid_textures
)
214 resource_read(batch
, ctx
->tex
[PIPE_SHADER_FRAGMENT
].textures
[i
]->texture
);
216 /* Mark streamout buffers as being written.. */
217 for (i
= 0; i
< ctx
->streamout
.num_targets
; i
++)
218 if (ctx
->streamout
.targets
[i
])
219 resource_written(batch
, ctx
->streamout
.targets
[i
]->buffer
);
221 resource_written(batch
, batch
->query_buf
);
223 list_for_each_entry(struct fd_acc_query
, aq
, &ctx
->acc_active_queries
, node
)
224 resource_written(batch
, aq
->prsc
);
226 mtx_unlock(&ctx
->screen
->lock
);
230 prims
= u_reduced_prims_for_vertices(info
->mode
, info
->count
);
232 ctx
->stats
.draw_calls
++;
234 /* TODO prims_emitted should be clipped when the stream-out buffer is
235 * not large enough. See max_tf_vtx().. probably need to move that
236 * into common code. Although a bit more annoying since a2xx doesn't
237 * use ir3 so no common way to get at the pipe_stream_output_info
238 * which is needed for this calculation.
240 if (ctx
->streamout
.num_targets
> 0)
241 ctx
->stats
.prims_emitted
+= prims
;
242 ctx
->stats
.prims_generated
+= prims
;
244 /* any buffers that haven't been cleared yet, we need to restore: */
245 batch
->restore
|= restore_buffers
& (FD_BUFFER_ALL
& ~batch
->cleared
);
246 /* and any buffers used, need to be resolved: */
247 batch
->resolve
|= buffers
;
249 DBG("%p: %x %ux%u num_draws=%u (%s/%s)", batch
, buffers
,
250 pfb
->width
, pfb
->height
, batch
->num_draws
,
251 util_format_short_name(pipe_surface_format(pfb
->cbufs
[0])),
252 util_format_short_name(pipe_surface_format(pfb
->zsbuf
)));
254 if (ctx
->draw_vbo(ctx
, info
, index_offset
))
255 batch
->needs_flush
= true;
257 for (i
= 0; i
< ctx
->streamout
.num_targets
; i
++)
258 ctx
->streamout
.offsets
[i
] += info
->count
;
260 if (fd_mesa_debug
& FD_DBG_DDRAW
)
261 fd_context_all_dirty(ctx
);
263 fd_batch_check_size(batch
);
265 if (info
== &new_info
)
266 pipe_resource_reference(&indexbuf
, NULL
);
269 /* Generic clear implementation (partially) using u_blitter: */
271 fd_blitter_clear(struct pipe_context
*pctx
, unsigned buffers
,
272 const union pipe_color_union
*color
, double depth
, unsigned stencil
)
274 struct fd_context
*ctx
= fd_context(pctx
);
275 struct pipe_framebuffer_state
*pfb
= &ctx
->batch
->framebuffer
;
276 struct blitter_context
*blitter
= ctx
->blitter
;
278 fd_blitter_pipe_begin(ctx
, false, true, FD_STAGE_CLEAR
);
280 util_blitter_common_clear_setup(blitter
, pfb
->width
, pfb
->height
,
281 buffers
, NULL
, NULL
);
283 struct pipe_stencil_ref sr
= {
284 .ref_value
= { stencil
& 0xff }
286 pctx
->set_stencil_ref(pctx
, &sr
);
288 struct pipe_constant_buffer cb
= {
290 .user_buffer
= &color
->ui
,
292 pctx
->set_constant_buffer(pctx
, PIPE_SHADER_FRAGMENT
, 0, &cb
);
294 if (!ctx
->clear_rs_state
) {
295 const struct pipe_rasterizer_state tmpl
= {
296 .cull_face
= PIPE_FACE_NONE
,
297 .half_pixel_center
= 1,
298 .bottom_edge_rule
= 1,
300 .depth_clip_near
= 1,
303 ctx
->clear_rs_state
= pctx
->create_rasterizer_state(pctx
, &tmpl
);
305 pctx
->bind_rasterizer_state(pctx
, ctx
->clear_rs_state
);
307 struct pipe_viewport_state vp
= {
308 .scale
= { 0.5f
* pfb
->width
, -0.5f
* pfb
->height
, depth
},
309 .translate
= { 0.5f
* pfb
->width
, 0.5f
* pfb
->height
, 0.0f
},
311 pctx
->set_viewport_states(pctx
, 0, 1, &vp
);
313 pctx
->bind_vertex_elements_state(pctx
, ctx
->solid_vbuf_state
.vtx
);
314 pctx
->set_vertex_buffers(pctx
, blitter
->vb_slot
, 1,
315 &ctx
->solid_vbuf_state
.vertexbuf
.vb
[0]);
316 pctx
->set_stream_output_targets(pctx
, 0, NULL
, NULL
);
317 pctx
->bind_vs_state(pctx
, ctx
->solid_prog
.vp
);
318 pctx
->bind_fs_state(pctx
, ctx
->solid_prog
.fp
);
320 struct pipe_draw_info info
= {
321 .mode
= PIPE_PRIM_MAX
, /* maps to DI_PT_RECTLIST */
326 ctx
->draw_vbo(ctx
, &info
, 0);
328 util_blitter_restore_constant_buffer_state(blitter
);
329 util_blitter_restore_vertex_states(blitter
);
330 util_blitter_restore_fragment_states(blitter
);
331 util_blitter_restore_textures(blitter
);
332 util_blitter_restore_fb_state(blitter
);
333 util_blitter_restore_render_cond(blitter
);
334 util_blitter_unset_running_flag(blitter
);
336 fd_blitter_pipe_end(ctx
);
339 /* TODO figure out how to make better use of existing state mechanism
340 * for clear (and possibly gmem->mem / mem->gmem) so we can (a) keep
341 * track of what state really actually changes, and (b) reduce the code
342 * in the a2xx/a3xx parts.
346 fd_clear(struct pipe_context
*pctx
, unsigned buffers
,
347 const union pipe_color_union
*color
, double depth
, unsigned stencil
)
349 struct fd_context
*ctx
= fd_context(pctx
);
350 struct fd_batch
*batch
= fd_context_batch(ctx
);
351 struct pipe_framebuffer_state
*pfb
= &batch
->framebuffer
;
352 struct pipe_scissor_state
*scissor
= fd_context_get_scissor(ctx
);
353 unsigned cleared_buffers
;
356 /* TODO: push down the region versions into the tiles */
357 if (!fd_render_condition_check(pctx
))
361 fd_batch_reset(batch
);
362 fd_context_all_dirty(ctx
);
365 /* for bookkeeping about which buffers have been cleared (and thus
366 * can fully or partially skip mem2gmem) we need to ignore buffers
367 * that have already had a draw, in case apps do silly things like
368 * clear after draw (ie. if you only clear the color buffer, but
369 * something like alpha-test causes side effects from the draw in
370 * the depth buffer, etc)
372 cleared_buffers
= buffers
& (FD_BUFFER_ALL
& ~batch
->restore
);
374 /* do we have full-screen scissor? */
375 if (!memcmp(scissor
, &ctx
->disabled_scissor
, sizeof(*scissor
))) {
376 batch
->cleared
|= cleared_buffers
;
378 batch
->partial_cleared
|= cleared_buffers
;
379 if (cleared_buffers
& PIPE_CLEAR_COLOR
)
380 batch
->cleared_scissor
.color
= *scissor
;
381 if (cleared_buffers
& PIPE_CLEAR_DEPTH
)
382 batch
->cleared_scissor
.depth
= *scissor
;
383 if (cleared_buffers
& PIPE_CLEAR_STENCIL
)
384 batch
->cleared_scissor
.stencil
= *scissor
;
386 batch
->resolve
|= buffers
;
387 batch
->needs_flush
= true;
389 mtx_lock(&ctx
->screen
->lock
);
391 if (buffers
& PIPE_CLEAR_COLOR
)
392 for (i
= 0; i
< pfb
->nr_cbufs
; i
++)
393 if (buffers
& (PIPE_CLEAR_COLOR0
<< i
))
394 resource_written(batch
, pfb
->cbufs
[i
]->texture
);
396 if (buffers
& (PIPE_CLEAR_DEPTH
| PIPE_CLEAR_STENCIL
)) {
397 resource_written(batch
, pfb
->zsbuf
->texture
);
398 batch
->gmem_reason
|= FD_GMEM_CLEARS_DEPTH_STENCIL
;
401 resource_written(batch
, batch
->query_buf
);
403 list_for_each_entry(struct fd_acc_query
, aq
, &ctx
->acc_active_queries
, node
)
404 resource_written(batch
, aq
->prsc
);
406 mtx_unlock(&ctx
->screen
->lock
);
408 DBG("%p: %x %ux%u depth=%f, stencil=%u (%s/%s)", batch
, buffers
,
409 pfb
->width
, pfb
->height
, depth
, stencil
,
410 util_format_short_name(pipe_surface_format(pfb
->cbufs
[0])),
411 util_format_short_name(pipe_surface_format(pfb
->zsbuf
)));
413 /* if per-gen backend doesn't implement ctx->clear() generic
416 bool fallback
= true;
419 fd_batch_set_stage(batch
, FD_STAGE_CLEAR
);
421 if (ctx
->clear(ctx
, buffers
, color
, depth
, stencil
)) {
422 if (fd_mesa_debug
& FD_DBG_DCLEAR
)
423 fd_context_all_dirty(ctx
);
430 fd_blitter_clear(pctx
, buffers
, color
, depth
, stencil
);
435 fd_clear_render_target(struct pipe_context
*pctx
, struct pipe_surface
*ps
,
436 const union pipe_color_union
*color
,
437 unsigned x
, unsigned y
, unsigned w
, unsigned h
,
438 bool render_condition_enabled
)
440 DBG("TODO: x=%u, y=%u, w=%u, h=%u", x
, y
, w
, h
);
444 fd_clear_depth_stencil(struct pipe_context
*pctx
, struct pipe_surface
*ps
,
445 unsigned buffers
, double depth
, unsigned stencil
,
446 unsigned x
, unsigned y
, unsigned w
, unsigned h
,
447 bool render_condition_enabled
)
449 DBG("TODO: buffers=%u, depth=%f, stencil=%u, x=%u, y=%u, w=%u, h=%u",
450 buffers
, depth
, stencil
, x
, y
, w
, h
);
454 fd_launch_grid(struct pipe_context
*pctx
, const struct pipe_grid_info
*info
)
456 struct fd_context
*ctx
= fd_context(pctx
);
457 struct fd_batch
*batch
, *save_batch
= NULL
;
460 batch
= fd_batch_create(ctx
, true);
461 fd_batch_reference(&save_batch
, ctx
->batch
);
462 fd_batch_reference(&ctx
->batch
, batch
);
464 mtx_lock(&ctx
->screen
->lock
);
466 /* Mark SSBOs as being written.. we don't actually know which ones are
467 * read vs written, so just assume the worst
469 foreach_bit(i
, ctx
->shaderbuf
[PIPE_SHADER_COMPUTE
].enabled_mask
)
470 resource_read(batch
, ctx
->shaderbuf
[PIPE_SHADER_COMPUTE
].sb
[i
].buffer
);
472 foreach_bit(i
, ctx
->shaderimg
[PIPE_SHADER_COMPUTE
].enabled_mask
) {
473 struct pipe_image_view
*img
=
474 &ctx
->shaderimg
[PIPE_SHADER_COMPUTE
].si
[i
];
475 if (img
->access
& PIPE_IMAGE_ACCESS_WRITE
)
476 resource_written(batch
, img
->resource
);
478 resource_read(batch
, img
->resource
);
482 foreach_bit(i
, ctx
->constbuf
[PIPE_SHADER_COMPUTE
].enabled_mask
)
483 resource_read(batch
, ctx
->constbuf
[PIPE_SHADER_COMPUTE
].cb
[i
].buffer
);
485 /* Mark textures as being read */
486 foreach_bit(i
, ctx
->tex
[PIPE_SHADER_COMPUTE
].valid_textures
)
487 resource_read(batch
, ctx
->tex
[PIPE_SHADER_COMPUTE
].textures
[i
]->texture
);
489 /* For global buffers, we don't really know if read or written, so assume
492 foreach_bit(i
, ctx
->global_bindings
.enabled_mask
)
493 resource_written(batch
, ctx
->global_bindings
.buf
[i
]);
496 resource_read(batch
, info
->indirect
);
498 mtx_unlock(&ctx
->screen
->lock
);
500 batch
->needs_flush
= true;
501 ctx
->launch_grid(ctx
, info
);
503 fd_batch_flush(batch
, false, false);
505 fd_batch_reference(&ctx
->batch
, save_batch
);
506 fd_batch_reference(&save_batch
, NULL
);
510 fd_draw_init(struct pipe_context
*pctx
)
512 pctx
->draw_vbo
= fd_draw_vbo
;
513 pctx
->clear
= fd_clear
;
514 pctx
->clear_render_target
= fd_clear_render_target
;
515 pctx
->clear_depth_stencil
= fd_clear_depth_stencil
;
517 if (has_compute(fd_screen(pctx
->screen
))) {
518 pctx
->launch_grid
= fd_launch_grid
;