/*
 * Copyright © 2014-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/u_blitter.h"
#include "util/u_prim.h"
#include "util/u_format.h"
#include "util/u_pack_color.h"
#include "util/u_prim_restart.h"
#include "util/u_upload_mgr.h"
#include "indices/u_primconvert.h"

#include "v3d_context.h"
#include "v3d_resource.h"

#include "broadcom/compiler/v3d_compiler.h"
#include "broadcom/common/v3d_macros.h"
#include "broadcom/cle/v3dx_pack.h"

/**
 * Does the initial binning command list setup for drawing to a given FBO.
 */
static void
v3d_start_draw(struct v3d_context *v3d)
{
        struct v3d_job *job = v3d->job;

        if (job->needs_flush)
                return;

        /* Get space to emit our BCL state, using a branch to jump to a new BO
         * if necessary.
         */
        v3d_cl_ensure_space_with_branch(&job->bcl, 256 /* XXX */);

        job->submit.bcl_start = job->bcl.bo->offset;
        v3d_job_add_bo(job, job->bcl.bo);

        /* The PTB will request the tile alloc initial size per tile at start
         * of tile binning.
         */
        uint32_t tile_alloc_size = (job->draw_tiles_x *
                                    job->draw_tiles_y) * 64;
        /* The PTB allocates in aligned 4k chunks after the initial setup. */
        tile_alloc_size = align(tile_alloc_size, 4096);

        /* Include the first two chunk allocations that the PTB does so that
         * we definitely clear the OOM condition before triggering one (the HW
         * won't trigger OOM during the first allocations).
         */
        tile_alloc_size += 8192;

        /* For performance, allocate some extra initial memory after the PTB's
         * minimal allocations, so that we hopefully don't have to block the
         * GPU on the kernel handling an OOM signal.
         */
        tile_alloc_size += 512 * 1024;

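        /* Just to illustrate the arithmetic above: a job binned as 30x17
         * tiles starts at 30 * 17 * 64 = 32640 bytes, gets aligned up to
         * 32768, then picks up 8192 bytes for the first two PTB chunks and
         * 512 KiB of slack, for 565248 bytes allocated here.
         */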
        job->tile_alloc = v3d_bo_alloc(v3d->screen, tile_alloc_size,
                                       "tile_alloc");
        uint32_t tsda_per_tile_size = v3d->screen->devinfo.ver >= 40 ? 256 : 64;
        job->tile_state = v3d_bo_alloc(v3d->screen,
                                       job->draw_tiles_y *
                                       job->draw_tiles_x *
                                       tsda_per_tile_size,
                                       "TSDA");

#if V3D_VERSION >= 40
        cl_emit(&job->bcl, TILE_BINNING_MODE_CFG, config) {
                config.width_in_pixels = v3d->framebuffer.width;
                config.height_in_pixels = v3d->framebuffer.height;
                config.number_of_render_targets =
                        MAX2(v3d->framebuffer.nr_cbufs, 1);

                config.multisample_mode_4x = job->msaa;

                config.maximum_bpp_of_all_render_targets = job->internal_bpp;
        }
#else /* V3D_VERSION < 40 */
        /* "Binning mode lists start with a Tile Binning Mode Configuration
         * item."
         *
         * Part1 signals the end of binning config setup.
         */
        cl_emit(&job->bcl, TILE_BINNING_MODE_CFG_PART2, config) {
                config.tile_allocation_memory_address =
                        cl_address(job->tile_alloc, 0);
                config.tile_allocation_memory_size = job->tile_alloc->size;
        }

        cl_emit(&job->bcl, TILE_BINNING_MODE_CFG_PART1, config) {
                config.tile_state_data_array_base_address =
                        cl_address(job->tile_state, 0);

                config.width_in_tiles = job->draw_tiles_x;
                config.height_in_tiles = job->draw_tiles_y;
                /* Must be >= 1 */
                config.number_of_render_targets =
                        MAX2(v3d->framebuffer.nr_cbufs, 1);

                config.multisample_mode_4x = job->msaa;

                config.maximum_bpp_of_all_render_targets = job->internal_bpp;
        }
#endif /* V3D_VERSION < 40 */
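
        /* Note the version split above: on V3D 3.3 the tile allocation and
         * tile state addresses are programmed through the PART2/PART1
         * packets, while on 4.x the single TILE_BINNING_MODE_CFG only
         * describes the framebuffer layout.
         */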

        /* There's definitely nothing in the VCD cache we want. */
        cl_emit(&job->bcl, FLUSH_VCD_CACHE, bin);

        /* Disable any leftover OQ state from another job. */
        cl_emit(&job->bcl, OCCLUSION_QUERY_COUNTER, counter);

        /* "Binning mode lists must have a Start Tile Binning item (6) after
         *  any prefix state data before the binning list proper starts."
         */
        cl_emit(&job->bcl, START_TILE_BINNING, bin);

        job->needs_flush = true;
        job->draw_width = v3d->framebuffer.width;
        job->draw_height = v3d->framebuffer.height;
}

static void
v3d_predraw_check_stage_inputs(struct pipe_context *pctx,
                               enum pipe_shader_type s)
{
        struct v3d_context *v3d = v3d_context(pctx);

        /* XXX perf: If we're reading from the output of TF in this job, we
         * should instead be using the wait for transform feedback
         * functionality.
         */

        /* Flush writes to textures we're sampling. */
        for (int i = 0; i < v3d->tex[s].num_textures; i++) {
                struct pipe_sampler_view *pview = v3d->tex[s].textures[i];
                if (!pview)
                        continue;

                struct v3d_sampler_view *view = v3d_sampler_view(pview);

                if (view->texture != view->base.texture &&
                    view->base.format != PIPE_FORMAT_X32_S8X24_UINT)
                        v3d_update_shadow_texture(pctx, &view->base);

                v3d_flush_jobs_writing_resource(v3d, view->texture);
        }

        /* Flush writes to UBOs. */
        foreach_bit(i, v3d->constbuf[s].enabled_mask) {
                struct pipe_constant_buffer *cb = &v3d->constbuf[s].cb[i];
                if (cb->buffer)
                        v3d_flush_jobs_writing_resource(v3d, cb->buffer);
        }

        /* Flush writes to our image views */
        foreach_bit(i, v3d->shaderimg[s].enabled_mask) {
                struct v3d_image_view *view = &v3d->shaderimg[s].si[i];

                v3d_flush_jobs_writing_resource(v3d, view->base.resource);
        }

        /* Flush writes to our vertex buffers (i.e. from transform feedback) */
        if (s == PIPE_SHADER_VERTEX) {
                foreach_bit(i, v3d->vertexbuf.enabled_mask) {
                        struct pipe_vertex_buffer *vb = &v3d->vertexbuf.vb[i];

                        v3d_flush_jobs_writing_resource(v3d, vb->buffer.resource);
                }
        }
}

static void
v3d_predraw_check_outputs(struct pipe_context *pctx)
{
        struct v3d_context *v3d = v3d_context(pctx);

        /* Flush jobs reading from TF buffers that we are about to write. */
        if (v3d_transform_feedback_enabled(v3d)) {
                struct v3d_streamout_stateobj *so = &v3d->streamout;

                for (int i = 0; i < so->num_targets; i++) {
                        const struct pipe_stream_output_target *target =
                                so->targets[i];
                        v3d_flush_jobs_reading_resource(v3d, target->buffer);
                }
        }
}

/**
 * Checks if the state for the current draw reads a particular resource in
 * the given shader stage.
 */
static bool
v3d_state_reads_resource(struct v3d_context *v3d,
                         struct pipe_resource *prsc,
                         enum pipe_shader_type s)
{
        struct v3d_resource *rsc = v3d_resource(prsc);

        if (s == PIPE_SHADER_VERTEX) {
                foreach_bit(i, v3d->vertexbuf.enabled_mask) {
                        struct pipe_vertex_buffer *vb = &v3d->vertexbuf.vb[i];
                        if (!vb->buffer.resource)
                                continue;

                        struct v3d_resource *vb_rsc =
                                v3d_resource(vb->buffer.resource);
                        if (rsc->bo == vb_rsc->bo)
                                return true;
                }
        }

        /* Constant buffers */
        foreach_bit(i, v3d->constbuf[s].enabled_mask) {
                struct pipe_constant_buffer *cb = &v3d->constbuf[s].cb[i];
                if (!cb->buffer)
                        continue;

                struct v3d_resource *cb_rsc = v3d_resource(cb->buffer);
                if (rsc->bo == cb_rsc->bo)
                        return true;
        }

        /* Shader storage buffers */
        foreach_bit(i, v3d->ssbo[s].enabled_mask) {
                struct pipe_shader_buffer *sb = &v3d->ssbo[s].sb[i];
                if (!sb->buffer)
                        continue;

                struct v3d_resource *sb_rsc = v3d_resource(sb->buffer);
                if (rsc->bo == sb_rsc->bo)
                        return true;
        }

        for (int i = 0; i < v3d->tex[s].num_textures; i++) {
                struct pipe_sampler_view *pview = v3d->tex[s].textures[i];
                if (!pview)
                        continue;

                struct v3d_sampler_view *view = v3d_sampler_view(pview);
                struct v3d_resource *v_rsc = v3d_resource(view->texture);
                if (rsc->bo == v_rsc->bo)
                        return true;
        }

        return false;
}

static void
v3d_emit_wait_for_tf(struct v3d_job *job)
{
        /* XXX: we might be able to skip this in some cases, for now we
         * always emit it.
         */
        cl_emit(&job->bcl, FLUSH_TRANSFORM_FEEDBACK_DATA, flush);

        cl_emit(&job->bcl, WAIT_FOR_TRANSFORM_FEEDBACK, wait) {
                /* XXX: Wait for all outstanding writes... maybe we can do
                 * better in some cases.
                 */
                wait.block_count = 255;
        }

        /* We have just flushed all our outstanding TF work in this job, so
         * make sure we don't emit TF flushes for any of it again.
         */
        _mesa_set_clear(job->tf_write_prscs, NULL);
}

static void
v3d_emit_wait_for_tf_if_needed(struct v3d_context *v3d, struct v3d_job *job)
{
        if (!job->tf_enabled)
                return;

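        /* The loop below uses PIPE_SHADER_COMPUTE as an exclusive bound, so
         * it walks every graphics stage; the fragment stage is then skipped
         * explicitly since it only runs after binning (and therefore TF) has
         * completed.
         */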
        set_foreach(job->tf_write_prscs, entry) {
                struct pipe_resource *prsc = (struct pipe_resource *)entry->key;
                for (int s = 0; s < PIPE_SHADER_COMPUTE; s++) {
                        /* Fragment shaders can only start executing after all
                         * binning (and thus TF) is complete.
                         *
                         * XXX: For VS/GS/TES, if the binning shader does not
                         * read the resource then we could also avoid emitting
                         * the wait.
                         */
                        if (s == PIPE_SHADER_FRAGMENT)
                                continue;

                        if (v3d_state_reads_resource(v3d, prsc, s)) {
                                v3d_emit_wait_for_tf(job);
                                return;
                        }
                }
        }
}

static void
v3d_emit_gl_shader_state(struct v3d_context *v3d,
                         const struct pipe_draw_info *info)
{
        struct v3d_job *job = v3d->job;
        /* VC5_DIRTY_VTXSTATE */
        struct v3d_vertex_stateobj *vtx = v3d->vtx;
        /* VC5_DIRTY_VTXBUF */
        struct v3d_vertexbuf_stateobj *vertexbuf = &v3d->vertexbuf;

        /* Upload the uniforms to the indirect CL first */
        struct v3d_cl_reloc fs_uniforms =
                v3d_write_uniforms(v3d, v3d->prog.fs,
                                   PIPE_SHADER_FRAGMENT);
        struct v3d_cl_reloc vs_uniforms =
                v3d_write_uniforms(v3d, v3d->prog.vs,
                                   PIPE_SHADER_VERTEX);
        struct v3d_cl_reloc cs_uniforms =
                v3d_write_uniforms(v3d, v3d->prog.cs,
                                   PIPE_SHADER_VERTEX);

        /* See GFXH-930 workaround below */
        uint32_t num_elements_to_emit = MAX2(vtx->num_elements, 1);
        uint32_t shader_rec_offset =
                v3d_cl_ensure_space(&job->indirect,
                                    cl_packet_length(GL_SHADER_STATE_RECORD) +
                                    num_elements_to_emit *
                                    cl_packet_length(GL_SHADER_STATE_ATTRIBUTE_RECORD),
                                    32);

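        /* Reserving the shader record and all of its attribute records in
         * one go keeps them contiguous in the indirect CL, so the
         * GL_SHADER_STATE packet emitted at the end of this function can
         * point at everything with the single shader_rec_offset above.
         */
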
        /* XXX perf: We should move most of the SHADER_STATE_RECORD setup to
         * compile time, so that we mostly just have to OR the VS and FS
         * records together at draw time.
         */
        cl_emit(&job->indirect, GL_SHADER_STATE_RECORD, shader) {
                shader.enable_clipping = true;
                /* VC5_DIRTY_PRIM_MODE | VC5_DIRTY_RASTERIZER */
                shader.point_size_in_shaded_vertex_data =
                        (info->mode == PIPE_PRIM_POINTS &&
                         v3d->rasterizer->base.point_size_per_vertex);

                /* Must be set if the shader modifies Z, discards, or modifies
                 * the sample mask.  For any of these cases, the fragment
                 * shader needs to write the Z value (even just discards).
                 */
                shader.fragment_shader_does_z_writes =
                        v3d->prog.fs->prog_data.fs->writes_z;
                /* Set if the EZ test must be disabled (due to shader side
                 * effects and the early_z flag not being present in the
                 * shader).
                 */
                shader.turn_off_early_z_test =
                        v3d->prog.fs->prog_data.fs->disable_ez;

                shader.fragment_shader_uses_real_pixel_centre_w_in_addition_to_centroid_w2 =
                        v3d->prog.fs->prog_data.fs->uses_center_w;

#if V3D_VERSION >= 40
                shader.disable_implicit_point_line_varyings =
                        !v3d->prog.fs->prog_data.fs->uses_implicit_point_line_varyings;
#endif

                shader.number_of_varyings_in_fragment_shader =
                        v3d->prog.fs->prog_data.fs->num_inputs;

                shader.coordinate_shader_propagate_nans = true;
                shader.vertex_shader_propagate_nans = true;
                shader.fragment_shader_propagate_nans = true;

                shader.coordinate_shader_code_address =
                        cl_address(v3d_resource(v3d->prog.cs->resource)->bo,
                                   v3d->prog.cs->offset);
                shader.vertex_shader_code_address =
                        cl_address(v3d_resource(v3d->prog.vs->resource)->bo,
                                   v3d->prog.vs->offset);
                shader.fragment_shader_code_address =
                        cl_address(v3d_resource(v3d->prog.fs->resource)->bo,
                                   v3d->prog.fs->offset);

                /* XXX: Use combined input/output size flag in the common
                 * case.
                 */
                shader.coordinate_shader_has_separate_input_and_output_vpm_blocks =
                        v3d->prog.cs->prog_data.vs->separate_segments;
                shader.vertex_shader_has_separate_input_and_output_vpm_blocks =
                        v3d->prog.vs->prog_data.vs->separate_segments;

                shader.coordinate_shader_input_vpm_segment_size =
                        v3d->prog.cs->prog_data.vs->separate_segments ?
                        v3d->prog.cs->prog_data.vs->vpm_input_size : 1;
                shader.vertex_shader_input_vpm_segment_size =
                        v3d->prog.vs->prog_data.vs->separate_segments ?
                        v3d->prog.vs->prog_data.vs->vpm_input_size : 1;

                shader.coordinate_shader_output_vpm_segment_size =
                        v3d->prog.cs->prog_data.vs->vpm_output_size;
                shader.vertex_shader_output_vpm_segment_size =
                        v3d->prog.vs->prog_data.vs->vpm_output_size;

                shader.coordinate_shader_uniforms_address = cs_uniforms;
                shader.vertex_shader_uniforms_address = vs_uniforms;
                shader.fragment_shader_uniforms_address = fs_uniforms;

#if V3D_VERSION >= 41
                shader.min_coord_shader_input_segments_required_in_play = 1;
                shader.min_vertex_shader_input_segments_required_in_play = 1;

                shader.coordinate_shader_4_way_threadable =
                        v3d->prog.cs->prog_data.vs->base.threads == 4;
                shader.vertex_shader_4_way_threadable =
                        v3d->prog.vs->prog_data.vs->base.threads == 4;
                shader.fragment_shader_4_way_threadable =
                        v3d->prog.fs->prog_data.fs->base.threads == 4;

                shader.coordinate_shader_start_in_final_thread_section =
                        v3d->prog.cs->prog_data.vs->base.single_seg;
                shader.vertex_shader_start_in_final_thread_section =
                        v3d->prog.vs->prog_data.vs->base.single_seg;
                shader.fragment_shader_start_in_final_thread_section =
                        v3d->prog.fs->prog_data.fs->base.single_seg;
#else
                shader.coordinate_shader_4_way_threadable =
                        v3d->prog.cs->prog_data.vs->base.threads == 4;
                shader.coordinate_shader_2_way_threadable =
                        v3d->prog.cs->prog_data.vs->base.threads == 2;
                shader.vertex_shader_4_way_threadable =
                        v3d->prog.vs->prog_data.vs->base.threads == 4;
                shader.vertex_shader_2_way_threadable =
                        v3d->prog.vs->prog_data.vs->base.threads == 2;
                shader.fragment_shader_4_way_threadable =
                        v3d->prog.fs->prog_data.fs->base.threads == 4;
                shader.fragment_shader_2_way_threadable =
                        v3d->prog.fs->prog_data.fs->base.threads == 2;
#endif

                shader.vertex_id_read_by_coordinate_shader =
                        v3d->prog.cs->prog_data.vs->uses_vid;
                shader.instance_id_read_by_coordinate_shader =
                        v3d->prog.cs->prog_data.vs->uses_iid;
                shader.vertex_id_read_by_vertex_shader =
                        v3d->prog.vs->prog_data.vs->uses_vid;
                shader.instance_id_read_by_vertex_shader =
                        v3d->prog.vs->prog_data.vs->uses_iid;

                shader.address_of_default_attribute_values =
                        cl_address(v3d_resource(vtx->defaults)->bo,
                                   vtx->defaults_offset);
        }

        bool cs_loaded_any = false;
        for (int i = 0; i < vtx->num_elements; i++) {
                struct pipe_vertex_element *elem = &vtx->pipe[i];
                struct pipe_vertex_buffer *vb =
                        &vertexbuf->vb[elem->vertex_buffer_index];
                struct v3d_resource *rsc = v3d_resource(vb->buffer.resource);

                const uint32_t size =
                        cl_packet_length(GL_SHADER_STATE_ATTRIBUTE_RECORD);
                cl_emit_with_prepacked(&job->indirect,
                                       GL_SHADER_STATE_ATTRIBUTE_RECORD,
                                       &vtx->attrs[i * size], attr) {
                        attr.stride = vb->stride;
                        attr.address = cl_address(rsc->bo,
                                                  vb->buffer_offset +
                                                  elem->src_offset);

                        attr.number_of_values_read_by_coordinate_shader =
                                v3d->prog.cs->prog_data.vs->vattr_sizes[i];
                        attr.number_of_values_read_by_vertex_shader =
                                v3d->prog.vs->prog_data.vs->vattr_sizes[i];

                        /* GFXH-930: At least one attribute must be enabled
                         * and read by CS and VS.  If we have attributes being
                         * consumed by the VS but not the CS, then set up a
                         * dummy load of the last attribute into the CS's VPM
                         * inputs.  (Since CS is just dead-code-elimination
                         * compared to VS, we can't have CS loading but not
                         * VS).
                         */
                        if (v3d->prog.cs->prog_data.vs->vattr_sizes[i])
                                cs_loaded_any = true;
                        if (i == vtx->num_elements - 1 && !cs_loaded_any) {
                                attr.number_of_values_read_by_coordinate_shader = 1;
                        }

#if V3D_VERSION >= 41
                        attr.maximum_index = 0xffffff;
#endif
                }
                STATIC_ASSERT(sizeof(vtx->attrs) >= V3D_MAX_VS_INPUTS / 4 * size);
        }

        if (vtx->num_elements == 0) {
                /* GFXH-930: At least one attribute must be enabled and read
                 * by CS and VS.  If we have no attributes being consumed by
                 * the shader, set up a dummy to be loaded into the VPM.
                 */
                cl_emit(&job->indirect, GL_SHADER_STATE_ATTRIBUTE_RECORD, attr) {
                        /* Valid address of data whose value will be unused. */
                        attr.address = cl_address(job->indirect.bo, 0);

                        attr.type = ATTRIBUTE_FLOAT;
                        attr.stride = 0;
                        attr.vec_size = 1;

                        attr.number_of_values_read_by_coordinate_shader = 1;
                        attr.number_of_values_read_by_vertex_shader = 1;
                }
        }

        cl_emit(&job->bcl, VCM_CACHE_SIZE, vcm) {
                vcm.number_of_16_vertex_batches_for_binning =
                        v3d->prog.cs->prog_data.vs->vcm_cache_size;
                vcm.number_of_16_vertex_batches_for_rendering =
                        v3d->prog.vs->prog_data.vs->vcm_cache_size;
        }

        cl_emit(&job->bcl, GL_SHADER_STATE, state) {
                state.address = cl_address(job->indirect.bo, shader_rec_offset);
                state.number_of_attribute_arrays = num_elements_to_emit;
        }

        v3d_bo_unreference(&cs_uniforms.bo);
        v3d_bo_unreference(&vs_uniforms.bo);
        v3d_bo_unreference(&fs_uniforms.bo);

        job->shader_rec_count++;
}

/**
 * Computes the various transform feedback statistics, since they can't be
 * recorded by CL packets.
 */
static void
v3d_tf_statistics_record(struct v3d_context *v3d,
                         const struct pipe_draw_info *info,
                         bool prim_tf)
{
        if (!v3d->active_queries)
                return;

        uint32_t prims = u_prims_for_vertices(info->mode, info->count);
        v3d->prims_generated += prims;

        if (prim_tf) {
                /* XXX: Only count if we didn't overflow. */
                v3d->tf_prims_generated += prims;
        }
}

static void
v3d_update_job_ez(struct v3d_context *v3d, struct v3d_job *job)
{
        switch (v3d->zsa->ez_state) {
        case VC5_EZ_UNDECIDED:
                /* If the Z/S state didn't pick a direction but didn't
                 * disable, then go along with the current EZ state.  This
                 * allows EZ optimization for Z func == EQUAL or NEVER.
                 */
                break;

        case VC5_EZ_GT_GE:
        case VC5_EZ_LT_LE:
                /* If the Z/S state picked a direction, then it needs to match
                 * the current direction if we've decided on one.
                 */
                if (job->ez_state == VC5_EZ_UNDECIDED)
                        job->ez_state = v3d->zsa->ez_state;
                else if (job->ez_state != v3d->zsa->ez_state)
                        job->ez_state = VC5_EZ_DISABLED;
                break;

        case VC5_EZ_DISABLED:
                /* If the current Z/S state disables EZ because of a bad Z
                 * func or stencil operation, then we can't do any more EZ in
                 * this frame.
                 */
                job->ez_state = VC5_EZ_DISABLED;
                break;
        }

        /* If the FS affects the Z of the pixels, then it may update against
         * the chosen EZ direction (though we could use
         * ARB_conservative_depth's hints to avoid this)
         */
        if (v3d->prog.fs->prog_data.fs->writes_z) {
                job->ez_state = VC5_EZ_DISABLED;
        }

        if (job->first_ez_state == VC5_EZ_UNDECIDED &&
            (job->ez_state != VC5_EZ_DISABLED || job->draw_calls_queued == 0))
                job->first_ez_state = job->ez_state;
}

static void
v3d_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
        struct v3d_context *v3d = v3d_context(pctx);

        if (!info->count_from_stream_output && !info->indirect &&
            !info->primitive_restart &&
            !u_trim_pipe_prim(info->mode, (unsigned*)&info->count))
                return;

        /* Fall back for weird desktop GL primitive restart values. */
        if (info->primitive_restart &&
            info->index_size) {
                uint32_t mask = ~0;

                switch (info->index_size) {
                case 2:
                        mask = 0xffff;
                        break;
                case 1:
                        mask = 0xff;
                        break;
                }

                if (info->restart_index != mask) {
                        util_draw_vbo_without_prim_restart(pctx, info);
                        return;
                }
        }

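        /* Note that past this point a primitive-restart draw always uses the
         * all-ones restart index for its index size; the prim list packets
         * emitted below only carry an enable_primitive_restarts flag, with no
         * field for the restart index value itself.
         */
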
        if (info->mode >= PIPE_PRIM_QUADS) {
                util_primconvert_save_rasterizer_state(v3d->primconvert, &v3d->rasterizer->base);
                util_primconvert_draw_vbo(v3d->primconvert, info);
                perf_debug("Fallback conversion for %d %s vertices\n",
                           info->count, u_prim_name(info->mode));
                return;
        }

        /* Before setting up the draw, flush anything writing to the resources
         * that we read from, or reading from resources we write to.
         */
        for (int s = 0; s < PIPE_SHADER_COMPUTE; s++)
                v3d_predraw_check_stage_inputs(pctx, s);

        if (info->indirect)
                v3d_flush_jobs_writing_resource(v3d, info->indirect->buffer);

        v3d_predraw_check_outputs(pctx);

        struct v3d_job *job = v3d_get_job_for_fbo(v3d);

        /* If vertex texturing depends on the output of rendering, we need to
         * ensure that that rendering is complete before we run a coordinate
         * shader that depends on it.
         *
         * Given that doing that is unusual, for now we just block the binner
         * on the last submitted render, rather than tracking the last
         * rendering to each texture's BO.
         */
        if (v3d->tex[PIPE_SHADER_VERTEX].num_textures || info->indirect) {
                perf_debug("Blocking binner on last render "
                           "due to vertex texturing or indirect drawing.\n");
                job->submit.in_sync_bcl = v3d->out_sync;
        }

        /* Mark SSBOs as being written.  We don't actually know which ones are
         * read vs written, so just assume the worst.
         */
        for (int s = 0; s < PIPE_SHADER_COMPUTE; s++) {
                foreach_bit(i, v3d->ssbo[s].enabled_mask) {
                        v3d_job_add_write_resource(job,
                                                   v3d->ssbo[s].sb[i].buffer);
                        job->tmu_dirty_rcl = true;
                }

                foreach_bit(i, v3d->shaderimg[s].enabled_mask) {
                        v3d_job_add_write_resource(job,
                                                   v3d->shaderimg[s].si[i].base.resource);
                        job->tmu_dirty_rcl = true;
                }
        }

        /* Get space to emit our draw call into the BCL, using a branch to
         * jump to a new BO if necessary.
         */
        v3d_cl_ensure_space_with_branch(&job->bcl, 256 /* XXX */);

        if (v3d->prim_mode != info->mode) {
                v3d->prim_mode = info->mode;
                v3d->dirty |= VC5_DIRTY_PRIM_MODE;
        }

        v3d_start_draw(v3d);
        v3d_update_compiled_shaders(v3d, info->mode);
        v3d_update_job_ez(v3d, job);

        /* If this job was writing to transform feedback buffers before this
         * draw and we are reading from them here, then we need to wait for TF
         * to complete before we emit this draw.
         *
         * Notice this check needs to happen before we emit state for the
         * current draw call, where we update job->tf_enabled, so we can ensure
         * that we only check TF writes for prior draws.
         */
        v3d_emit_wait_for_tf_if_needed(v3d, job);

#if V3D_VERSION >= 41
        v3d41_emit_state(pctx);
#else
        v3d33_emit_state(pctx);
#endif

        if (v3d->dirty & (VC5_DIRTY_VTXBUF |
                          VC5_DIRTY_VTXSTATE |
                          VC5_DIRTY_PRIM_MODE |
                          VC5_DIRTY_RASTERIZER |
                          VC5_DIRTY_COMPILED_CS |
                          VC5_DIRTY_COMPILED_VS |
                          VC5_DIRTY_COMPILED_FS |
                          v3d->prog.cs->uniform_dirty_bits |
                          v3d->prog.vs->uniform_dirty_bits |
                          v3d->prog.fs->uniform_dirty_bits)) {
                v3d_emit_gl_shader_state(v3d, info);
        }

        v3d->dirty = 0;

        /* The Base Vertex/Base Instance packet sets those values to nonzero
         * for the next draw call only.
         */
        if (info->index_bias || info->start_instance) {
                cl_emit(&job->bcl, BASE_VERTEX_BASE_INSTANCE, base) {
                        base.base_instance = info->start_instance;
                        base.base_vertex = info->index_bias;
                }
        }

        uint32_t prim_tf_enable = 0;
#if V3D_VERSION < 40
        /* V3D 3.x: The HW only processes transform feedback on primitives
         * with the flag set.
         */
        if (v3d->streamout.num_targets)
                prim_tf_enable = (V3D_PRIM_POINTS_TF - V3D_PRIM_POINTS);
#endif

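        /* This relies on the *_TF primitive enums sitting at a fixed offset
         * above their non-TF counterparts, so OR-ing this delta into
         * prim.mode below is what enables transform feedback for the draw on
         * 3.x.
         */
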
        v3d_tf_statistics_record(v3d, info, v3d->streamout.num_targets);

        /* Note that the primitive type fields match with OpenGL/gallium
         * definitions, up to but not including QUADS.
         */
        if (info->index_size) {
                uint32_t index_size = info->index_size;
                uint32_t offset = info->start * index_size;
                struct pipe_resource *prsc;
                if (info->has_user_indices) {
                        prsc = NULL;
                        u_upload_data(v3d->uploader, 0,
                                      info->count * info->index_size, 4,
                                      info->index.user,
                                      &offset, &prsc);
                } else {
                        prsc = info->index.resource;
                }
                struct v3d_resource *rsc = v3d_resource(prsc);

#if V3D_VERSION >= 40
                cl_emit(&job->bcl, INDEX_BUFFER_SETUP, ib) {
                        ib.address = cl_address(rsc->bo, 0);
                        ib.size = rsc->bo->size;
                }
#endif

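                /* On V3D 3.3 there is no separate index buffer setup packet;
                 * each indexed prim list packet below carries the index list
                 * address itself (address_of_indices_list).
                 */
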
                if (info->indirect) {
                        cl_emit(&job->bcl, INDIRECT_INDEXED_INSTANCED_PRIM_LIST, prim) {
                                prim.index_type = ffs(info->index_size) - 1;
#if V3D_VERSION < 40
                                prim.address_of_indices_list =
                                        cl_address(rsc->bo, offset);
#endif /* V3D_VERSION < 40 */
                                prim.mode = info->mode | prim_tf_enable;
                                prim.enable_primitive_restarts = info->primitive_restart;

                                prim.number_of_draw_indirect_indexed_records = info->indirect->draw_count;

                                prim.stride_in_multiples_of_4_bytes = info->indirect->stride >> 2;
                                prim.address = cl_address(v3d_resource(info->indirect->buffer)->bo,
                                                          info->indirect->offset);
                        }
                } else if (info->instance_count > 1) {
                        cl_emit(&job->bcl, INDEXED_INSTANCED_PRIM_LIST, prim) {
                                prim.index_type = ffs(info->index_size) - 1;
#if V3D_VERSION >= 40
                                prim.index_offset = offset;
#else /* V3D_VERSION < 40 */
                                prim.maximum_index = (1u << 31) - 1; /* XXX */
                                prim.address_of_indices_list =
                                        cl_address(rsc->bo, offset);
#endif /* V3D_VERSION < 40 */
                                prim.mode = info->mode | prim_tf_enable;
                                prim.enable_primitive_restarts = info->primitive_restart;

                                prim.number_of_instances = info->instance_count;
                                prim.instance_length = info->count;
                        }
                } else {
                        cl_emit(&job->bcl, INDEXED_PRIM_LIST, prim) {
                                prim.index_type = ffs(info->index_size) - 1;
                                prim.length = info->count;
#if V3D_VERSION >= 40
                                prim.index_offset = offset;
#else /* V3D_VERSION < 40 */
                                prim.maximum_index = (1u << 31) - 1; /* XXX */
                                prim.address_of_indices_list =
                                        cl_address(rsc->bo, offset);
#endif /* V3D_VERSION < 40 */
                                prim.mode = info->mode | prim_tf_enable;
                                prim.enable_primitive_restarts = info->primitive_restart;
                        }
                }

                job->draw_calls_queued++;

                if (info->has_user_indices)
                        pipe_resource_reference(&prsc, NULL);
        } else {
                if (info->indirect) {
                        cl_emit(&job->bcl, INDIRECT_VERTEX_ARRAY_INSTANCED_PRIMS, prim) {
                                prim.mode = info->mode | prim_tf_enable;
                                prim.number_of_draw_indirect_array_records = info->indirect->draw_count;

                                prim.stride_in_multiples_of_4_bytes = info->indirect->stride >> 2;
                                prim.address = cl_address(v3d_resource(info->indirect->buffer)->bo,
                                                          info->indirect->offset);
                        }
                } else if (info->instance_count > 1) {
                        cl_emit(&job->bcl, VERTEX_ARRAY_INSTANCED_PRIMS, prim) {
                                prim.mode = info->mode | prim_tf_enable;
                                prim.index_of_first_vertex = info->start;
                                prim.number_of_instances = info->instance_count;
                                prim.instance_length = info->count;
                        }
                } else {
                        cl_emit(&job->bcl, VERTEX_ARRAY_PRIMS, prim) {
                                prim.mode = info->mode | prim_tf_enable;
                                prim.length = info->count;
                                prim.index_of_first_vertex = info->start;
                        }
                }
        }

        /* A flush is required in between a TF draw and any following TF specs
         * packet, or the GPU may hang.  Just flush each time for now.
         */
        if (v3d->streamout.num_targets)
                cl_emit(&job->bcl, TRANSFORM_FEEDBACK_FLUSH_AND_COUNT, flush);

        job->draw_calls_queued++;

        /* Increment the TF offsets by how many verts we wrote.  XXX: This
         * needs some clamping to the buffer size.
         */
        for (int i = 0; i < v3d->streamout.num_targets; i++)
                v3d->streamout.offsets[i] += info->count;

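        /* Track the buffers this draw touches: job->load marks anything that
         * has to be loaded into the TLB because this job didn't clear it,
         * and job->store marks what has to be written back at the end of the
         * job.
         */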
        if (v3d->zsa && job->zsbuf && v3d->zsa->base.depth.enabled) {
                struct v3d_resource *rsc = v3d_resource(job->zsbuf->texture);
                v3d_job_add_bo(job, rsc->bo);

                job->load |= PIPE_CLEAR_DEPTH & ~job->clear;
                if (v3d->zsa->base.depth.writemask)
                        job->store |= PIPE_CLEAR_DEPTH;
                rsc->initialized_buffers = PIPE_CLEAR_DEPTH;
        }

        if (v3d->zsa && job->zsbuf && v3d->zsa->base.stencil[0].enabled) {
                struct v3d_resource *rsc = v3d_resource(job->zsbuf->texture);
                if (rsc->separate_stencil)
                        rsc = rsc->separate_stencil;

                v3d_job_add_bo(job, rsc->bo);

                job->load |= PIPE_CLEAR_STENCIL & ~job->clear;
                if (v3d->zsa->base.stencil[0].writemask ||
                    v3d->zsa->base.stencil[1].writemask) {
                        job->store |= PIPE_CLEAR_STENCIL;
                }
                rsc->initialized_buffers |= PIPE_CLEAR_STENCIL;
        }

= 0; i
< V3D_MAX_DRAW_BUFFERS
; i
++) {
904 uint32_t bit
= PIPE_CLEAR_COLOR0
<< i
;
905 int blend_rt
= v3d
->blend
->base
.independent_blend_enable
? i
: 0;
907 if (job
->store
& bit
|| !job
->cbufs
[i
])
909 struct v3d_resource
*rsc
= v3d_resource(job
->cbufs
[i
]->texture
);
911 job
->load
|= bit
& ~job
->clear
;
912 if (v3d
->blend
->base
.rt
[blend_rt
].colormask
)
914 v3d_job_add_bo(job
, rsc
->bo
);
917 if (job
->referenced_size
> 768 * 1024 * 1024) {
918 perf_debug("Flushing job with %dkb to try to free up memory\n",
919 job
->referenced_size
/ 1024);
923 if (V3D_DEBUG
& V3D_DEBUG_ALWAYS_FLUSH
)
/**
 * Implements gallium's clear() hook (glClear()) by drawing a pair of triangles.
 */
static void
v3d_draw_clear(struct v3d_context *v3d,
               unsigned buffers,
               const union pipe_color_union *color,
               double depth, unsigned stencil)
{
        static const union pipe_color_union dummy_color = {};

        /* The blitter util dereferences the color regardless, even though the
         * gallium clear API may not pass one in when only Z/S are cleared.
         */
        if (!color)
                color = &dummy_color;

        v3d_blitter_save(v3d);
        util_blitter_clear(v3d->blitter,
                           v3d->framebuffer.width,
                           v3d->framebuffer.height,
                           util_framebuffer_get_num_layers(&v3d->framebuffer),
                           buffers, color, depth, stencil);
}

/**
 * Attempts to perform the GL clear by using the TLB's fast clear at the start
 * of the frame.
 */
static unsigned
v3d_tlb_clear(struct v3d_job *job, unsigned buffers,
              const union pipe_color_union *color,
              double depth, unsigned stencil)
{
        struct v3d_context *v3d = job->v3d;

        if (job->draw_calls_queued) {
                /* If anything in the CL has drawn using the buffer, then the
                 * TLB clear we're trying to add now would happen before that
                 * drawing.
                 */
                buffers &= ~(job->load | job->store);
        }

        /* GFXH-1461: If we were to emit a load of just depth or just stencil,
         * then the clear for the other may get lost.  We need to decide now
         * if it would be possible to need to emit a load of just one after
         * we've set up our TLB clears.
         */
        if (buffers & PIPE_CLEAR_DEPTHSTENCIL &&
            (buffers & PIPE_CLEAR_DEPTHSTENCIL) != PIPE_CLEAR_DEPTHSTENCIL &&
            job->zsbuf &&
            util_format_is_depth_and_stencil(job->zsbuf->texture->format)) {
                buffers &= ~PIPE_CLEAR_DEPTHSTENCIL;
        }

        for (int i = 0; i < V3D_MAX_DRAW_BUFFERS; i++) {
                uint32_t bit = PIPE_CLEAR_COLOR0 << i;
                if (!(buffers & bit))
                        continue;

                struct pipe_surface *psurf = v3d->framebuffer.cbufs[i];
                struct v3d_surface *surf = v3d_surface(psurf);
                struct v3d_resource *rsc = v3d_resource(psurf->texture);

                union util_color uc;
                uint32_t internal_size = 4 << surf->internal_bpp;
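                /* An internal_bpp of 0/1/2 gives 4/8/16 bytes of packed clear
                 * color to fill in below.
                 */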

                static union pipe_color_union swapped_color;
                if (v3d->swap_color_rb & (1 << i)) {
                        swapped_color.f[0] = color->f[2];
                        swapped_color.f[1] = color->f[1];
                        swapped_color.f[2] = color->f[0];
                        swapped_color.f[3] = color->f[3];
                        color = &swapped_color;
                }

                switch (surf->internal_type) {
                case V3D_INTERNAL_TYPE_8:
                        util_pack_color(color->f, PIPE_FORMAT_R8G8B8A8_UNORM,
                                        &uc);
                        memcpy(job->clear_color[i], uc.ui, internal_size);
                        break;
                case V3D_INTERNAL_TYPE_8I:
                case V3D_INTERNAL_TYPE_8UI:
                        job->clear_color[i][0] = ((color->ui[0] & 0xff) |
                                                  (color->ui[1] & 0xff) << 8 |
                                                  (color->ui[2] & 0xff) << 16 |
                                                  (color->ui[3] & 0xff) << 24);
                        break;
                case V3D_INTERNAL_TYPE_16F:
                        util_pack_color(color->f, PIPE_FORMAT_R16G16B16A16_FLOAT,
                                        &uc);
                        memcpy(job->clear_color[i], uc.ui, internal_size);
                        break;
                case V3D_INTERNAL_TYPE_16I:
                case V3D_INTERNAL_TYPE_16UI:
                        job->clear_color[i][0] = ((color->ui[0] & 0xffff) |
                                                  color->ui[1] << 16);
                        job->clear_color[i][1] = ((color->ui[2] & 0xffff) |
                                                  color->ui[3] << 16);
                        break;
                case V3D_INTERNAL_TYPE_32F:
                case V3D_INTERNAL_TYPE_32I:
                case V3D_INTERNAL_TYPE_32UI:
                        memcpy(job->clear_color[i], color->ui, internal_size);
                        break;
                }

                rsc->initialized_buffers |= bit;
        }

        unsigned zsclear = buffers & PIPE_CLEAR_DEPTHSTENCIL;
        if (zsclear) {
                struct v3d_resource *rsc =
                        v3d_resource(v3d->framebuffer.zsbuf->texture);

                if (zsclear & PIPE_CLEAR_DEPTH)
                        job->clear_z = depth;
                if (zsclear & PIPE_CLEAR_STENCIL)
                        job->clear_s = stencil;

                rsc->initialized_buffers |= zsclear;
        }

        job->draw_min_x = 0;
        job->draw_min_y = 0;
        job->draw_max_x = v3d->framebuffer.width;
        job->draw_max_y = v3d->framebuffer.height;
        job->clear |= buffers;
        job->store |= buffers;

        v3d_start_draw(v3d);

        return buffers;
}

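/* v3d_tlb_clear() returns the buffers it could handle with TLB fast clears;
 * v3d_clear() masks those off below and falls back to a blitter draw for
 * whatever remains.
 */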
static void
v3d_clear(struct pipe_context *pctx, unsigned buffers,
          const union pipe_color_union *color, double depth, unsigned stencil)
{
        struct v3d_context *v3d = v3d_context(pctx);
        struct v3d_job *job = v3d_get_job_for_fbo(v3d);

        buffers &= ~v3d_tlb_clear(job, buffers, color, depth, stencil);

        if (buffers)
                v3d_draw_clear(v3d, buffers, color, depth, stencil);
}

static void
v3d_clear_render_target(struct pipe_context *pctx, struct pipe_surface *ps,
                        const union pipe_color_union *color,
                        unsigned x, unsigned y, unsigned w, unsigned h,
                        bool render_condition_enabled)
{
        fprintf(stderr, "unimpl: clear RT\n");
}

static void
v3d_clear_depth_stencil(struct pipe_context *pctx, struct pipe_surface *ps,
                        unsigned buffers, double depth, unsigned stencil,
                        unsigned x, unsigned y, unsigned w, unsigned h,
                        bool render_condition_enabled)
{
        fprintf(stderr, "unimpl: clear DS\n");
}

void
v3dX(draw_init)(struct pipe_context *pctx)
{
        pctx->draw_vbo = v3d_draw_vbo;
        pctx->clear = v3d_clear;
        pctx->clear_render_target = v3d_clear_render_target;
        pctx->clear_depth_stencil = v3d_clear_depth_stencil;
}