2 * Copyright © 2014-2017 Broadcom
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 #include "util/u_blitter.h"
25 #include "util/u_prim.h"
26 #include "util/u_format.h"
27 #include "util/u_pack_color.h"
28 #include "util/u_prim_restart.h"
29 #include "util/u_upload_mgr.h"
30 #include "indices/u_primconvert.h"
32 #include "vc5_context.h"
33 #include "vc5_resource.h"
35 #include "broadcom/cle/v3d_packet_v33_pack.h"
36 #include "broadcom/compiler/v3d_compiler.h"
/* Does the initial binning command list setup for drawing to a given FBO.
 *
 * NOTE(review): this file has been damaged by extraction -- several original
 * lines (the "static void" header, closing braces, some argument tails) are
 * missing; the surviving fragments are preserved verbatim below.
 */
42 vc5_start_draw(struct vc5_context
*vc5
)
44 struct vc5_job
*job
= vc5
->job
;
/* Get space to emit our BCL state, using a branch to jump to a new BO
 * if necessary.
 */
52 vc5_cl_ensure_space_with_branch(&job
->bcl
, 256 /* XXX */);
/* Record where this job's binner CL starts for the kernel submit ioctl,
 * and keep the BCL BO referenced by the job.
 */
54 job
->submit
.bcl_start
= job
->bcl
.bo
->offset
;
55 vc5_job_add_bo(job
, job
->bcl
.bo
);
/* Allocate the binner's working memory: a 1MB tile allocation pool and a
 * tile state data array (TSDA).
 * NOTE(review): the size/name arguments of the TSDA vc5_bo_alloc() call
 * were lost in extraction.
 */
57 job
->tile_alloc
= vc5_bo_alloc(vc5
->screen
, 1024 * 1024, "tile alloc");
58 struct vc5_bo
*tsda
= vc5_bo_alloc(vc5
->screen
,
/* "Binning mode lists start with a Tile Binning Mode Configuration ..."
 * (comment truncated in extraction).  Part1 signals the end of binning
 * config setup, so Part2 is emitted first.
 */
69 cl_emit(&job
->bcl
, TILE_BINNING_MODE_CONFIGURATION_PART2
, config
) {
70 config
.tile_allocation_memory_address
=
71 cl_address(job
->tile_alloc
, 0);
72 config
.tile_allocation_memory_size
= job
->tile_alloc
->size
;
75 cl_emit(&job
->bcl
, TILE_BINNING_MODE_CONFIGURATION_PART1
, config
) {
76 config
.tile_state_data_array_base_address
=
79 config
.width_in_tiles
= job
->draw_tiles_x
;
80 config
.height_in_tiles
= job
->draw_tiles_y
;
/* At least one render target must be configured even for Z-only jobs. */
83 config
.number_of_render_targets
=
84 MAX2(vc5
->framebuffer
.nr_cbufs
, 1);
86 config
.multisample_mode_4x
= job
->msaa
;
88 config
.maximum_bpp_of_all_render_targets
= job
->internal_bpp
;
/* The job now holds the only reference we need to the TSDA BO. */
91 vc5_bo_unreference(&tsda
);
93 /* There's definitely nothing in the VCD cache we want. */
94 cl_emit(&job
->bcl
, FLUSH_VCD_CACHE
, bin
);
96 /* Disable any leftover OQ state from another job. */
97 cl_emit(&job
->bcl
, OCCLUSION_QUERY_COUNTER
, counter
);
/* "Binning mode lists must have a Start Tile Binning item (6) after
 * any prefix state data before the binning list proper starts."
 */
102 cl_emit(&job
->bcl
, START_TILE_BINNING
, bin
);
/* NOTE(review): emits an initial indexed/triangles list format -- presumably
 * a default later overridden per draw; confirm against upstream.
 */
104 cl_emit(&job
->bcl
, PRIMITIVE_LIST_FORMAT
, fmt
) {
105 fmt
.data_type
= LIST_INDEXED
;
106 fmt
.primitive_type
= LIST_TRIANGLES
;
/* Mark the job active and record the framebuffer extent being drawn to. */
109 job
->needs_flush
= true;
110 job
->draw_width
= vc5
->framebuffer
.width
;
111 job
->draw_height
= vc5
->framebuffer
.height
;
115 vc5_predraw_check_textures(struct pipe_context
*pctx
,
116 struct vc5_texture_stateobj
*stage_tex
)
118 struct vc5_context
*vc5
= vc5_context(pctx
);
120 for (int i
= 0; i
< stage_tex
->num_textures
; i
++) {
121 struct pipe_sampler_view
*view
= stage_tex
->textures
[i
];
125 vc5_flush_jobs_writing_resource(vc5
, view
->texture
);
/* Emits the GL shader state record (plus one attribute record per vertex
 * element) into the job's indirect CL, then points the BCL at it with a
 * GL_SHADER_STATE packet.
 *
 * NOTE(review): fragmented by extraction; several original lines (the
 * function header, ensure_space alignment argument, attr.address offset
 * arguments, closing braces) are missing.  Fragments preserved verbatim.
 */
130 vc5_emit_gl_shader_state(struct vc5_context
*vc5
,
131 const struct pipe_draw_info
*info
)
133 struct vc5_job
*job
= vc5
->job
;
134 /* VC5_DIRTY_VTXSTATE */
135 struct vc5_vertex_stateobj
*vtx
= vc5
->vtx
;
136 /* VC5_DIRTY_VTXBUF */
137 struct vc5_vertexbuf_stateobj
*vertexbuf
= &vc5
->vertexbuf
;
139 /* Upload the uniforms to the indirect CL first */
140 struct vc5_cl_reloc fs_uniforms
=
141 vc5_write_uniforms(vc5
, vc5
->prog
.fs
,
142 &vc5
->constbuf
[PIPE_SHADER_FRAGMENT
],
144 struct vc5_cl_reloc vs_uniforms
=
145 vc5_write_uniforms(vc5
, vc5
->prog
.vs
,
146 &vc5
->constbuf
[PIPE_SHADER_VERTEX
],
/* NOTE(review): the coordinate shader (cs) reads the VERTEX constbuf slot
 * here -- presumably because it is the binning-mode variant of the vertex
 * shader; confirm against upstream.
 */
148 struct vc5_cl_reloc cs_uniforms
=
149 vc5_write_uniforms(vc5
, vc5
->prog
.cs
,
150 &vc5
->constbuf
[PIPE_SHADER_VERTEX
],
153 /* See GFXH-930 workaround below */
154 uint32_t num_elements_to_emit
= MAX2(vtx
->num_elements
, 1);
155 uint32_t shader_rec_offset
=
156 vc5_cl_ensure_space(&job
->indirect
,
157 cl_packet_length(GL_SHADER_STATE_RECORD
) +
158 num_elements_to_emit
*
159 cl_packet_length(GL_SHADER_STATE_ATTRIBUTE_RECORD
),
162 cl_emit(&job
->indirect
, GL_SHADER_STATE_RECORD
, shader
) {
163 shader
.enable_clipping
= true;
164 /* VC5_DIRTY_PRIM_MODE | VC5_DIRTY_RASTERIZER */
165 shader
.point_size_in_shaded_vertex_data
=
166 (info
->mode
== PIPE_PRIM_POINTS
&&
167 vc5
->rasterizer
->base
.point_size_per_vertex
);
/* Must be set if the shader modifies Z, discards, or modifies the sample
 * mask.  For any of these cases, the fragment shader needs to write the Z
 * value (even just discards).
 */
173 shader
.fragment_shader_does_z_writes
=
174 (vc5
->prog
.fs
->prog_data
.fs
->writes_z
||
175 vc5
->prog
.fs
->prog_data
.fs
->discard
);
177 shader
.number_of_varyings_in_fragment_shader
=
178 vc5
->prog
.fs
->prog_data
.base
->num_inputs
;
180 shader
.propagate_nans
= true;
/* Code addresses for the three shader stages (cs = binning-mode VS). */
182 shader
.coordinate_shader_code_address
=
183 cl_address(vc5
->prog
.cs
->bo
, 0);
184 shader
.vertex_shader_code_address
=
185 cl_address(vc5
->prog
.vs
->bo
, 0);
186 shader
.fragment_shader_code_address
=
187 cl_address(vc5
->prog
.fs
->bo
, 0);
/* XXX: Use combined input/output size flag in the common case
 * (comment truncated in extraction).
 */
192 shader
.coordinate_shader_has_separate_input_and_output_vpm_blocks
= true;
193 shader
.vertex_shader_has_separate_input_and_output_vpm_blocks
= true;
/* VPM segment sizes must be at least 1 even for shaders with no inputs. */
194 shader
.coordinate_shader_input_vpm_segment_size
=
195 MAX2(vc5
->prog
.cs
->prog_data
.vs
->vpm_input_size
, 1);
196 shader
.vertex_shader_input_vpm_segment_size
=
197 MAX2(vc5
->prog
.vs
->prog_data
.vs
->vpm_input_size
, 1);
199 shader
.coordinate_shader_output_vpm_segment_size
=
200 vc5
->prog
.cs
->prog_data
.vs
->vpm_output_size
;
201 shader
.vertex_shader_output_vpm_segment_size
=
202 vc5
->prog
.vs
->prog_data
.vs
->vpm_output_size
;
204 shader
.coordinate_shader_uniforms_address
= cs_uniforms
;
205 shader
.vertex_shader_uniforms_address
= vs_uniforms
;
206 shader
.fragment_shader_uniforms_address
= fs_uniforms
;
208 shader
.vertex_id_read_by_coordinate_shader
=
209 vc5
->prog
.cs
->prog_data
.vs
->uses_vid
;
210 shader
.instance_id_read_by_coordinate_shader
=
211 vc5
->prog
.cs
->prog_data
.vs
->uses_iid
;
212 shader
.vertex_id_read_by_vertex_shader
=
213 vc5
->prog
.vs
->prog_data
.vs
->uses_vid
;
214 shader
.instance_id_read_by_vertex_shader
=
215 vc5
->prog
.vs
->prog_data
.vs
->uses_iid
;
217 shader
.address_of_default_attribute_values
=
218 cl_address(vtx
->default_attribute_values
, 0);
/* One attribute record per bound vertex element, prepacked at state-object
 * creation and patched here with per-draw addresses/counts.
 */
221 for (int i
= 0; i
< vtx
->num_elements
; i
++) {
222 struct pipe_vertex_element
*elem
= &vtx
->pipe
[i
];
223 struct pipe_vertex_buffer
*vb
=
224 &vertexbuf
->vb
[elem
->vertex_buffer_index
];
225 struct vc5_resource
*rsc
= vc5_resource(vb
->buffer
.resource
);
227 const uint32_t size
=
228 cl_packet_length(GL_SHADER_STATE_ATTRIBUTE_RECORD
);
229 cl_emit_with_prepacked(&job
->indirect
,
230 GL_SHADER_STATE_ATTRIBUTE_RECORD
,
231 &vtx
->attrs
[i
* size
], attr
) {
232 attr
.stride
= vb
->stride
;
/* NOTE(review): the offset argument of this cl_address() was lost in
 * extraction.
 */
233 attr
.address
= cl_address(rsc
->bo
,
236 attr
.number_of_values_read_by_coordinate_shader
=
237 vc5
->prog
.cs
->prog_data
.vs
->vattr_sizes
[i
];
238 attr
.number_of_values_read_by_vertex_shader
=
239 vc5
->prog
.vs
->prog_data
.vs
->vattr_sizes
[i
];
243 if (vtx
->num_elements
== 0) {
/* GFXH-930: At least one attribute must be enabled and read by CS and
 * VS.  If we have no attributes being consumed by the shader, set up a
 * dummy to be loaded into the VPM.
 */
248 cl_emit(&job
->indirect
, GL_SHADER_STATE_ATTRIBUTE_RECORD
, attr
) {
249 /* Valid address of data whose value will be unused. */
250 attr
.address
= cl_address(job
->indirect
.bo
, 0);
252 attr
.type
= ATTRIBUTE_FLOAT
;
256 attr
.number_of_values_read_by_coordinate_shader
= 1;
257 attr
.number_of_values_read_by_vertex_shader
= 1;
/* Point the BCL at the record we just built in the indirect CL. */
261 cl_emit(&job
->bcl
, GL_SHADER_STATE
, state
) {
262 state
.address
= cl_address(job
->indirect
.bo
, shader_rec_offset
);
263 state
.number_of_attribute_arrays
= num_elements_to_emit
;
/* vc5_write_uniforms() returned referenced BOs; drop our references now
 * that the job holds them.
 */
266 vc5_bo_unreference(&cs_uniforms
.bo
);
267 vc5_bo_unreference(&vs_uniforms
.bo
);
268 vc5_bo_unreference(&fs_uniforms
.bo
);
270 job
->shader_rec_count
++;
/* Computes the various transform feedback statistics, since they can't be
 * recorded by CL packets.
 *
 * NOTE(review): the third parameter and the conditional guards around the
 * tf_prims_generated update were lost in extraction; the caller passes
 * prim_tf_enable as the third argument.
 */
278 vc5_tf_statistics_record(struct vc5_context
*vc5
,
279 const struct pipe_draw_info
*info
,
/* Count primitives generated by this draw for PRIMITIVES_GENERATED etc. */
282 uint32_t prims
= u_prims_for_vertices(info
->mode
, info
->count
);
284 vc5
->prims_generated
+= prims
;
287 /* XXX: Only count if we didn't overflow. */
288 vc5
->tf_prims_generated
+= prims
;
/* pipe_context::draw_vbo implementation: applies software fallbacks where
 * needed, emits shader state, then emits the primitive list packet(s) into
 * the binner CL and records resource usage on the job.
 *
 * NOTE(review): fragmented by extraction; braces, early returns, and some
 * argument tails are missing.  Fragments preserved verbatim.
 */
293 vc5_draw_vbo(struct pipe_context
*pctx
, const struct pipe_draw_info
*info
)
295 struct vc5_context
*vc5
= vc5_context(pctx
);
/* Trim degenerate primitives early.
 * NOTE(review): the (unsigned*)&info->count cast strips const from a
 * const-qualified pipe_draw_info; if u_trim_pipe_prim writes through it,
 * that is UB on a truly-const object -- confirm against upstream.
 */
297 if (!info
->count_from_stream_output
&& !info
->indirect
&&
298 !info
->primitive_restart
&&
299 !u_trim_pipe_prim(info
->mode
, (unsigned*)&info
->count
))
302 /* Fall back for weird desktop GL primitive restart values. */
303 if (info
->primitive_restart
&&
307 switch (info
->index_size
) {
316 if (info
->restart_index
!= mask
) {
317 util_draw_vbo_without_prim_restart(pctx
, info
);
/* QUADS and above aren't supported by the hardware primitive types;
 * convert via u_primconvert.
 */
322 if (info
->mode
>= PIPE_PRIM_QUADS
) {
323 util_primconvert_save_rasterizer_state(vc5
->primconvert
, &vc5
->rasterizer
->base
);
324 util_primconvert_draw_vbo(vc5
->primconvert
, info
);
325 perf_debug("Fallback conversion for %d %s vertices\n",
326 info
->count
, u_prim_name(info
->mode
));
/* Before setting up the draw, flush anything writing to the textures
 * that we read from (comment truncated in extraction).
 */
333 vc5_predraw_check_textures(pctx
, &vc5
->verttex
);
334 vc5_predraw_check_textures(pctx
, &vc5
->fragtex
);
336 struct vc5_job
*job
= vc5_get_job_for_fbo(vc5
);
/* Get space to emit our draw call into the BCL, using a branch to
 * jump to a new BO if necessary.
 */
341 vc5_cl_ensure_space_with_branch(&job
->bcl
, 256 /* XXX */);
343 if (vc5
->prim_mode
!= info
->mode
) {
344 vc5
->prim_mode
= info
->mode
;
345 vc5
->dirty
|= VC5_DIRTY_PRIM_MODE
;
349 vc5_update_compiled_shaders(vc5
, info
->mode
);
351 vc5_emit_state(pctx
);
/* Re-emit the shader state record if anything it captures is dirty. */
353 if (vc5
->dirty
& (VC5_DIRTY_VTXBUF
|
355 VC5_DIRTY_PRIM_MODE
|
356 VC5_DIRTY_RASTERIZER
|
357 VC5_DIRTY_COMPILED_CS
|
358 VC5_DIRTY_COMPILED_VS
|
359 VC5_DIRTY_COMPILED_FS
|
360 vc5
->prog
.cs
->uniform_dirty_bits
|
361 vc5
->prog
.vs
->uniform_dirty_bits
|
362 vc5
->prog
.fs
->uniform_dirty_bits
)) {
363 vc5_emit_gl_shader_state(vc5
, info
);
/* The Base Vertex/Base Instance packet sets those values to nonzero
 * for the next draw call only.
 */
371 if (info
->index_bias
|| info
->start_instance
) {
372 cl_emit(&job
->bcl
, BASE_VERTEX_BASE_INSTANCE
, base
) {
373 base
.base_instance
= info
->start_instance
;
374 base
.base_vertex
= info
->index_bias
;
/* The HW only processes transform feedback on primitives with the
 * TF variant of the primitive mode (comment truncated in extraction);
 * prim_tf_enable is the offset from the plain to the TF enum values.
 */
381 uint32_t prim_tf_enable
= 0;
382 if (vc5
->streamout
.num_targets
)
383 prim_tf_enable
= (V3D_PRIM_POINTS_TF
- V3D_PRIM_POINTS
);
385 vc5_tf_statistics_record(vc5
, info
, prim_tf_enable
);
/* Note that the primitive type fields match with OpenGL/gallium
 * definitions, up to but not including QUADS.
 */
390 if (info
->index_size
) {
391 uint32_t index_size
= info
->index_size
;
392 uint32_t offset
= info
->start
* index_size
;
393 struct pipe_resource
*prsc
;
/* User index arrays get uploaded to a GPU-visible buffer first.
 * NOTE(review): the remaining u_upload_data() arguments and the
 * else-branch head were lost in extraction.
 */
394 if (info
->has_user_indices
) {
396 u_upload_data(vc5
->uploader
, 0,
397 info
->count
* info
->index_size
, 4,
401 prsc
= info
->index
.resource
;
403 struct vc5_resource
*rsc
= vc5_resource(prsc
);
405 if (info
->instance_count
> 1) {
406 cl_emit(&job
->bcl
, INDEXED_INSTANCED_PRIMITIVE_LIST
, prim
) {
/* index_type encodes 1/2/4-byte indices as log2 of the size. */
407 prim
.index_type
= ffs(info
->index_size
) - 1;
408 prim
.maximum_index
= (1u << 31) - 1; /* XXX */
409 prim
.address_of_indices_list
=
410 cl_address(rsc
->bo
, offset
);
411 prim
.mode
= info
->mode
| prim_tf_enable
;
412 prim
.enable_primitive_restarts
= info
->primitive_restart
;
414 prim
.number_of_instances
= info
->instance_count
;
415 prim
.instance_length
= info
->count
;
418 cl_emit(&job
->bcl
, INDEXED_PRIMITIVE_LIST
, prim
) {
419 prim
.index_type
= ffs(info
->index_size
) - 1;
420 prim
.length
= info
->count
;
421 prim
.maximum_index
= (1u << 31) - 1; /* XXX */
422 prim
.address_of_indices_list
=
423 cl_address(rsc
->bo
, offset
);
424 prim
.mode
= info
->mode
| prim_tf_enable
;
425 prim
.enable_primitive_restarts
= info
->primitive_restart
;
429 job
->draw_calls_queued
++;
/* Drop the temporary upload buffer reference, if we made one. */
431 if (info
->has_user_indices
)
432 pipe_resource_reference(&prsc
, NULL
);
/* Non-indexed path. */
434 if (info
->instance_count
> 1) {
435 cl_emit(&job
->bcl
, VERTEX_ARRAY_INSTANCED_PRIMITIVES
, prim
) {
436 prim
.mode
= info
->mode
| prim_tf_enable
;
437 prim
.index_of_first_vertex
= info
->start
;
438 prim
.number_of_instances
= info
->instance_count
;
439 prim
.instance_length
= info
->count
;
442 cl_emit(&job
->bcl
, VERTEX_ARRAY_PRIMITIVES
, prim
) {
443 prim
.mode
= info
->mode
| prim_tf_enable
;
444 prim
.length
= info
->count
;
445 prim
.index_of_first_vertex
= info
->start
;
449 job
->draw_calls_queued
++;
/* If Z/S testing is enabled, this draw reads and resolves the Z/S buffer:
 * reference its BO and mark which planes become initialized.
 */
451 if (vc5
->zsa
&& job
->zsbuf
&&
452 (vc5
->zsa
->base
.depth
.enabled
||
453 vc5
->zsa
->base
.stencil
[0].enabled
)) {
454 struct vc5_resource
*rsc
= vc5_resource(job
->zsbuf
->texture
);
455 vc5_job_add_bo(job
, rsc
->bo
);
457 if (vc5
->zsa
->base
.depth
.enabled
) {
458 job
->resolve
|= PIPE_CLEAR_DEPTH
;
/* NOTE(review): plain assignment here vs. |= in the stencil case below --
 * confirm the depth case is intentionally overwriting.
 */
459 rsc
->initialized_buffers
= PIPE_CLEAR_DEPTH
;
461 if (vc5
->zsa
->early_z_enable
)
462 job
->uses_early_z
= true;
465 if (vc5
->zsa
->base
.stencil
[0].enabled
) {
466 job
->resolve
|= PIPE_CLEAR_STENCIL
;
467 rsc
->initialized_buffers
|= PIPE_CLEAR_STENCIL
;
/* Reference the color buffers this draw newly resolves to. */
471 for (int i
= 0; i
< VC5_MAX_DRAW_BUFFERS
; i
++) {
472 uint32_t bit
= PIPE_CLEAR_COLOR0
<< i
;
474 if (job
->resolve
& bit
|| !job
->cbufs
[i
])
476 struct vc5_resource
*rsc
= vc5_resource(job
->cbufs
[i
]->texture
);
479 vc5_job_add_bo(job
, rsc
->bo
);
/* Flush early when the job references an excessive amount of memory. */
482 if (job
->referenced_size
> 768 * 1024 * 1024) {
483 perf_debug("Flushing job with %dkb to try to free up memory\n",
484 job
->referenced_size
/ 1024);
/* Debug option: submit after every draw. */
488 if (V3D_DEBUG
& V3D_DEBUG_ALWAYS_FLUSH
)
/* pipe_context::clear implementation: rather than drawing, records the
 * clear colors / depth / stencil on the current job so the tile store can
 * perform the clear.
 *
 * NOTE(review): fragmented by extraction; the `uc` (util_pack_color output)
 * declaration, switch `break`s, and closing braces are missing from this
 * span.  Fragments preserved verbatim.
 */
493 vc5_clear(struct pipe_context
*pctx
, unsigned buffers
,
494 const union pipe_color_union
*color
, double depth
, unsigned stencil
)
496 struct vc5_context
*vc5
= vc5_context(pctx
);
497 struct vc5_job
*job
= vc5_get_job_for_fbo(vc5
);
/* We can't flag new buffers for clearing once we've queued draws.  We
 * could avoid this by using the 3d engine to clear.
 */
502 if (job
->draw_calls_queued
) {
503 perf_debug("Flushing rendering to process new clear.\n");
504 vc5_job_submit(vc5
, job
);
505 job
= vc5_get_job_for_fbo(vc5
);
/* Record a packed clear color per color buffer being cleared. */
508 for (int i
= 0; i
< VC5_MAX_DRAW_BUFFERS
; i
++) {
509 uint32_t bit
= PIPE_CLEAR_COLOR0
<< i
;
510 if (!(buffers
& bit
))
513 struct pipe_surface
*psurf
= vc5
->framebuffer
.cbufs
[i
];
514 struct vc5_surface
*surf
= vc5_surface(psurf
);
515 struct vc5_resource
*rsc
= vc5_resource(psurf
->texture
);
518 uint32_t internal_size
= 4 << surf
->internal_bpp
;
/* NOTE(review): `swapped_color` is static so that redirecting `color`
 * to it stays valid, but this makes vc5_clear non-reentrant and, once
 * redirected, later loop iterations read the swapped values -- confirm
 * intended.
 */
520 static union pipe_color_union swapped_color
;
521 if (vc5
->swap_color_rb
& (1 << i
)) {
522 swapped_color
.f
[0] = color
->f
[2];
523 swapped_color
.f
[1] = color
->f
[1];
524 swapped_color
.f
[2] = color
->f
[0];
525 swapped_color
.f
[3] = color
->f
[3];
526 color
= &swapped_color
;
529 switch (surf
->internal_type
) {
530 case INTERNAL_TYPE_8
:
/* NOTE(review): both sides of this || test PIPE_FORMAT_B4G4R4A4_UNORM --
 * duplicated condition; the second operand likely should be a different
 * 4444 variant.  Confirm against upstream before changing.
 */
531 if (surf
->format
== PIPE_FORMAT_B4G4R4A4_UNORM
||
532 surf
->format
== PIPE_FORMAT_B4G4R4A4_UNORM
) {
/* Our actual hardware layout is ABGR4444, but we apply a swizzle when
 * texturing to flip things back around.
 */
537 util_pack_color(color
->f
, PIPE_FORMAT_A8R8G8B8_UNORM
,
540 util_pack_color(color
->f
, PIPE_FORMAT_R8G8B8A8_UNORM
,
543 memcpy(job
->clear_color
[i
], uc
.ui
, internal_size
);
545 case INTERNAL_TYPE_8I
:
546 case INTERNAL_TYPE_8UI
:
/* Integer 8-bit clears pack the four channels into one 32-bit word. */
547 job
->clear_color
[i
][0] = ((uc
.ui
[0] & 0xff) |
548 (uc
.ui
[1] & 0xff) << 8 |
549 (uc
.ui
[2] & 0xff) << 16 |
550 (uc
.ui
[3] & 0xff) << 24);
552 case INTERNAL_TYPE_16F
:
553 util_pack_color(color
->f
, PIPE_FORMAT_R16G16B16A16_FLOAT
,
555 memcpy(job
->clear_color
[i
], uc
.ui
, internal_size
);
557 case INTERNAL_TYPE_16I
:
558 case INTERNAL_TYPE_16UI
:
/* Integer 16-bit clears pack two channels per 32-bit word. */
559 job
->clear_color
[i
][0] = ((uc
.ui
[0] & 0xffff) |
561 job
->clear_color
[i
][1] = ((uc
.ui
[2] & 0xffff) |
564 case INTERNAL_TYPE_32F
:
565 case INTERNAL_TYPE_32I
:
566 case INTERNAL_TYPE_32UI
:
/* 32-bit internal formats take the caller's words verbatim. */
567 memcpy(job
->clear_color
[i
], color
->ui
, internal_size
);
571 rsc
->initialized_buffers
|= bit
;
/* Record depth/stencil clear values if requested. */
574 unsigned zsclear
= buffers
& PIPE_CLEAR_DEPTHSTENCIL
;
576 struct vc5_resource
*rsc
=
577 vc5_resource(vc5
->framebuffer
.zsbuf
->texture
);
579 if (zsclear
& PIPE_CLEAR_DEPTH
)
580 job
->clear_z
= depth
;
581 if (zsclear
& PIPE_CLEAR_STENCIL
)
582 job
->clear_s
= stencil
;
584 rsc
->initialized_buffers
|= zsclear
;
/* A clear covers the whole framebuffer; widen the job's draw extent and
 * flag the cleared buffers for both clear and resolve.
 */
589 job
->draw_max_x
= vc5
->framebuffer
.width
;
590 job
->draw_max_y
= vc5
->framebuffer
.height
;
591 job
->cleared
|= buffers
;
592 job
->resolve
|= buffers
;
598 vc5_clear_render_target(struct pipe_context
*pctx
, struct pipe_surface
*ps
,
599 const union pipe_color_union
*color
,
600 unsigned x
, unsigned y
, unsigned w
, unsigned h
,
601 bool render_condition_enabled
)
603 fprintf(stderr
, "unimpl: clear RT\n");
607 vc5_clear_depth_stencil(struct pipe_context
*pctx
, struct pipe_surface
*ps
,
608 unsigned buffers
, double depth
, unsigned stencil
,
609 unsigned x
, unsigned y
, unsigned w
, unsigned h
,
610 bool render_condition_enabled
)
612 fprintf(stderr
, "unimpl: clear DS\n");
616 vc5_draw_init(struct pipe_context
*pctx
)
618 pctx
->draw_vbo
= vc5_draw_vbo
;
619 pctx
->clear
= vc5_clear
;
620 pctx
->clear_render_target
= vc5_clear_render_target
;
621 pctx
->clear_depth_stencil
= vc5_clear_depth_stencil
;