/*
 * Copyright © 2014-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
24 #include "util/u_blitter.h"
25 #include "util/u_prim.h"
26 #include "util/u_format.h"
27 #include "util/u_pack_color.h"
28 #include "util/u_prim_restart.h"
29 #include "util/u_upload_mgr.h"
30 #include "indices/u_primconvert.h"
32 #include "v3d_context.h"
33 #include "v3d_resource.h"
35 #include "broadcom/compiler/v3d_compiler.h"
36 #include "broadcom/common/v3d_macros.h"
37 #include "broadcom/cle/v3dx_pack.h"
40 * Does the initial bining command list setup for drawing to a given FBO.
43 v3d_start_draw(struct v3d_context
*v3d
)
45 struct v3d_job
*job
= v3d
->job
;
50 /* Get space to emit our BCL state, using a branch to jump to a new BO
53 v3d_cl_ensure_space_with_branch(&job
->bcl
, 256 /* XXX */);
55 job
->submit
.bcl_start
= job
->bcl
.bo
->offset
;
56 v3d_job_add_bo(job
, job
->bcl
.bo
);
58 job
->tile_alloc
= v3d_bo_alloc(v3d
->screen
, 1024 * 1024, "tile alloc");
59 uint32_t tsda_per_tile_size
= v3d
->screen
->devinfo
.ver
>= 40 ? 256 : 64;
60 job
->tile_state
= v3d_bo_alloc(v3d
->screen
,
67 /* "Binning mode lists start with a Tile Binning Mode Configuration
70 * Part1 signals the end of binning config setup.
72 cl_emit(&job
->bcl
, TILE_BINNING_MODE_CONFIGURATION_PART2
, config
) {
73 config
.tile_allocation_memory_address
=
74 cl_address(job
->tile_alloc
, 0);
75 config
.tile_allocation_memory_size
= job
->tile_alloc
->size
;
79 cl_emit(&job
->bcl
, TILE_BINNING_MODE_CONFIGURATION_PART1
, config
) {
81 config
.width_in_pixels_minus_1
= v3d
->framebuffer
.width
- 1;
82 config
.height_in_pixels_minus_1
= v3d
->framebuffer
.height
- 1;
83 config
.number_of_render_targets_minus_1
=
84 MAX2(v3d
->framebuffer
.nr_cbufs
, 1) - 1;
85 #else /* V3D_VERSION < 40 */
86 config
.tile_state_data_array_base_address
=
87 cl_address(job
->tile_state
, 0);
89 config
.width_in_tiles
= job
->draw_tiles_x
;
90 config
.height_in_tiles
= job
->draw_tiles_y
;
92 config
.number_of_render_targets
=
93 MAX2(v3d
->framebuffer
.nr_cbufs
, 1);
94 #endif /* V3D_VERSION < 40 */
96 config
.multisample_mode_4x
= job
->msaa
;
98 config
.maximum_bpp_of_all_render_targets
= job
->internal_bpp
;
101 /* There's definitely nothing in the VCD cache we want. */
102 cl_emit(&job
->bcl
, FLUSH_VCD_CACHE
, bin
);
104 /* Disable any leftover OQ state from another job. */
105 cl_emit(&job
->bcl
, OCCLUSION_QUERY_COUNTER
, counter
);
107 /* "Binning mode lists must have a Start Tile Binning item (6) after
108 * any prefix state data before the binning list proper starts."
110 cl_emit(&job
->bcl
, START_TILE_BINNING
, bin
);
112 job
->needs_flush
= true;
113 job
->draw_width
= v3d
->framebuffer
.width
;
114 job
->draw_height
= v3d
->framebuffer
.height
;
118 v3d_predraw_check_textures(struct pipe_context
*pctx
,
119 struct v3d_texture_stateobj
*stage_tex
)
121 struct v3d_context
*v3d
= v3d_context(pctx
);
123 for (int i
= 0; i
< stage_tex
->num_textures
; i
++) {
124 struct pipe_sampler_view
*view
= stage_tex
->textures
[i
];
128 v3d_flush_jobs_writing_resource(v3d
, view
->texture
);
133 v3d_emit_gl_shader_state(struct v3d_context
*v3d
,
134 const struct pipe_draw_info
*info
)
136 struct v3d_job
*job
= v3d
->job
;
137 /* VC5_DIRTY_VTXSTATE */
138 struct v3d_vertex_stateobj
*vtx
= v3d
->vtx
;
139 /* VC5_DIRTY_VTXBUF */
140 struct v3d_vertexbuf_stateobj
*vertexbuf
= &v3d
->vertexbuf
;
142 /* Upload the uniforms to the indirect CL first */
143 struct v3d_cl_reloc fs_uniforms
=
144 v3d_write_uniforms(v3d
, v3d
->prog
.fs
,
145 &v3d
->constbuf
[PIPE_SHADER_FRAGMENT
],
147 struct v3d_cl_reloc vs_uniforms
=
148 v3d_write_uniforms(v3d
, v3d
->prog
.vs
,
149 &v3d
->constbuf
[PIPE_SHADER_VERTEX
],
151 struct v3d_cl_reloc cs_uniforms
=
152 v3d_write_uniforms(v3d
, v3d
->prog
.cs
,
153 &v3d
->constbuf
[PIPE_SHADER_VERTEX
],
156 /* See GFXH-930 workaround below */
157 uint32_t num_elements_to_emit
= MAX2(vtx
->num_elements
, 1);
158 uint32_t shader_rec_offset
=
159 v3d_cl_ensure_space(&job
->indirect
,
160 cl_packet_length(GL_SHADER_STATE_RECORD
) +
161 num_elements_to_emit
*
162 cl_packet_length(GL_SHADER_STATE_ATTRIBUTE_RECORD
),
165 cl_emit(&job
->indirect
, GL_SHADER_STATE_RECORD
, shader
) {
166 shader
.enable_clipping
= true;
167 /* VC5_DIRTY_PRIM_MODE | VC5_DIRTY_RASTERIZER */
168 shader
.point_size_in_shaded_vertex_data
=
169 (info
->mode
== PIPE_PRIM_POINTS
&&
170 v3d
->rasterizer
->base
.point_size_per_vertex
);
172 /* Must be set if the shader modifies Z, discards, or modifies
173 * the sample mask. For any of these cases, the fragment
174 * shader needs to write the Z value (even just discards).
176 shader
.fragment_shader_does_z_writes
=
177 (v3d
->prog
.fs
->prog_data
.fs
->writes_z
||
178 v3d
->prog
.fs
->prog_data
.fs
->discard
);
180 shader
.fragment_shader_uses_real_pixel_centre_w_in_addition_to_centroid_w2
=
181 v3d
->prog
.fs
->prog_data
.fs
->uses_centroid_and_center_w
;
183 shader
.number_of_varyings_in_fragment_shader
=
184 v3d
->prog
.fs
->prog_data
.base
->num_inputs
;
186 shader
.coordinate_shader_propagate_nans
= true;
187 shader
.vertex_shader_propagate_nans
= true;
188 shader
.fragment_shader_propagate_nans
= true;
190 shader
.coordinate_shader_code_address
=
191 cl_address(v3d
->prog
.cs
->bo
, 0);
192 shader
.vertex_shader_code_address
=
193 cl_address(v3d
->prog
.vs
->bo
, 0);
194 shader
.fragment_shader_code_address
=
195 cl_address(v3d
->prog
.fs
->bo
, 0);
197 /* XXX: Use combined input/output size flag in the common
200 shader
.coordinate_shader_has_separate_input_and_output_vpm_blocks
= true;
201 shader
.vertex_shader_has_separate_input_and_output_vpm_blocks
= true;
202 shader
.coordinate_shader_input_vpm_segment_size
=
203 MAX2(v3d
->prog
.cs
->prog_data
.vs
->vpm_input_size
, 1);
204 shader
.vertex_shader_input_vpm_segment_size
=
205 MAX2(v3d
->prog
.vs
->prog_data
.vs
->vpm_input_size
, 1);
207 shader
.coordinate_shader_output_vpm_segment_size
=
208 v3d
->prog
.cs
->prog_data
.vs
->vpm_output_size
;
209 shader
.vertex_shader_output_vpm_segment_size
=
210 v3d
->prog
.vs
->prog_data
.vs
->vpm_output_size
;
212 shader
.coordinate_shader_uniforms_address
= cs_uniforms
;
213 shader
.vertex_shader_uniforms_address
= vs_uniforms
;
214 shader
.fragment_shader_uniforms_address
= fs_uniforms
;
216 #if V3D_VERSION >= 41
217 shader
.coordinate_shader_4_way_threadable
=
218 v3d
->prog
.cs
->prog_data
.vs
->base
.threads
== 4;
219 shader
.vertex_shader_4_way_threadable
=
220 v3d
->prog
.vs
->prog_data
.vs
->base
.threads
== 4;
221 shader
.fragment_shader_4_way_threadable
=
222 v3d
->prog
.fs
->prog_data
.fs
->base
.threads
== 4;
224 shader
.coordinate_shader_start_in_final_thread_section
=
225 v3d
->prog
.cs
->prog_data
.vs
->base
.single_seg
;
226 shader
.vertex_shader_start_in_final_thread_section
=
227 v3d
->prog
.vs
->prog_data
.vs
->base
.single_seg
;
228 shader
.fragment_shader_start_in_final_thread_section
=
229 v3d
->prog
.fs
->prog_data
.fs
->base
.single_seg
;
231 shader
.coordinate_shader_4_way_threadable
=
232 v3d
->prog
.cs
->prog_data
.vs
->base
.threads
== 4;
233 shader
.coordinate_shader_2_way_threadable
=
234 v3d
->prog
.cs
->prog_data
.vs
->base
.threads
== 2;
235 shader
.vertex_shader_4_way_threadable
=
236 v3d
->prog
.vs
->prog_data
.vs
->base
.threads
== 4;
237 shader
.vertex_shader_2_way_threadable
=
238 v3d
->prog
.vs
->prog_data
.vs
->base
.threads
== 2;
239 shader
.fragment_shader_4_way_threadable
=
240 v3d
->prog
.fs
->prog_data
.fs
->base
.threads
== 4;
241 shader
.fragment_shader_2_way_threadable
=
242 v3d
->prog
.fs
->prog_data
.fs
->base
.threads
== 2;
245 shader
.vertex_id_read_by_coordinate_shader
=
246 v3d
->prog
.cs
->prog_data
.vs
->uses_vid
;
247 shader
.instance_id_read_by_coordinate_shader
=
248 v3d
->prog
.cs
->prog_data
.vs
->uses_iid
;
249 shader
.vertex_id_read_by_vertex_shader
=
250 v3d
->prog
.vs
->prog_data
.vs
->uses_vid
;
251 shader
.instance_id_read_by_vertex_shader
=
252 v3d
->prog
.vs
->prog_data
.vs
->uses_iid
;
254 shader
.address_of_default_attribute_values
=
255 cl_address(vtx
->default_attribute_values
, 0);
258 for (int i
= 0; i
< vtx
->num_elements
; i
++) {
259 struct pipe_vertex_element
*elem
= &vtx
->pipe
[i
];
260 struct pipe_vertex_buffer
*vb
=
261 &vertexbuf
->vb
[elem
->vertex_buffer_index
];
262 struct v3d_resource
*rsc
= v3d_resource(vb
->buffer
.resource
);
264 const uint32_t size
=
265 cl_packet_length(GL_SHADER_STATE_ATTRIBUTE_RECORD
);
266 cl_emit_with_prepacked(&job
->indirect
,
267 GL_SHADER_STATE_ATTRIBUTE_RECORD
,
268 &vtx
->attrs
[i
* size
], attr
) {
269 attr
.stride
= vb
->stride
;
270 attr
.address
= cl_address(rsc
->bo
,
273 attr
.number_of_values_read_by_coordinate_shader
=
274 v3d
->prog
.cs
->prog_data
.vs
->vattr_sizes
[i
];
275 attr
.number_of_values_read_by_vertex_shader
=
276 v3d
->prog
.vs
->prog_data
.vs
->vattr_sizes
[i
];
277 #if V3D_VERSION >= 41
278 attr
.maximum_index
= 0xffffff;
283 if (vtx
->num_elements
== 0) {
284 /* GFXH-930: At least one attribute must be enabled and read
285 * by CS and VS. If we have no attributes being consumed by
286 * the shader, set up a dummy to be loaded into the VPM.
288 cl_emit(&job
->indirect
, GL_SHADER_STATE_ATTRIBUTE_RECORD
, attr
) {
289 /* Valid address of data whose value will be unused. */
290 attr
.address
= cl_address(job
->indirect
.bo
, 0);
292 attr
.type
= ATTRIBUTE_FLOAT
;
296 attr
.number_of_values_read_by_coordinate_shader
= 1;
297 attr
.number_of_values_read_by_vertex_shader
= 1;
301 cl_emit(&job
->bcl
, GL_SHADER_STATE
, state
) {
302 state
.address
= cl_address(job
->indirect
.bo
, shader_rec_offset
);
303 state
.number_of_attribute_arrays
= num_elements_to_emit
;
306 v3d_bo_unreference(&cs_uniforms
.bo
);
307 v3d_bo_unreference(&vs_uniforms
.bo
);
308 v3d_bo_unreference(&fs_uniforms
.bo
);
310 job
->shader_rec_count
++;
314 * Computes the various transform feedback statistics, since they can't be
315 * recorded by CL packets.
318 v3d_tf_statistics_record(struct v3d_context
*v3d
,
319 const struct pipe_draw_info
*info
,
322 if (!v3d
->active_queries
)
325 uint32_t prims
= u_prims_for_vertices(info
->mode
, info
->count
);
326 v3d
->prims_generated
+= prims
;
329 /* XXX: Only count if we didn't overflow. */
330 v3d
->tf_prims_generated
+= prims
;
335 v3d_update_job_ez(struct v3d_context
*v3d
, struct v3d_job
*job
)
337 switch (v3d
->zsa
->ez_state
) {
338 case VC5_EZ_UNDECIDED
:
339 /* If the Z/S state didn't pick a direction but didn't
340 * disable, then go along with the current EZ state. This
341 * allows EZ optimization for Z func == EQUAL or NEVER.
347 /* If the Z/S state picked a direction, then it needs to match
348 * the current direction if we've decided on one.
350 if (job
->ez_state
== VC5_EZ_UNDECIDED
)
351 job
->ez_state
= v3d
->zsa
->ez_state
;
352 else if (job
->ez_state
!= v3d
->zsa
->ez_state
)
353 job
->ez_state
= VC5_EZ_DISABLED
;
356 case VC5_EZ_DISABLED
:
357 /* If the current Z/S state disables EZ because of a bad Z
358 * func or stencil operation, then we can't do any more EZ in
361 job
->ez_state
= VC5_EZ_DISABLED
;
365 /* If the FS affects the Z of the pixels, then it may update against
366 * the chosen EZ direction (though we could use
367 * ARB_conservative_depth's hints to avoid this)
369 if (v3d
->prog
.fs
->prog_data
.fs
->writes_z
) {
370 job
->ez_state
= VC5_EZ_DISABLED
;
373 if (job
->first_ez_state
== VC5_EZ_UNDECIDED
)
374 job
->first_ez_state
= job
->ez_state
;
378 v3d_draw_vbo(struct pipe_context
*pctx
, const struct pipe_draw_info
*info
)
380 struct v3d_context
*v3d
= v3d_context(pctx
);
382 if (!info
->count_from_stream_output
&& !info
->indirect
&&
383 !info
->primitive_restart
&&
384 !u_trim_pipe_prim(info
->mode
, (unsigned*)&info
->count
))
387 /* Fall back for weird desktop GL primitive restart values. */
388 if (info
->primitive_restart
&&
392 switch (info
->index_size
) {
401 if (info
->restart_index
!= mask
) {
402 util_draw_vbo_without_prim_restart(pctx
, info
);
407 if (info
->mode
>= PIPE_PRIM_QUADS
) {
408 util_primconvert_save_rasterizer_state(v3d
->primconvert
, &v3d
->rasterizer
->base
);
409 util_primconvert_draw_vbo(v3d
->primconvert
, info
);
410 perf_debug("Fallback conversion for %d %s vertices\n",
411 info
->count
, u_prim_name(info
->mode
));
415 /* Before setting up the draw, flush anything writing to the textures
418 v3d_predraw_check_textures(pctx
, &v3d
->verttex
);
419 v3d_predraw_check_textures(pctx
, &v3d
->fragtex
);
421 struct v3d_job
*job
= v3d_get_job_for_fbo(v3d
);
423 /* Get space to emit our draw call into the BCL, using a branch to
424 * jump to a new BO if necessary.
426 v3d_cl_ensure_space_with_branch(&job
->bcl
, 256 /* XXX */);
428 if (v3d
->prim_mode
!= info
->mode
) {
429 v3d
->prim_mode
= info
->mode
;
430 v3d
->dirty
|= VC5_DIRTY_PRIM_MODE
;
434 v3d_update_compiled_shaders(v3d
, info
->mode
);
435 v3d_update_job_ez(v3d
, job
);
437 #if V3D_VERSION >= 41
438 v3d41_emit_state(pctx
);
440 v3d33_emit_state(pctx
);
443 if (v3d
->dirty
& (VC5_DIRTY_VTXBUF
|
445 VC5_DIRTY_PRIM_MODE
|
446 VC5_DIRTY_RASTERIZER
|
447 VC5_DIRTY_COMPILED_CS
|
448 VC5_DIRTY_COMPILED_VS
|
449 VC5_DIRTY_COMPILED_FS
|
450 v3d
->prog
.cs
->uniform_dirty_bits
|
451 v3d
->prog
.vs
->uniform_dirty_bits
|
452 v3d
->prog
.fs
->uniform_dirty_bits
)) {
453 v3d_emit_gl_shader_state(v3d
, info
);
458 /* The Base Vertex/Base Instance packet sets those values to nonzero
459 * for the next draw call only.
461 if (info
->index_bias
|| info
->start_instance
) {
462 cl_emit(&job
->bcl
, BASE_VERTEX_BASE_INSTANCE
, base
) {
463 base
.base_instance
= info
->start_instance
;
464 base
.base_vertex
= info
->index_bias
;
468 uint32_t prim_tf_enable
= 0;
470 /* V3D 3.x: The HW only processes transform feedback on primitives
473 if (v3d
->streamout
.num_targets
)
474 prim_tf_enable
= (V3D_PRIM_POINTS_TF
- V3D_PRIM_POINTS
);
477 v3d_tf_statistics_record(v3d
, info
, v3d
->streamout
.num_targets
);
479 /* Note that the primitive type fields match with OpenGL/gallium
480 * definitions, up to but not including QUADS.
482 if (info
->index_size
) {
483 uint32_t index_size
= info
->index_size
;
484 uint32_t offset
= info
->start
* index_size
;
485 struct pipe_resource
*prsc
;
486 if (info
->has_user_indices
) {
488 u_upload_data(v3d
->uploader
, 0,
489 info
->count
* info
->index_size
, 4,
493 prsc
= info
->index
.resource
;
495 struct v3d_resource
*rsc
= v3d_resource(prsc
);
497 #if V3D_VERSION >= 40
498 cl_emit(&job
->bcl
, INDEX_BUFFER_SETUP
, ib
) {
499 ib
.address
= cl_address(rsc
->bo
, 0);
500 ib
.size
= rsc
->bo
->size
;
504 if (info
->instance_count
> 1) {
505 cl_emit(&job
->bcl
, INDEXED_INSTANCED_PRIMITIVE_LIST
, prim
) {
506 prim
.index_type
= ffs(info
->index_size
) - 1;
507 #if V3D_VERSION >= 40
508 prim
.index_offset
= offset
;
509 #else /* V3D_VERSION < 40 */
510 prim
.maximum_index
= (1u << 31) - 1; /* XXX */
511 prim
.address_of_indices_list
=
512 cl_address(rsc
->bo
, offset
);
513 #endif /* V3D_VERSION < 40 */
514 prim
.mode
= info
->mode
| prim_tf_enable
;
515 prim
.enable_primitive_restarts
= info
->primitive_restart
;
517 prim
.number_of_instances
= info
->instance_count
;
518 prim
.instance_length
= info
->count
;
521 cl_emit(&job
->bcl
, INDEXED_PRIMITIVE_LIST
, prim
) {
522 prim
.index_type
= ffs(info
->index_size
) - 1;
523 prim
.length
= info
->count
;
524 #if V3D_VERSION >= 40
525 prim
.index_offset
= offset
;
526 #else /* V3D_VERSION < 40 */
527 prim
.maximum_index
= (1u << 31) - 1; /* XXX */
528 prim
.address_of_indices_list
=
529 cl_address(rsc
->bo
, offset
);
530 #endif /* V3D_VERSION < 40 */
531 prim
.mode
= info
->mode
| prim_tf_enable
;
532 prim
.enable_primitive_restarts
= info
->primitive_restart
;
536 job
->draw_calls_queued
++;
538 if (info
->has_user_indices
)
539 pipe_resource_reference(&prsc
, NULL
);
541 if (info
->instance_count
> 1) {
542 cl_emit(&job
->bcl
, VERTEX_ARRAY_INSTANCED_PRIMITIVES
, prim
) {
543 prim
.mode
= info
->mode
| prim_tf_enable
;
544 prim
.index_of_first_vertex
= info
->start
;
545 prim
.number_of_instances
= info
->instance_count
;
546 prim
.instance_length
= info
->count
;
549 cl_emit(&job
->bcl
, VERTEX_ARRAY_PRIMITIVES
, prim
) {
550 prim
.mode
= info
->mode
| prim_tf_enable
;
551 prim
.length
= info
->count
;
552 prim
.index_of_first_vertex
= info
->start
;
556 job
->draw_calls_queued
++;
558 if (v3d
->zsa
&& job
->zsbuf
&&
559 (v3d
->zsa
->base
.depth
.enabled
||
560 v3d
->zsa
->base
.stencil
[0].enabled
)) {
561 struct v3d_resource
*rsc
= v3d_resource(job
->zsbuf
->texture
);
562 v3d_job_add_bo(job
, rsc
->bo
);
564 if (v3d
->zsa
->base
.depth
.enabled
) {
565 job
->resolve
|= PIPE_CLEAR_DEPTH
;
566 rsc
->initialized_buffers
= PIPE_CLEAR_DEPTH
;
569 if (v3d
->zsa
->base
.stencil
[0].enabled
) {
570 job
->resolve
|= PIPE_CLEAR_STENCIL
;
571 rsc
->initialized_buffers
|= PIPE_CLEAR_STENCIL
;
575 for (int i
= 0; i
< VC5_MAX_DRAW_BUFFERS
; i
++) {
576 uint32_t bit
= PIPE_CLEAR_COLOR0
<< i
;
578 if (job
->resolve
& bit
|| !job
->cbufs
[i
])
580 struct v3d_resource
*rsc
= v3d_resource(job
->cbufs
[i
]->texture
);
583 v3d_job_add_bo(job
, rsc
->bo
);
586 if (job
->referenced_size
> 768 * 1024 * 1024) {
587 perf_debug("Flushing job with %dkb to try to free up memory\n",
588 job
->referenced_size
/ 1024);
592 if (V3D_DEBUG
& V3D_DEBUG_ALWAYS_FLUSH
)
597 v3d_clear(struct pipe_context
*pctx
, unsigned buffers
,
598 const union pipe_color_union
*color
, double depth
, unsigned stencil
)
600 struct v3d_context
*v3d
= v3d_context(pctx
);
601 struct v3d_job
*job
= v3d_get_job_for_fbo(v3d
);
603 /* We can't flag new buffers for clearing once we've queued draws. We
604 * could avoid this by using the 3d engine to clear.
606 if (job
->draw_calls_queued
) {
607 perf_debug("Flushing rendering to process new clear.\n");
608 v3d_job_submit(v3d
, job
);
609 job
= v3d_get_job_for_fbo(v3d
);
612 for (int i
= 0; i
< VC5_MAX_DRAW_BUFFERS
; i
++) {
613 uint32_t bit
= PIPE_CLEAR_COLOR0
<< i
;
614 if (!(buffers
& bit
))
617 struct pipe_surface
*psurf
= v3d
->framebuffer
.cbufs
[i
];
618 struct v3d_surface
*surf
= v3d_surface(psurf
);
619 struct v3d_resource
*rsc
= v3d_resource(psurf
->texture
);
622 uint32_t internal_size
= 4 << surf
->internal_bpp
;
624 static union pipe_color_union swapped_color
;
625 if (v3d
->swap_color_rb
& (1 << i
)) {
626 swapped_color
.f
[0] = color
->f
[2];
627 swapped_color
.f
[1] = color
->f
[1];
628 swapped_color
.f
[2] = color
->f
[0];
629 swapped_color
.f
[3] = color
->f
[3];
630 color
= &swapped_color
;
633 switch (surf
->internal_type
) {
634 case V3D_INTERNAL_TYPE_8
:
635 util_pack_color(color
->f
, PIPE_FORMAT_R8G8B8A8_UNORM
,
637 memcpy(job
->clear_color
[i
], uc
.ui
, internal_size
);
639 case V3D_INTERNAL_TYPE_8I
:
640 case V3D_INTERNAL_TYPE_8UI
:
641 job
->clear_color
[i
][0] = ((color
->ui
[0] & 0xff) |
642 (color
->ui
[1] & 0xff) << 8 |
643 (color
->ui
[2] & 0xff) << 16 |
644 (color
->ui
[3] & 0xff) << 24);
646 case V3D_INTERNAL_TYPE_16F
:
647 util_pack_color(color
->f
, PIPE_FORMAT_R16G16B16A16_FLOAT
,
649 memcpy(job
->clear_color
[i
], uc
.ui
, internal_size
);
651 case V3D_INTERNAL_TYPE_16I
:
652 case V3D_INTERNAL_TYPE_16UI
:
653 job
->clear_color
[i
][0] = ((color
->ui
[0] & 0xffff) |
655 job
->clear_color
[i
][1] = ((color
->ui
[2] & 0xffff) |
658 case V3D_INTERNAL_TYPE_32F
:
659 case V3D_INTERNAL_TYPE_32I
:
660 case V3D_INTERNAL_TYPE_32UI
:
661 memcpy(job
->clear_color
[i
], color
->ui
, internal_size
);
665 rsc
->initialized_buffers
|= bit
;
668 unsigned zsclear
= buffers
& PIPE_CLEAR_DEPTHSTENCIL
;
670 struct v3d_resource
*rsc
=
671 v3d_resource(v3d
->framebuffer
.zsbuf
->texture
);
673 if (zsclear
& PIPE_CLEAR_DEPTH
)
674 job
->clear_z
= depth
;
675 if (zsclear
& PIPE_CLEAR_STENCIL
)
676 job
->clear_s
= stencil
;
678 rsc
->initialized_buffers
|= zsclear
;
683 job
->draw_max_x
= v3d
->framebuffer
.width
;
684 job
->draw_max_y
= v3d
->framebuffer
.height
;
685 job
->cleared
|= buffers
;
686 job
->resolve
|= buffers
;
692 v3d_clear_render_target(struct pipe_context
*pctx
, struct pipe_surface
*ps
,
693 const union pipe_color_union
*color
,
694 unsigned x
, unsigned y
, unsigned w
, unsigned h
,
695 bool render_condition_enabled
)
697 fprintf(stderr
, "unimpl: clear RT\n");
701 v3d_clear_depth_stencil(struct pipe_context
*pctx
, struct pipe_surface
*ps
,
702 unsigned buffers
, double depth
, unsigned stencil
,
703 unsigned x
, unsigned y
, unsigned w
, unsigned h
,
704 bool render_condition_enabled
)
706 fprintf(stderr
, "unimpl: clear DS\n");
710 v3dX(draw_init
)(struct pipe_context
*pctx
)
712 pctx
->draw_vbo
= v3d_draw_vbo
;
713 pctx
->clear
= v3d_clear
;
714 pctx
->clear_render_target
= v3d_clear_render_target
;
715 pctx
->clear_depth_stencil
= v3d_clear_depth_stencil
;