broadcom/vc5: Always use the RGBA8 formats for RGBX8.
[mesa.git] / src / gallium / drivers / vc5 / vc5_draw.c
/*
 * Copyright © 2014-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/u_blitter.h"
#include "util/u_prim.h"
#include "util/u_format.h"
#include "util/u_pack_color.h"
#include "util/u_prim_restart.h"
#include "util/u_upload_mgr.h"
#include "indices/u_primconvert.h"

#include "vc5_context.h"
#include "vc5_resource.h"
#include "vc5_cl.h"
#include "broadcom/compiler/v3d_compiler.h"
#include "broadcom/common/v3d_macros.h"
#include "broadcom/cle/v3dx_pack.h"

/**
 * Does the initial binning command list setup for drawing to a given FBO.
 */
static void
vc5_start_draw(struct vc5_context *vc5)
{
        struct vc5_job *job = vc5->job;

        if (job->needs_flush)
                return;

        /* Get space to emit our BCL state, using a branch to jump to a new BO
         * if necessary.
         */
        vc5_cl_ensure_space_with_branch(&job->bcl, 256 /* XXX */);

        job->submit.bcl_start = job->bcl.bo->offset;
        vc5_job_add_bo(job, job->bcl.bo);

        job->tile_alloc = vc5_bo_alloc(vc5->screen, 1024 * 1024, "tile alloc");
        uint32_t tsda_per_tile_size = vc5->screen->devinfo.ver >= 40 ? 256 : 64;
        job->tile_state = vc5_bo_alloc(vc5->screen,
                                       job->draw_tiles_y *
                                       job->draw_tiles_x *
                                       tsda_per_tile_size,
                                       "TSDA");

#if V3D_VERSION < 40
        /* "Binning mode lists start with a Tile Binning Mode Configuration
         * item (120)"
         *
         * Part1 signals the end of binning config setup.
         */
        cl_emit(&job->bcl, TILE_BINNING_MODE_CONFIGURATION_PART2, config) {
                config.tile_allocation_memory_address =
                        cl_address(job->tile_alloc, 0);
                config.tile_allocation_memory_size = job->tile_alloc->size;
        }
#endif

        cl_emit(&job->bcl, TILE_BINNING_MODE_CONFIGURATION_PART1, config) {
#if V3D_VERSION >= 40
                config.width_in_pixels_minus_1 = vc5->framebuffer.width - 1;
                config.height_in_pixels_minus_1 = vc5->framebuffer.height - 1;
                config.number_of_render_targets_minus_1 =
                        MAX2(vc5->framebuffer.nr_cbufs, 1) - 1;
#else /* V3D_VERSION < 40 */
                config.tile_state_data_array_base_address =
                        cl_address(job->tile_state, 0);

                config.width_in_tiles = job->draw_tiles_x;
                config.height_in_tiles = job->draw_tiles_y;
                /* Must be >= 1 */
                config.number_of_render_targets =
                        MAX2(vc5->framebuffer.nr_cbufs, 1);
#endif /* V3D_VERSION < 40 */

                config.multisample_mode_4x = job->msaa;

                config.maximum_bpp_of_all_render_targets = job->internal_bpp;
        }

        /* There's definitely nothing in the VCD cache we want. */
        cl_emit(&job->bcl, FLUSH_VCD_CACHE, bin);

        /* Disable any leftover OQ state from another job. */
        cl_emit(&job->bcl, OCCLUSION_QUERY_COUNTER, counter);

        /* "Binning mode lists must have a Start Tile Binning item (6) after
         * any prefix state data before the binning list proper starts."
         */
        cl_emit(&job->bcl, START_TILE_BINNING, bin);

        job->needs_flush = true;
        job->draw_width = vc5->framebuffer.width;
        job->draw_height = vc5->framebuffer.height;
}

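/**
 * Flushes any jobs writing to the textures bound at this shader stage, so
 * that the draw we are about to queue reads completed rendering.
 */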
static void
vc5_predraw_check_textures(struct pipe_context *pctx,
                           struct vc5_texture_stateobj *stage_tex)
{
        struct vc5_context *vc5 = vc5_context(pctx);

        for (int i = 0; i < stage_tex->num_textures; i++) {
                struct pipe_sampler_view *view = stage_tex->textures[i];
                if (!view)
                        continue;

                vc5_flush_jobs_writing_resource(vc5, view->texture);
        }
}

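/**
 * Uploads the current shaders' uniforms and emits the GL shader state record
 * (plus one attribute record per vertex element) into the indirect CL, then
 * points the BCL at it with a GL_SHADER_STATE packet.
 */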
static void
vc5_emit_gl_shader_state(struct vc5_context *vc5,
                         const struct pipe_draw_info *info)
{
        struct vc5_job *job = vc5->job;
        /* VC5_DIRTY_VTXSTATE */
        struct vc5_vertex_stateobj *vtx = vc5->vtx;
        /* VC5_DIRTY_VTXBUF */
        struct vc5_vertexbuf_stateobj *vertexbuf = &vc5->vertexbuf;

        /* Upload the uniforms to the indirect CL first */
        struct vc5_cl_reloc fs_uniforms =
                vc5_write_uniforms(vc5, vc5->prog.fs,
                                   &vc5->constbuf[PIPE_SHADER_FRAGMENT],
                                   &vc5->fragtex);
        struct vc5_cl_reloc vs_uniforms =
                vc5_write_uniforms(vc5, vc5->prog.vs,
                                   &vc5->constbuf[PIPE_SHADER_VERTEX],
                                   &vc5->verttex);
        struct vc5_cl_reloc cs_uniforms =
                vc5_write_uniforms(vc5, vc5->prog.cs,
                                   &vc5->constbuf[PIPE_SHADER_VERTEX],
                                   &vc5->verttex);

        /* See GFXH-930 workaround below */
        uint32_t num_elements_to_emit = MAX2(vtx->num_elements, 1);
        uint32_t shader_rec_offset =
                vc5_cl_ensure_space(&job->indirect,
                                    cl_packet_length(GL_SHADER_STATE_RECORD) +
                                    num_elements_to_emit *
                                    cl_packet_length(GL_SHADER_STATE_ATTRIBUTE_RECORD),
                                    32);

        cl_emit(&job->indirect, GL_SHADER_STATE_RECORD, shader) {
                shader.enable_clipping = true;
                /* VC5_DIRTY_PRIM_MODE | VC5_DIRTY_RASTERIZER */
                shader.point_size_in_shaded_vertex_data =
                        (info->mode == PIPE_PRIM_POINTS &&
                         vc5->rasterizer->base.point_size_per_vertex);

                /* Must be set if the shader modifies Z, discards, or modifies
                 * the sample mask. For any of these cases, the fragment
                 * shader needs to write the Z value (even just discards).
                 */
                shader.fragment_shader_does_z_writes =
                        (vc5->prog.fs->prog_data.fs->writes_z ||
                         vc5->prog.fs->prog_data.fs->discard);

                shader.number_of_varyings_in_fragment_shader =
                        vc5->prog.fs->prog_data.base->num_inputs;

                shader.propagate_nans = true;

                shader.coordinate_shader_code_address =
                        cl_address(vc5->prog.cs->bo, 0);
                shader.vertex_shader_code_address =
                        cl_address(vc5->prog.vs->bo, 0);
                shader.fragment_shader_code_address =
                        cl_address(vc5->prog.fs->bo, 0);

                /* XXX: Use combined input/output size flag in the common
                 * case.
                 */
                shader.coordinate_shader_has_separate_input_and_output_vpm_blocks = true;
                shader.vertex_shader_has_separate_input_and_output_vpm_blocks = true;
                shader.coordinate_shader_input_vpm_segment_size =
                        MAX2(vc5->prog.cs->prog_data.vs->vpm_input_size, 1);
                shader.vertex_shader_input_vpm_segment_size =
                        MAX2(vc5->prog.vs->prog_data.vs->vpm_input_size, 1);

                shader.coordinate_shader_output_vpm_segment_size =
                        vc5->prog.cs->prog_data.vs->vpm_output_size;
                shader.vertex_shader_output_vpm_segment_size =
                        vc5->prog.vs->prog_data.vs->vpm_output_size;

                shader.coordinate_shader_uniforms_address = cs_uniforms;
                shader.vertex_shader_uniforms_address = vs_uniforms;
                shader.fragment_shader_uniforms_address = fs_uniforms;

#if V3D_VERSION >= 41
                shader.coordinate_shader_4_way_threadable =
                        vc5->prog.cs->prog_data.vs->base.threads == 4;
                shader.vertex_shader_4_way_threadable =
                        vc5->prog.vs->prog_data.vs->base.threads == 4;
                shader.fragment_shader_4_way_threadable =
                        vc5->prog.fs->prog_data.fs->base.threads == 4;

                shader.coordinate_shader_start_in_final_thread_section =
                        vc5->prog.cs->prog_data.vs->base.single_seg;
                shader.vertex_shader_start_in_final_thread_section =
                        vc5->prog.vs->prog_data.vs->base.single_seg;
                shader.fragment_shader_start_in_final_thread_section =
                        vc5->prog.fs->prog_data.fs->base.single_seg;
#else
                shader.coordinate_shader_4_way_threadable =
                        vc5->prog.cs->prog_data.vs->base.threads == 4;
                shader.coordinate_shader_2_way_threadable =
                        vc5->prog.cs->prog_data.vs->base.threads == 2;
                shader.vertex_shader_4_way_threadable =
                        vc5->prog.vs->prog_data.vs->base.threads == 4;
                shader.vertex_shader_2_way_threadable =
                        vc5->prog.vs->prog_data.vs->base.threads == 2;
                shader.fragment_shader_4_way_threadable =
                        vc5->prog.fs->prog_data.fs->base.threads == 4;
                shader.fragment_shader_2_way_threadable =
                        vc5->prog.fs->prog_data.fs->base.threads == 2;
#endif

                shader.vertex_id_read_by_coordinate_shader =
                        vc5->prog.cs->prog_data.vs->uses_vid;
                shader.instance_id_read_by_coordinate_shader =
                        vc5->prog.cs->prog_data.vs->uses_iid;
                shader.vertex_id_read_by_vertex_shader =
                        vc5->prog.vs->prog_data.vs->uses_vid;
                shader.instance_id_read_by_vertex_shader =
                        vc5->prog.vs->prog_data.vs->uses_iid;

                shader.address_of_default_attribute_values =
                        cl_address(vtx->default_attribute_values, 0);
        }

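        /* Emit one attribute record per vertex element, layered over the
         * prepacked contents of vtx->attrs[].
         */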
        for (int i = 0; i < vtx->num_elements; i++) {
                struct pipe_vertex_element *elem = &vtx->pipe[i];
                struct pipe_vertex_buffer *vb =
                        &vertexbuf->vb[elem->vertex_buffer_index];
                struct vc5_resource *rsc = vc5_resource(vb->buffer.resource);

                const uint32_t size =
                        cl_packet_length(GL_SHADER_STATE_ATTRIBUTE_RECORD);
                cl_emit_with_prepacked(&job->indirect,
                                       GL_SHADER_STATE_ATTRIBUTE_RECORD,
                                       &vtx->attrs[i * size], attr) {
                        attr.stride = vb->stride;
                        attr.address = cl_address(rsc->bo,
                                                  vb->buffer_offset +
                                                  elem->src_offset);
                        attr.number_of_values_read_by_coordinate_shader =
                                vc5->prog.cs->prog_data.vs->vattr_sizes[i];
                        attr.number_of_values_read_by_vertex_shader =
                                vc5->prog.vs->prog_data.vs->vattr_sizes[i];
#if V3D_VERSION >= 41
                        attr.maximum_index = 0xffffff;
#endif
                }
        }

        if (vtx->num_elements == 0) {
                /* GFXH-930: At least one attribute must be enabled and read
                 * by CS and VS. If we have no attributes being consumed by
                 * the shader, set up a dummy to be loaded into the VPM.
                 */
                cl_emit(&job->indirect, GL_SHADER_STATE_ATTRIBUTE_RECORD, attr) {
                        /* Valid address of data whose value will be unused. */
                        attr.address = cl_address(job->indirect.bo, 0);

                        attr.type = ATTRIBUTE_FLOAT;
                        attr.stride = 0;
                        attr.vec_size = 1;

                        attr.number_of_values_read_by_coordinate_shader = 1;
                        attr.number_of_values_read_by_vertex_shader = 1;
                }
        }

        cl_emit(&job->bcl, GL_SHADER_STATE, state) {
                state.address = cl_address(job->indirect.bo, shader_rec_offset);
                state.number_of_attribute_arrays = num_elements_to_emit;
        }

        vc5_bo_unreference(&cs_uniforms.bo);
        vc5_bo_unreference(&vs_uniforms.bo);
        vc5_bo_unreference(&fs_uniforms.bo);

        job->shader_rec_count++;
}

/**
 * Computes the various transform feedback statistics, since they can't be
 * recorded by CL packets.
 */
static void
vc5_tf_statistics_record(struct vc5_context *vc5,
                         const struct pipe_draw_info *info,
                         bool prim_tf)
{
        if (!vc5->active_queries)
                return;

        uint32_t prims = u_prims_for_vertices(info->mode, info->count);
        vc5->prims_generated += prims;

        if (prim_tf) {
                /* XXX: Only count if we didn't overflow. */
                vc5->tf_prims_generated += prims;
        }
}

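/**
 * The pipe_context::draw_vbo entry point: handles the software fallbacks
 * (primitive restart, primconvert for quads and above), then records the
 * draw into the current job's binning command list.
 */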
static void
vc5_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
        struct vc5_context *vc5 = vc5_context(pctx);

        if (!info->count_from_stream_output && !info->indirect &&
            !info->primitive_restart &&
            !u_trim_pipe_prim(info->mode, (unsigned*)&info->count))
                return;

        /* Fall back for weird desktop GL primitive restart values. */
        if (info->primitive_restart &&
            info->index_size) {
                uint32_t mask = ~0;

                switch (info->index_size) {
                case 2:
                        mask = 0xffff;
                        break;
                case 1:
                        mask = 0xff;
                        break;
                }

                if (info->restart_index != mask) {
                        util_draw_vbo_without_prim_restart(pctx, info);
                        return;
                }
        }

        if (info->mode >= PIPE_PRIM_QUADS) {
                util_primconvert_save_rasterizer_state(vc5->primconvert, &vc5->rasterizer->base);
                util_primconvert_draw_vbo(vc5->primconvert, info);
                perf_debug("Fallback conversion for %d %s vertices\n",
                           info->count, u_prim_name(info->mode));
                return;
        }

        /* Before setting up the draw, flush anything writing to the textures
         * that we read from.
         */
        vc5_predraw_check_textures(pctx, &vc5->verttex);
        vc5_predraw_check_textures(pctx, &vc5->fragtex);

        struct vc5_job *job = vc5_get_job_for_fbo(vc5);

        /* Get space to emit our draw call into the BCL, using a branch to
         * jump to a new BO if necessary.
         */
        vc5_cl_ensure_space_with_branch(&job->bcl, 256 /* XXX */);

        if (vc5->prim_mode != info->mode) {
                vc5->prim_mode = info->mode;
                vc5->dirty |= VC5_DIRTY_PRIM_MODE;
        }

        vc5_start_draw(vc5);
        vc5_update_compiled_shaders(vc5, info->mode);

        vc5_emit_state(pctx);

        if (vc5->dirty & (VC5_DIRTY_VTXBUF |
                          VC5_DIRTY_VTXSTATE |
                          VC5_DIRTY_PRIM_MODE |
                          VC5_DIRTY_RASTERIZER |
                          VC5_DIRTY_COMPILED_CS |
                          VC5_DIRTY_COMPILED_VS |
                          VC5_DIRTY_COMPILED_FS |
                          vc5->prog.cs->uniform_dirty_bits |
                          vc5->prog.vs->uniform_dirty_bits |
                          vc5->prog.fs->uniform_dirty_bits)) {
                vc5_emit_gl_shader_state(vc5, info);
        }

        vc5->dirty = 0;

        /* The Base Vertex/Base Instance packet sets those values to nonzero
         * for the next draw call only.
         */
        if (info->index_bias || info->start_instance) {
                cl_emit(&job->bcl, BASE_VERTEX_BASE_INSTANCE, base) {
                        base.base_instance = info->start_instance;
                        base.base_vertex = info->index_bias;
                }
        }

        uint32_t prim_tf_enable = 0;
#if V3D_VERSION < 40
        /* V3D 3.x: The HW only processes transform feedback on primitives
         * with the flag set.
         */
        if (vc5->streamout.num_targets)
                prim_tf_enable = (V3D_PRIM_POINTS_TF - V3D_PRIM_POINTS);
#endif

        vc5_tf_statistics_record(vc5, info, vc5->streamout.num_targets);

        /* Note that the primitive type fields match with OpenGL/gallium
         * definitions, up to but not including QUADS.
         */
        if (info->index_size) {
                uint32_t index_size = info->index_size;
                uint32_t offset = info->start * index_size;
                struct pipe_resource *prsc;
                if (info->has_user_indices) {
                        prsc = NULL;
                        u_upload_data(vc5->uploader, 0,
                                      info->count * info->index_size, 4,
                                      info->index.user,
                                      &offset, &prsc);
                } else {
                        prsc = info->index.resource;
                }
                struct vc5_resource *rsc = vc5_resource(prsc);

#if V3D_VERSION >= 40
                cl_emit(&job->bcl, INDEX_BUFFER_SETUP, ib) {
                        ib.address = cl_address(rsc->bo, 0);
                        ib.size = rsc->bo->size;
                }
#endif

                if (info->instance_count > 1) {
                        cl_emit(&job->bcl, INDEXED_INSTANCED_PRIMITIVE_LIST, prim) {
                                /* index_type is 0/1/2 for 1/2/4-byte indices. */
                                prim.index_type = ffs(info->index_size) - 1;
#if V3D_VERSION >= 40
                                prim.index_offset = offset;
#else /* V3D_VERSION < 40 */
                                prim.maximum_index = (1u << 31) - 1; /* XXX */
                                prim.address_of_indices_list =
                                        cl_address(rsc->bo, offset);
#endif /* V3D_VERSION < 40 */
                                prim.mode = info->mode | prim_tf_enable;
                                prim.enable_primitive_restarts = info->primitive_restart;

                                prim.number_of_instances = info->instance_count;
                                prim.instance_length = info->count;
                        }
                } else {
                        cl_emit(&job->bcl, INDEXED_PRIMITIVE_LIST, prim) {
                                prim.index_type = ffs(info->index_size) - 1;
                                prim.length = info->count;
#if V3D_VERSION >= 40
                                prim.index_offset = offset;
#else /* V3D_VERSION < 40 */
                                prim.maximum_index = (1u << 31) - 1; /* XXX */
                                prim.address_of_indices_list =
                                        cl_address(rsc->bo, offset);
#endif /* V3D_VERSION < 40 */
                                prim.mode = info->mode | prim_tf_enable;
                                prim.enable_primitive_restarts = info->primitive_restart;
                        }
                }

                job->draw_calls_queued++;

                if (info->has_user_indices)
                        pipe_resource_reference(&prsc, NULL);
        } else {
                if (info->instance_count > 1) {
                        cl_emit(&job->bcl, VERTEX_ARRAY_INSTANCED_PRIMITIVES, prim) {
                                prim.mode = info->mode | prim_tf_enable;
                                prim.index_of_first_vertex = info->start;
                                prim.number_of_instances = info->instance_count;
                                prim.instance_length = info->count;
                        }
                } else {
                        cl_emit(&job->bcl, VERTEX_ARRAY_PRIMITIVES, prim) {
                                prim.mode = info->mode | prim_tf_enable;
                                prim.length = info->count;
                                prim.index_of_first_vertex = info->start;
                        }
                }
        }
        job->draw_calls_queued++;

        if (vc5->zsa && job->zsbuf &&
            (vc5->zsa->base.depth.enabled ||
             vc5->zsa->base.stencil[0].enabled)) {
                struct vc5_resource *rsc = vc5_resource(job->zsbuf->texture);
                vc5_job_add_bo(job, rsc->bo);

                if (vc5->zsa->base.depth.enabled) {
                        job->resolve |= PIPE_CLEAR_DEPTH;
                        rsc->initialized_buffers = PIPE_CLEAR_DEPTH;

                        if (vc5->zsa->early_z_enable)
                                job->uses_early_z = true;
                }

                if (vc5->zsa->base.stencil[0].enabled) {
                        job->resolve |= PIPE_CLEAR_STENCIL;
                        rsc->initialized_buffers |= PIPE_CLEAR_STENCIL;
                }
        }

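        /* Flag the bound color buffers as being written by this job and
         * reference their BOs.
         */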
        for (int i = 0; i < VC5_MAX_DRAW_BUFFERS; i++) {
                uint32_t bit = PIPE_CLEAR_COLOR0 << i;

                if (job->resolve & bit || !job->cbufs[i])
                        continue;
                struct vc5_resource *rsc = vc5_resource(job->cbufs[i]->texture);

                job->resolve |= bit;
                vc5_job_add_bo(job, rsc->bo);
        }

        if (job->referenced_size > 768 * 1024 * 1024) {
                perf_debug("Flushing job with %dkb to try to free up memory\n",
                           job->referenced_size / 1024);
                vc5_flush(pctx);
        }

        if (V3D_DEBUG & V3D_DEBUG_ALWAYS_FLUSH)
                vc5_flush(pctx);
}

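/**
 * The pipe_context::clear entry point: stores the clear values and flags on
 * the job, so the clear is applied as part of tile rendering rather than by
 * drawing a quad with the 3D engine.
 */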
static void
vc5_clear(struct pipe_context *pctx, unsigned buffers,
          const union pipe_color_union *color, double depth, unsigned stencil)
{
        struct vc5_context *vc5 = vc5_context(pctx);
        struct vc5_job *job = vc5_get_job_for_fbo(vc5);

        /* We can't flag new buffers for clearing once we've queued draws. We
         * could avoid this by using the 3d engine to clear.
         */
        if (job->draw_calls_queued) {
                perf_debug("Flushing rendering to process new clear.\n");
                vc5_job_submit(vc5, job);
                job = vc5_get_job_for_fbo(vc5);
        }

        for (int i = 0; i < VC5_MAX_DRAW_BUFFERS; i++) {
                uint32_t bit = PIPE_CLEAR_COLOR0 << i;
                if (!(buffers & bit))
                        continue;

                struct pipe_surface *psurf = vc5->framebuffer.cbufs[i];
                struct vc5_surface *surf = vc5_surface(psurf);
                struct vc5_resource *rsc = vc5_resource(psurf->texture);

                union util_color uc;
                uint32_t internal_size = 4 << surf->internal_bpp;

                static union pipe_color_union swapped_color;
                if (vc5->swap_color_rb & (1 << i)) {
                        swapped_color.f[0] = color->f[2];
                        swapped_color.f[1] = color->f[1];
                        swapped_color.f[2] = color->f[0];
                        swapped_color.f[3] = color->f[3];
                        color = &swapped_color;
                }

                switch (surf->internal_type) {
                case V3D_INTERNAL_TYPE_8:
                        if (surf->format == PIPE_FORMAT_B4G4R4A4_UNORM) {
                                /* Our actual hardware layout is ABGR4444, but
                                 * we apply a swizzle when texturing to flip
                                 * things back around.
                                 */
                                util_pack_color(color->f, PIPE_FORMAT_A8R8G8B8_UNORM,
                                                &uc);
                        } else {
                                util_pack_color(color->f, PIPE_FORMAT_R8G8B8A8_UNORM,
                                                &uc);
                        }
                        memcpy(job->clear_color[i], uc.ui, internal_size);
                        break;
                case V3D_INTERNAL_TYPE_8I:
                case V3D_INTERNAL_TYPE_8UI:
                        job->clear_color[i][0] = ((color->ui[0] & 0xff) |
                                                  (color->ui[1] & 0xff) << 8 |
                                                  (color->ui[2] & 0xff) << 16 |
                                                  (color->ui[3] & 0xff) << 24);
                        break;
                case V3D_INTERNAL_TYPE_16F:
                        util_pack_color(color->f, PIPE_FORMAT_R16G16B16A16_FLOAT,
                                        &uc);
                        memcpy(job->clear_color[i], uc.ui, internal_size);
                        break;
                case V3D_INTERNAL_TYPE_16I:
                case V3D_INTERNAL_TYPE_16UI:
                        job->clear_color[i][0] = ((color->ui[0] & 0xffff) |
                                                  color->ui[1] << 16);
                        job->clear_color[i][1] = ((color->ui[2] & 0xffff) |
                                                  color->ui[3] << 16);
                        break;
                case V3D_INTERNAL_TYPE_32F:
                case V3D_INTERNAL_TYPE_32I:
                case V3D_INTERNAL_TYPE_32UI:
                        memcpy(job->clear_color[i], color->ui, internal_size);
                        break;
                }

                rsc->initialized_buffers |= bit;
        }

        unsigned zsclear = buffers & PIPE_CLEAR_DEPTHSTENCIL;
        if (zsclear) {
                struct vc5_resource *rsc =
                        vc5_resource(vc5->framebuffer.zsbuf->texture);

                if (zsclear & PIPE_CLEAR_DEPTH)
                        job->clear_z = depth;
                if (zsclear & PIPE_CLEAR_STENCIL)
                        job->clear_s = stencil;

                rsc->initialized_buffers |= zsclear;
        }

        job->draw_min_x = 0;
        job->draw_min_y = 0;
        job->draw_max_x = vc5->framebuffer.width;
        job->draw_max_y = vc5->framebuffer.height;
        job->cleared |= buffers;
        job->resolve |= buffers;

        vc5_start_draw(vc5);
}

static void
vc5_clear_render_target(struct pipe_context *pctx, struct pipe_surface *ps,
                        const union pipe_color_union *color,
                        unsigned x, unsigned y, unsigned w, unsigned h,
                        bool render_condition_enabled)
{
        fprintf(stderr, "unimpl: clear RT\n");
}

static void
vc5_clear_depth_stencil(struct pipe_context *pctx, struct pipe_surface *ps,
                        unsigned buffers, double depth, unsigned stencil,
                        unsigned x, unsigned y, unsigned w, unsigned h,
                        bool render_condition_enabled)
{
        fprintf(stderr, "unimpl: clear DS\n");
}

void
v3dX(draw_init)(struct pipe_context *pctx)
{
        pctx->draw_vbo = vc5_draw_vbo;
        pctx->clear = vc5_clear;
        pctx->clear_render_target = vc5_clear_render_target;
        pctx->clear_depth_stencil = vc5_clear_depth_stencil;
}