v3d: Add a separate flag for CLIF ABI output versus human-readable CLs.
[mesa.git] / src / gallium / drivers / vc4 / vc4_draw.c
1 /*
2 * Copyright (c) 2014 Scott Mansell
3 * Copyright © 2014 Broadcom
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include "util/u_blitter.h"
26 #include "util/u_prim.h"
27 #include "util/u_format.h"
28 #include "util/u_pack_color.h"
29 #include "util/u_upload_mgr.h"
30 #include "indices/u_primconvert.h"
31
32 #include "vc4_context.h"
33 #include "vc4_resource.h"
34
35 #define VC4_HW_2116_COUNT 0x1ef0
36
37 static void
38 vc4_get_draw_cl_space(struct vc4_job *job, int vert_count)
39 {
40 /* The SW-5891 workaround may cause us to emit multiple shader recs
41 * and draw packets.
42 */
43 int num_draws = DIV_ROUND_UP(vert_count, 65535 - 2) + 1;
44
45 /* Binner gets our packet state -- vc4_emit.c contents,
46 * and the primitive itself.
47 */
48 cl_ensure_space(&job->bcl,
49 256 + (VC4_PACKET_GL_ARRAY_PRIMITIVE_SIZE +
50 VC4_PACKET_GL_SHADER_STATE_SIZE) * num_draws);
51
52 /* Nothing for rcl -- that's covered by vc4_context.c */
53
54 /* shader_rec gets up to 12 dwords of reloc handles plus a maximally
55 * sized shader_rec (104 bytes base for 8 vattrs plus 32 bytes of
56 * vattr stride).
57 */
58 cl_ensure_space(&job->shader_rec,
59 (12 * sizeof(uint32_t) + 104 + 8 * 32) * num_draws);
60
61 /* Uniforms are covered by vc4_write_uniforms(). */
62
63 /* There could be up to 16 textures per stage, plus misc other
64 * pointers.
65 */
66 cl_ensure_space(&job->bo_handles, (2 * 16 + 20) * sizeof(uint32_t));
67 cl_ensure_space(&job->bo_pointers,
68 (2 * 16 + 20) * sizeof(struct vc4_bo *));
69 }
70
/**
 * Does the initial binning command list setup for drawing to a given FBO.
 *
 * Only runs once per job: later draws see job->needs_flush already set and
 * return immediately.
 */
static void
vc4_start_draw(struct vc4_context *vc4)
{
        struct vc4_job *job = vc4->job;

        /* Binning setup was already emitted for this job. */
        if (job->needs_flush)
                return;

        vc4_get_draw_cl_space(job, 0);

        cl_emit(&job->bcl, TILE_BINNING_MODE_CONFIGURATION, bin) {
                bin.width_in_tiles = job->draw_tiles_x;
                bin.height_in_tiles = job->draw_tiles_y;
                bin.multisample_mode_4x = job->msaa;
        }

        /* START_TILE_BINNING resets the statechange counters in the hardware,
         * which are what is used when a primitive is binned to a tile to
         * figure out what new state packets need to be written to that tile's
         * command list.
         */
        cl_emit(&job->bcl, START_TILE_BINNING, start);

        /* Reset the current compressed primitives format. This gets modified
         * by VC4_PACKET_GL_INDEXED_PRIMITIVE and
         * VC4_PACKET_GL_ARRAY_PRIMITIVE, so it needs to be reset at the start
         * of every tile.
         */
        cl_emit(&job->bcl, PRIMITIVE_LIST_FORMAT, list) {
                list.data_type = _16_BIT_INDEX;
                list.primitive_type = TRIANGLES_LIST;
        }

        /* Record that binning setup is done and remember the FBO size the
         * binning config was emitted for.
         */
        job->needs_flush = true;
        job->draw_width = vc4->framebuffer.width;
        job->draw_height = vc4->framebuffer.height;
}
111
112 static void
113 vc4_predraw_check_textures(struct pipe_context *pctx,
114 struct vc4_texture_stateobj *stage_tex)
115 {
116 struct vc4_context *vc4 = vc4_context(pctx);
117
118 for (int i = 0; i < stage_tex->num_textures; i++) {
119 struct vc4_sampler_view *view =
120 vc4_sampler_view(stage_tex->textures[i]);
121 if (!view)
122 continue;
123
124 if (view->texture != view->base.texture)
125 vc4_update_shadow_baselevel_texture(pctx, &view->base);
126
127 vc4_flush_jobs_writing_resource(vc4, view->texture);
128 }
129 }
130
/**
 * Emits the shader record (FS/VS/CS setup plus one attribute record per
 * vertex element) and the GL_SHADER_STATE packet for the current draw, then
 * writes the uniform streams for all three stages.
 *
 * extra_index_bias is added on top of info->index_bias when computing
 * attribute start offsets; it is used by the SW-5891 drawarrays split to
 * point the attributes farther down the vertex arrays.
 */
static void
vc4_emit_gl_shader_state(struct vc4_context *vc4,
                         const struct pipe_draw_info *info,
                         uint32_t extra_index_bias)
{
        struct vc4_job *job = vc4->job;
        /* VC4_DIRTY_VTXSTATE */
        struct vc4_vertex_stateobj *vtx = vc4->vtx;
        /* VC4_DIRTY_VTXBUF */
        struct vc4_vertexbuf_stateobj *vertexbuf = &vc4->vertexbuf;

        /* The simulator throws a fit if VS or CS don't read an attribute, so
         * we emit a dummy read.
         */
        uint32_t num_elements_emit = MAX2(vtx->num_elements, 1);

        /* Emit the shader record. */
        cl_start_shader_reloc(&job->shader_rec, 3 + num_elements_emit);

        cl_emit(&job->shader_rec, SHADER_RECORD, rec) {
                rec.enable_clipping = true;

                /* VC4_DIRTY_COMPILED_FS */
                rec.fragment_shader_is_single_threaded =
                        !vc4->prog.fs->fs_threaded;

                /* VC4_DIRTY_PRIM_MODE | VC4_DIRTY_RASTERIZER */
                rec.point_size_included_in_shaded_vertex_data =
                         (info->mode == PIPE_PRIM_POINTS &&
                          vc4->rasterizer->base.point_size_per_vertex);

                /* VC4_DIRTY_COMPILED_FS */
                rec.fragment_shader_number_of_varyings =
                        vc4->prog.fs->num_inputs;
                rec.fragment_shader_code_address =
                        cl_address(vc4->prog.fs->bo, 0);

                rec.coordinate_shader_attribute_array_select_bits =
                        vc4->prog.cs->vattrs_live;
                rec.coordinate_shader_total_attributes_size =
                        vc4->prog.cs->vattr_offsets[8];
                rec.coordinate_shader_code_address =
                        cl_address(vc4->prog.cs->bo, 0);

                rec.vertex_shader_attribute_array_select_bits =
                        vc4->prog.vs->vattrs_live;
                rec.vertex_shader_total_attributes_size =
                        vc4->prog.vs->vattr_offsets[8];
                rec.vertex_shader_code_address =
                        cl_address(vc4->prog.vs->bo, 0);
        };

        /* Emit one attribute record per vertex element, and track the
         * largest index that stays within every bound vertex buffer.
         */
        uint32_t max_index = 0xffff;
        for (int i = 0; i < vtx->num_elements; i++) {
                struct pipe_vertex_element *elem = &vtx->pipe[i];
                struct pipe_vertex_buffer *vb =
                        &vertexbuf->vb[elem->vertex_buffer_index];
                struct vc4_resource *rsc = vc4_resource(vb->buffer.resource);
                /* not vc4->dirty tracked: vc4->last_index_bias */
                uint32_t offset = (vb->buffer_offset +
                                   elem->src_offset +
                                   vb->stride * (info->index_bias +
                                                 extra_index_bias));
                uint32_t vb_size = rsc->bo->size - offset;
                uint32_t elem_size =
                        util_format_get_blocksize(elem->src_format);

                cl_emit(&job->shader_rec, ATTRIBUTE_RECORD, attr) {
                        attr.address = cl_address(rsc->bo, offset);
                        attr.number_of_bytes_minus_1 = elem_size - 1;
                        attr.stride = vb->stride;
                        attr.coordinate_shader_vpm_offset =
                                vc4->prog.cs->vattr_offsets[i];
                        attr.vertex_shader_vpm_offset =
                                vc4->prog.vs->vattr_offsets[i];
                }

                /* Stride 0 means a constant attribute, which imposes no
                 * index limit.
                 */
                if (vb->stride > 0) {
                        max_index = MIN2(max_index,
                                         (vb_size - elem_size) / vb->stride);
                }
        }

        /* Dummy attribute read for the simulator (see num_elements_emit). */
        if (vtx->num_elements == 0) {
                assert(num_elements_emit == 1);
                struct vc4_bo *bo = vc4_bo_alloc(vc4->screen, 4096, "scratch VBO");

                /* NOTE(review): the allocation reference on this scratch bo
                 * doesn't appear to be released here -- confirm whether the
                 * shader_rec reloc takes its own reference (possible leak).
                 */
                cl_emit(&job->shader_rec, ATTRIBUTE_RECORD, attr) {
                        attr.address = cl_address(bo, 0);
                        attr.number_of_bytes_minus_1 = 16 - 1;
                        attr.stride = 0;
                        attr.coordinate_shader_vpm_offset = 0;
                        attr.vertex_shader_vpm_offset = 0;
                }
        }

        cl_emit(&job->bcl, GL_SHADER_STATE, shader_state) {
                /* Note that number of attributes == 0 in the packet means 8
                 * attributes. This field also contains the offset into
                 * shader_rec.
                 */
                assert(vtx->num_elements <= 8);
                shader_state.number_of_attribute_arrays =
                        num_elements_emit & 0x7;
        }

        /* CS shares the vertex-stage constant buffers and textures. */
        vc4_write_uniforms(vc4, vc4->prog.fs,
                           &vc4->constbuf[PIPE_SHADER_FRAGMENT],
                           &vc4->fragtex);
        vc4_write_uniforms(vc4, vc4->prog.vs,
                           &vc4->constbuf[PIPE_SHADER_VERTEX],
                           &vc4->verttex);
        vc4_write_uniforms(vc4, vc4->prog.cs,
                           &vc4->constbuf[PIPE_SHADER_VERTEX],
                           &vc4->verttex);

        vc4->last_index_bias = info->index_bias + extra_index_bias;
        vc4->max_index = max_index;
        job->shader_rec_count++;
}
251
252 /**
253 * HW-2116 workaround: Flush the batch before triggering the hardware state
254 * counter wraparound behavior.
255 *
256 * State updates are tracked by a global counter which increments at the first
257 * state update after a draw or a START_BINNING. Tiles can then have their
258 * state updated at draw time with a set of cheap checks for whether the
259 * state's copy of the global counter matches the global counter the last time
260 * that state was written to the tile.
261 *
262 * The state counters are relatively small and wrap around quickly, so you
263 * could get false negatives for needing to update a particular state in the
264 * tile. To avoid this, the hardware attempts to write all of the state in
265 * the tile at wraparound time. This apparently is broken, so we just flush
266 * everything before that behavior is triggered. A batch flush is sufficient
267 * to get our current contents drawn and reset the counters to 0.
268 *
269 * Note that we can't just use VC4_PACKET_FLUSH_ALL, because that caps the
270 * tiles with VC4_PACKET_RETURN_FROM_LIST.
271 */
272 static void
273 vc4_hw_2116_workaround(struct pipe_context *pctx, int vert_count)
274 {
275 struct vc4_context *vc4 = vc4_context(pctx);
276 struct vc4_job *job = vc4_get_job_for_fbo(vc4);
277
278 if (job->draw_calls_queued + vert_count / 65535 >= VC4_HW_2116_COUNT) {
279 perf_debug("Flushing batch due to HW-2116 workaround "
280 "(too many draw calls per scene\n");
281 vc4_job_submit(vc4, job);
282 }
283 }
284
/**
 * pipe_context::draw_vbo implementation: validates state, emits the shader
 * state and a GL indexed/array primitive packet into the current job's BCL.
 *
 * Handles the HW-2116 (too many draws per scene) and GFXH-515/SW-5891
 * (>64k-vertex drawarrays) hardware workarounds, and falls back to
 * u_primconvert for primitive types the hardware can't draw directly.
 */
static void
vc4_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct pipe_draw_info local_info;

        /* Trim the vertex count to a multiple of the primitive size; bail
         * if nothing drawable remains. (The cast drops const to let
         * u_trim_pipe_prim write the trimmed count back.)
         */
        if (!info->count_from_stream_output && !info->indirect &&
            !info->primitive_restart &&
            !u_trim_pipe_prim(info->mode, (unsigned*)&info->count))
                return;

        if (info->mode >= PIPE_PRIM_QUADS) {
                /* Fast path: a single non-flatshaded quad draws identically
                 * as a triangle fan, no primconvert needed.
                 */
                if (info->mode == PIPE_PRIM_QUADS &&
                    info->count == 4 &&
                    !vc4->rasterizer->base.flatshade) {
                        local_info = *info;
                        local_info.mode = PIPE_PRIM_TRIANGLE_FAN;
                        info = &local_info;
                } else {
                        util_primconvert_save_rasterizer_state(vc4->primconvert, &vc4->rasterizer->base);
                        util_primconvert_draw_vbo(vc4->primconvert, info);
                        perf_debug("Fallback conversion for %d %s vertices\n",
                                   info->count, u_prim_name(info->mode));
                        return;
                }
        }

        /* Before setting up the draw, do any fixup blits necessary. */
        vc4_predraw_check_textures(pctx, &vc4->verttex);
        vc4_predraw_check_textures(pctx, &vc4->fragtex);

        vc4_hw_2116_workaround(pctx, info->count);

        struct vc4_job *job = vc4_get_job_for_fbo(vc4);

        /* Make sure that the raster order flags haven't changed, which can
         * only be set at job granularity.
         */
        if (job->flags != vc4->rasterizer->tile_raster_order_flags) {
                vc4_job_submit(vc4, job);
                job = vc4_get_job_for_fbo(vc4);
        }

        vc4_get_draw_cl_space(job, info->count);

        if (vc4->prim_mode != info->mode) {
                vc4->prim_mode = info->mode;
                vc4->dirty |= VC4_DIRTY_PRIM_MODE;
        }

        vc4_start_draw(vc4);
        if (!vc4_update_compiled_shaders(vc4, info->mode)) {
                debug_warn_once("shader compile failed, skipping draw call.\n");
                return;
        }

        vc4_emit_state(pctx);

        /* Re-emit the shader record if anything it captures changed.  For
         * drawarrays we defer it into the loop below, since the SW-5891
         * split may need to re-emit it per sub-draw anyway.
         */
        bool needs_drawarrays_shader_state = false;
        if ((vc4->dirty & (VC4_DIRTY_VTXBUF |
                           VC4_DIRTY_VTXSTATE |
                           VC4_DIRTY_PRIM_MODE |
                           VC4_DIRTY_RASTERIZER |
                           VC4_DIRTY_COMPILED_CS |
                           VC4_DIRTY_COMPILED_VS |
                           VC4_DIRTY_COMPILED_FS |
                           vc4->prog.cs->uniform_dirty_bits |
                           vc4->prog.vs->uniform_dirty_bits |
                           vc4->prog.fs->uniform_dirty_bits)) ||
            vc4->last_index_bias != info->index_bias) {
                if (info->index_size)
                        vc4_emit_gl_shader_state(vc4, info, 0);
                else
                        needs_drawarrays_shader_state = true;
        }

        vc4->dirty = 0;

        /* Note that the primitive type fields match with OpenGL/gallium
         * definitions, up to but not including QUADS.
         */
        if (info->index_size) {
                uint32_t index_size = info->index_size;
                uint32_t offset = info->start * index_size;
                struct pipe_resource *prsc;
                /* The hardware has no 32-bit index support, so 4-byte
                 * indices go through a shadow buffer converted to 16-bit.
                 */
                if (info->index_size == 4) {
                        prsc = vc4_get_shadow_index_buffer(pctx, info,
                                                           offset,
                                                           info->count, &offset);
                        index_size = 2;
                } else {
                        if (info->has_user_indices) {
                                prsc = NULL;
                                u_upload_data(vc4->uploader, 0,
                                              info->count * index_size, 4,
                                              info->index.user,
                                              &offset, &prsc);
                        } else {
                                prsc = info->index.resource;
                        }
                }
                struct vc4_resource *rsc = vc4_resource(prsc);

                struct vc4_cl_out *bcl = cl_start(&job->bcl);

                /* The original design for the VC4 kernel UABI had multiple
                 * packets that used relocations in the BCL (some of which
                 * needed two BOs), but later modifications eliminated all but
                 * this one usage. We have an arbitrary 32-bit offset value,
                 * and need to also supply an arbitrary 32-bit index buffer
                 * GEM handle, so we have this fake packet we emit in our BCL
                 * to be validated, which the kernel uses at validation time
                 * to perform the relocation in the IB packet (without
                 * emitting to the actual HW).
                 */
                uint32_t hindex = vc4_gem_hindex(job, rsc->bo);
                if (job->last_gem_handle_hindex != hindex) {
                        cl_u8(&bcl, VC4_PACKET_GEM_HANDLES);
                        cl_u32(&bcl, hindex);
                        cl_u32(&bcl, 0);
                        job->last_gem_handle_hindex = hindex;
                }

                cl_u8(&bcl, VC4_PACKET_GL_INDEXED_PRIMITIVE);
                cl_u8(&bcl,
                      info->mode |
                      (index_size == 2 ?
                       VC4_INDEX_BUFFER_U16:
                       VC4_INDEX_BUFFER_U8));
                cl_u32(&bcl, info->count);
                cl_u32(&bcl, offset);
                cl_u32(&bcl, vc4->max_index);

                cl_end(&job->bcl, bcl);
                job->draw_calls_queued++;

                /* Drop the reference the shadow/upload path created; a
                 * direct info->index.resource is caller-owned.
                 */
                if (info->index_size == 4 || info->has_user_indices)
                        pipe_resource_reference(&prsc, NULL);
        } else {
                uint32_t count = info->count;
                uint32_t start = info->start;
                uint32_t extra_index_bias = 0;
                static const uint32_t max_verts = 65535;

                /* GFXH-515 / SW-5891: The binner emits 16 bit indices for
                 * drawarrays, which means that if start + count > 64k it
                 * would truncate the top bits. Work around this by emitting
                 * a limited number of primitives at a time and reemitting the
                 * shader state pointing farther down the vertex attribute
                 * arrays.
                 *
                 * To do this properly for line loops or trifans, we'd need to
                 * make a new VB containing the first vertex plus whatever
                 * remainder.
                 */
                if (start + count > max_verts) {
                        extra_index_bias = start;
                        start = 0;
                        needs_drawarrays_shader_state = true;
                }

                while (count) {
                        /* this_count: vertices emitted by this sub-draw.
                         * step: vertices consumed from count (smaller for
                         * strips/fans, which must re-emit overlap verts).
                         */
                        uint32_t this_count = count;
                        uint32_t step = count;

                        if (needs_drawarrays_shader_state) {
                                vc4_emit_gl_shader_state(vc4, info,
                                                         extra_index_bias);
                        }

                        if (count > max_verts) {
                                switch (info->mode) {
                                case PIPE_PRIM_POINTS:
                                        this_count = step = max_verts;
                                        break;
                                case PIPE_PRIM_LINES:
                                        this_count = step = max_verts - (max_verts % 2);
                                        break;
                                case PIPE_PRIM_LINE_STRIP:
                                        this_count = max_verts;
                                        step = max_verts - 1;
                                        break;
                                case PIPE_PRIM_LINE_LOOP:
                                        this_count = max_verts;
                                        step = max_verts - 1;
                                        debug_warn_once("unhandled line loop "
                                                        "looping behavior with "
                                                        ">65535 verts\n");
                                        break;
                                case PIPE_PRIM_TRIANGLES:
                                        this_count = step = max_verts - (max_verts % 3);
                                        break;
                                case PIPE_PRIM_TRIANGLE_STRIP:
                                        this_count = max_verts;
                                        step = max_verts - 2;
                                        break;
                                default:
                                        debug_warn_once("unhandled primitive "
                                                        "max vert count, truncating\n");
                                        this_count = step = max_verts;
                                }
                        }

                        cl_emit(&job->bcl, VERTEX_ARRAY_PRIMITIVES, array) {
                                array.primitive_mode = info->mode;
                                array.length = this_count;
                                array.index_of_first_vertex = start;
                        }
                        job->draw_calls_queued++;

                        count -= step;
                        extra_index_bias += start + step;
                        start = 0;
                        needs_drawarrays_shader_state = true;
                }
        }

        /* We shouldn't have tripped the HW_2116 bug with the GFXH-515
         * workaround.
         */
        assert(job->draw_calls_queued <= VC4_HW_2116_COUNT);

        /* Track which ZS contents this draw produced so flushes know what
         * to store back.
         */
        if (vc4->zsa && vc4->framebuffer.zsbuf) {
                struct vc4_resource *rsc =
                        vc4_resource(vc4->framebuffer.zsbuf->texture);

                if (vc4->zsa->base.depth.enabled) {
                        job->resolve |= PIPE_CLEAR_DEPTH;
                        rsc->initialized_buffers = PIPE_CLEAR_DEPTH;
                }

                if (vc4->zsa->base.stencil[0].enabled) {
                        job->resolve |= PIPE_CLEAR_STENCIL;
                        rsc->initialized_buffers |= PIPE_CLEAR_STENCIL;
                }
        }

        job->resolve |= PIPE_CLEAR_COLOR0;

        /* If we've used half of the presumably 256MB CMA area, flush the job
         * so that we don't accumulate a job that will end up not being
         * executable.
         */
        if (job->bo_space > 128 * 1024 * 1024)
                vc4_flush(pctx);

        if (vc4_debug & VC4_DEBUG_ALWAYS_FLUSH)
                vc4_flush(pctx);
}
534
535 static uint32_t
536 pack_rgba(enum pipe_format format, const float *rgba)
537 {
538 union util_color uc;
539 util_pack_color(rgba, format, &uc);
540 if (util_format_get_blocksize(format) == 2)
541 return uc.us;
542 else
543 return uc.ui[0];
544 }
545
/**
 * pipe_context::clear implementation: records fast (tile-load-time) clears
 * on the current job.
 *
 * Falls back to a blitter quad for partial Z/S clears the tile clear can't
 * express, and flushes first if draws are already queued (tile clears apply
 * before all draws in a job).
 */
static void
vc4_clear(struct pipe_context *pctx, unsigned buffers,
          const union pipe_color_union *color, double depth, unsigned stencil)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_job *job = vc4_get_job_for_fbo(vc4);

        if (buffers & PIPE_CLEAR_DEPTHSTENCIL) {
                struct vc4_resource *rsc =
                        vc4_resource(vc4->framebuffer.zsbuf->texture);
                unsigned zsclear = buffers & PIPE_CLEAR_DEPTHSTENCIL;

                /* Clearing ZS will clear both Z and stencil, so if we're
                 * trying to clear just one then we need to draw a quad to do
                 * it instead. We need to do this before setting up
                 * tile-based clears in vc4->job, because the blitter may
                 * submit the current job.
                 */
                if ((zsclear == PIPE_CLEAR_DEPTH ||
                     zsclear == PIPE_CLEAR_STENCIL) &&
                    (rsc->initialized_buffers & ~(zsclear | job->cleared)) &&
                    util_format_is_depth_and_stencil(vc4->framebuffer.zsbuf->format)) {
                        static const union pipe_color_union dummy_color = {};

                        perf_debug("Partial clear of Z+stencil buffer, "
                                   "drawing a quad instead of fast clearing\n");
                        vc4_blitter_save(vc4);
                        util_blitter_clear(vc4->blitter,
                                           vc4->framebuffer.width,
                                           vc4->framebuffer.height,
                                           1,
                                           zsclear,
                                           &dummy_color, depth, stencil);
                        buffers &= ~zsclear;
                        if (!buffers)
                                return;
                        /* The blitter may have submitted our job; reacquire. */
                        job = vc4_get_job_for_fbo(vc4);
                }
        }

        /* We can't flag new buffers for clearing once we've queued draws. We
         * could avoid this by using the 3d engine to clear.
         */
        if (job->draw_calls_queued) {
                perf_debug("Flushing rendering to process new clear.\n");
                vc4_job_submit(vc4, job);
                job = vc4_get_job_for_fbo(vc4);
        }

        if (buffers & PIPE_CLEAR_COLOR0) {
                struct vc4_resource *rsc =
                        vc4_resource(vc4->framebuffer.cbufs[0]->texture);
                uint32_t clear_color;

                if (vc4_rt_format_is_565(vc4->framebuffer.cbufs[0]->format)) {
                        /* In 565 mode, the hardware will be packing our color
                         * for us.
                         */
                        clear_color = pack_rgba(PIPE_FORMAT_R8G8B8A8_UNORM,
                                                color->f);
                } else {
                        /* Otherwise, we need to do this packing because we
                         * support multiple swizzlings of RGBA8888.
                         */
                        clear_color =
                                pack_rgba(vc4->framebuffer.cbufs[0]->format,
                                          color->f);
                }
                /* Both clear_color entries get the same value; presumably
                 * the second covers the 64bpp/MSAA tile layout -- TODO
                 * confirm against the tile store.
                 */
                job->clear_color[0] = job->clear_color[1] = clear_color;
                rsc->initialized_buffers |= (buffers & PIPE_CLEAR_COLOR0);
        }

        if (buffers & PIPE_CLEAR_DEPTHSTENCIL) {
                struct vc4_resource *rsc =
                        vc4_resource(vc4->framebuffer.zsbuf->texture);

                /* Though the depth buffer is stored with Z in the high 24,
                 * for this field we just need to store it in the low 24.
                 */
                if (buffers & PIPE_CLEAR_DEPTH) {
                        job->clear_depth = util_pack_z(PIPE_FORMAT_Z24X8_UNORM,
                                                       depth);
                }
                if (buffers & PIPE_CLEAR_STENCIL)
                        job->clear_stencil = stencil;

                rsc->initialized_buffers |= (buffers & PIPE_CLEAR_DEPTHSTENCIL);
        }

        /* A clear covers the whole framebuffer, so open up the scissored
         * draw bounds accordingly.
         */
        job->draw_min_x = 0;
        job->draw_min_y = 0;
        job->draw_max_x = vc4->framebuffer.width;
        job->draw_max_y = vc4->framebuffer.height;
        job->cleared |= buffers;
        job->resolve |= buffers;

        vc4_start_draw(vc4);
}
644
/* pipe_context::clear_render_target hook (sub-rectangle color clear);
 * not implemented for vc4 -- just logs.
 */
static void
vc4_clear_render_target(struct pipe_context *pctx, struct pipe_surface *ps,
                        const union pipe_color_union *color,
                        unsigned x, unsigned y, unsigned w, unsigned h,
                        bool render_condition_enabled)
{
        fprintf(stderr, "unimpl: clear RT\n");
}
653
/* pipe_context::clear_depth_stencil hook (sub-rectangle Z/S clear);
 * not implemented for vc4 -- just logs.
 */
static void
vc4_clear_depth_stencil(struct pipe_context *pctx, struct pipe_surface *ps,
                        unsigned buffers, double depth, unsigned stencil,
                        unsigned x, unsigned y, unsigned w, unsigned h,
                        bool render_condition_enabled)
{
        fprintf(stderr, "unimpl: clear DS\n");
}
662
/**
 * Installs this file's draw and clear entrypoints in the context vtable.
 */
void
vc4_draw_init(struct pipe_context *pctx)
{
        pctx->draw_vbo = vc4_draw_vbo;
        pctx->clear = vc4_clear;
        pctx->clear_render_target = vc4_clear_render_target;
        pctx->clear_depth_stencil = vc4_clear_depth_stencil;
}