gallium: Add pipe cap for primitive restart with fixed index
[mesa.git] / src / gallium / drivers / v3d / v3dx_draw.c
1 /*
2 * Copyright © 2014-2017 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "util/u_blitter.h"
25 #include "util/u_prim.h"
26 #include "util/format/u_format.h"
27 #include "util/u_pack_color.h"
28 #include "util/u_prim_restart.h"
29 #include "util/u_upload_mgr.h"
30 #include "indices/u_primconvert.h"
31
32 #include "v3d_context.h"
33 #include "v3d_resource.h"
34 #include "v3d_cl.h"
35 #include "broadcom/compiler/v3d_compiler.h"
36 #include "broadcom/common/v3d_macros.h"
37 #include "broadcom/cle/v3dx_pack.h"
38
39 /**
40 * Does the initial binning command list setup for drawing to a given FBO.
41 */
42 static void
43 v3d_start_draw(struct v3d_context *v3d)
44 {
45 struct v3d_job *job = v3d->job;
46
47 if (job->needs_flush)
48 return;
49
50 /* Get space to emit our BCL state, using a branch to jump to a new BO
51 * if necessary.
52 */
53 v3d_cl_ensure_space_with_branch(&job->bcl, 256 /* XXX */);
54
55 job->submit.bcl_start = job->bcl.bo->offset;
56 v3d_job_add_bo(job, job->bcl.bo);
57
58 uint32_t fb_layers = util_framebuffer_get_num_layers(&v3d->framebuffer);
59
60 /* The PTB will request the initial tile alloc size per tile at the
61 * start of tile binning.
62 */
63 uint32_t tile_alloc_size =
64 MAX2(fb_layers, 1) * job->draw_tiles_x * job->draw_tiles_y * 64;
65
66 /* The PTB allocates in aligned 4k chunks after the initial setup. */
67 tile_alloc_size = align(tile_alloc_size, 4096);
68
69 /* Include the first two chunk allocations that the PTB does so that
70 * we definitely clear the OOM condition before triggering one (the HW
71 * won't trigger OOM during the first allocations).
72 */
73 tile_alloc_size += 8192;
74
75 /* For performance, allocate some extra initial memory after the PTB's
76 * minimal allocations, so that we hopefully don't have to block the
77 * GPU on the kernel handling an OOM signal.
78 */
79 tile_alloc_size += 512 * 1024;
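/* Worked example with hypothetical numbers (not from the source): a
 * single-layer job of 30x17 tiles needs 30 * 17 * 64 = 32640 bytes of
 * initial per-tile space, which rounds up to 32768 with the 4k
 * alignment; adding the 8192 bytes for the PTB's first two chunks and
 * the 512kb performance cushion gives a 565248-byte allocation.
 */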
80
81 job->tile_alloc = v3d_bo_alloc(v3d->screen, tile_alloc_size,
82 "tile_alloc");
83 uint32_t tsda_per_tile_size = v3d->screen->devinfo.ver >= 40 ? 256 : 64;
84 job->tile_state = v3d_bo_alloc(v3d->screen,
85 MAX2(fb_layers, 1) *
86 job->draw_tiles_y *
87 job->draw_tiles_x *
88 tsda_per_tile_size,
89 "TSDA");
90 #if V3D_VERSION >= 41
91 /* This must go before the binning mode configuration. It is
92 * required for layered framebuffers to work.
93 */
94 if (fb_layers > 0) {
95 cl_emit(&job->bcl, NUMBER_OF_LAYERS, config) {
96 config.number_of_layers = fb_layers;
97 }
98 }
99 #endif
100
101 #if V3D_VERSION >= 40
102 cl_emit(&job->bcl, TILE_BINNING_MODE_CFG, config) {
103 config.width_in_pixels = v3d->framebuffer.width;
104 config.height_in_pixels = v3d->framebuffer.height;
105 config.number_of_render_targets =
106 MAX2(v3d->framebuffer.nr_cbufs, 1);
107
108 config.multisample_mode_4x = job->msaa;
109
110 config.maximum_bpp_of_all_render_targets = job->internal_bpp;
111 }
112 #else /* V3D_VERSION < 40 */
113 /* "Binning mode lists start with a Tile Binning Mode Configuration
114 * item (120)"
115 *
116 * Part1 signals the end of binning config setup.
117 */
118 cl_emit(&job->bcl, TILE_BINNING_MODE_CFG_PART2, config) {
119 config.tile_allocation_memory_address =
120 cl_address(job->tile_alloc, 0);
121 config.tile_allocation_memory_size = job->tile_alloc->size;
122 }
123
124 cl_emit(&job->bcl, TILE_BINNING_MODE_CFG_PART1, config) {
125 config.tile_state_data_array_base_address =
126 cl_address(job->tile_state, 0);
127
128 config.width_in_tiles = job->draw_tiles_x;
129 config.height_in_tiles = job->draw_tiles_y;
130 /* Must be >= 1 */
131 config.number_of_render_targets =
132 MAX2(v3d->framebuffer.nr_cbufs, 1);
133
134 config.multisample_mode_4x = job->msaa;
135
136 config.maximum_bpp_of_all_render_targets = job->internal_bpp;
137 }
138 #endif /* V3D_VERSION < 40 */
139
140 /* There's definitely nothing in the VCD cache we want. */
141 cl_emit(&job->bcl, FLUSH_VCD_CACHE, bin);
142
143 /* Disable any leftover OQ state from another job. */
144 cl_emit(&job->bcl, OCCLUSION_QUERY_COUNTER, counter);
145
146 /* "Binning mode lists must have a Start Tile Binning item (6) after
147 * any prefix state data before the binning list proper starts."
148 */
149 cl_emit(&job->bcl, START_TILE_BINNING, bin);
150
151 job->needs_flush = true;
152 job->draw_width = v3d->framebuffer.width;
153 job->draw_height = v3d->framebuffer.height;
154 job->num_layers = fb_layers;
155 }
156
157 static void
158 v3d_predraw_check_stage_inputs(struct pipe_context *pctx,
159 enum pipe_shader_type s)
160 {
161 struct v3d_context *v3d = v3d_context(pctx);
162
163 /* Flush writes to textures we're sampling. */
164 for (int i = 0; i < v3d->tex[s].num_textures; i++) {
165 struct pipe_sampler_view *pview = v3d->tex[s].textures[i];
166 if (!pview)
167 continue;
168 struct v3d_sampler_view *view = v3d_sampler_view(pview);
169
170 if (view->texture != view->base.texture &&
171 view->base.format != PIPE_FORMAT_X32_S8X24_UINT)
172 v3d_update_shadow_texture(pctx, &view->base);
173
174 v3d_flush_jobs_writing_resource(v3d, view->texture,
175 V3D_FLUSH_DEFAULT,
176 s == PIPE_SHADER_COMPUTE);
177 }
178
179 /* Flush writes to UBOs. */
180 foreach_bit(i, v3d->constbuf[s].enabled_mask) {
181 struct pipe_constant_buffer *cb = &v3d->constbuf[s].cb[i];
182 if (cb->buffer) {
183 v3d_flush_jobs_writing_resource(v3d, cb->buffer,
184 V3D_FLUSH_DEFAULT,
185 s == PIPE_SHADER_COMPUTE);
186 }
187 }
188
189 /* Flush reads/writes to our SSBOs */
190 foreach_bit(i, v3d->ssbo[s].enabled_mask) {
191 struct pipe_shader_buffer *sb = &v3d->ssbo[s].sb[i];
192 if (sb->buffer) {
193 v3d_flush_jobs_reading_resource(v3d, sb->buffer,
194 V3D_FLUSH_NOT_CURRENT_JOB,
195 s == PIPE_SHADER_COMPUTE);
196 }
197 }
198
199 /* Flush reads/writes to our image views */
200 foreach_bit(i, v3d->shaderimg[s].enabled_mask) {
201 struct v3d_image_view *view = &v3d->shaderimg[s].si[i];
202
203 v3d_flush_jobs_reading_resource(v3d, view->base.resource,
204 V3D_FLUSH_NOT_CURRENT_JOB,
205 s == PIPE_SHADER_COMPUTE);
206 }
207
208 /* Flush writes to our vertex buffers (i.e. from transform feedback) */
209 if (s == PIPE_SHADER_VERTEX) {
210 foreach_bit(i, v3d->vertexbuf.enabled_mask) {
211 struct pipe_vertex_buffer *vb = &v3d->vertexbuf.vb[i];
212
213 v3d_flush_jobs_writing_resource(v3d, vb->buffer.resource,
214 V3D_FLUSH_DEFAULT,
215 false);
216 }
217 }
218 }
219
220 static void
221 v3d_predraw_check_outputs(struct pipe_context *pctx)
222 {
223 struct v3d_context *v3d = v3d_context(pctx);
224
225 /* Flush jobs reading from TF buffers that we are about to write. */
226 if (v3d_transform_feedback_enabled(v3d)) {
227 struct v3d_streamout_stateobj *so = &v3d->streamout;
228
229 for (int i = 0; i < so->num_targets; i++) {
230 if (!so->targets[i])
231 continue;
232
233 const struct pipe_stream_output_target *target =
234 so->targets[i];
235 v3d_flush_jobs_reading_resource(v3d, target->buffer,
236 V3D_FLUSH_DEFAULT,
237 false);
238 }
239 }
240 }
241
242 /**
243 * Checks if the state for the current draw reads a particular resource
244 * in the given shader stage.
245 */
246 static bool
247 v3d_state_reads_resource(struct v3d_context *v3d,
248 struct pipe_resource *prsc,
249 enum pipe_shader_type s)
250 {
251 struct v3d_resource *rsc = v3d_resource(prsc);
252
253 /* Vertex buffers */
254 if (s == PIPE_SHADER_VERTEX) {
255 foreach_bit(i, v3d->vertexbuf.enabled_mask) {
256 struct pipe_vertex_buffer *vb = &v3d->vertexbuf.vb[i];
257 if (!vb->buffer.resource)
258 continue;
259
260 struct v3d_resource *vb_rsc =
261 v3d_resource(vb->buffer.resource);
262 if (rsc->bo == vb_rsc->bo)
263 return true;
264 }
265 }
266
267 /* Constant buffers */
268 foreach_bit(i, v3d->constbuf[s].enabled_mask) {
269 struct pipe_constant_buffer *cb = &v3d->constbuf[s].cb[i];
270 if (!cb->buffer)
271 continue;
272
273 struct v3d_resource *cb_rsc = v3d_resource(cb->buffer);
274 if (rsc->bo == cb_rsc->bo)
275 return true;
276 }
277
278 /* Shader storage buffers */
279 foreach_bit(i, v3d->ssbo[s].enabled_mask) {
280 struct pipe_shader_buffer *sb = &v3d->ssbo[s].sb[i];
281 if (!sb->buffer)
282 continue;
283
284 struct v3d_resource *sb_rsc = v3d_resource(sb->buffer);
285 if (rsc->bo == sb_rsc->bo)
286 return true;
287 }
288
289 /* Textures */
290 for (int i = 0; i < v3d->tex[s].num_textures; i++) {
291 struct pipe_sampler_view *pview = v3d->tex[s].textures[i];
292 if (!pview)
293 continue;
294
295 struct v3d_sampler_view *view = v3d_sampler_view(pview);
296 struct v3d_resource *v_rsc = v3d_resource(view->texture);
297 if (rsc->bo == v_rsc->bo)
298 return true;
299 }
300
301 return false;
302 }
303
304 static void
305 v3d_emit_wait_for_tf(struct v3d_job *job)
306 {
307 /* XXX: we might be able to skip this in some cases, but for now we
308 * always emit it.
309 */
310 cl_emit(&job->bcl, FLUSH_TRANSFORM_FEEDBACK_DATA, flush);
311
312 cl_emit(&job->bcl, WAIT_FOR_TRANSFORM_FEEDBACK, wait) {
313 /* XXX: Wait for all outstanding writes... maybe we can do
314 * better in some cases.
315 */
316 wait.block_count = 255;
317 }
318
319 /* We have just flushed all our outstanding TF work in this job, so make
320 * sure we don't emit TF flushes for any of it again.
321 */
322 _mesa_set_clear(job->tf_write_prscs, NULL);
323 }
324
325 static void
326 v3d_emit_wait_for_tf_if_needed(struct v3d_context *v3d, struct v3d_job *job)
327 {
328 if (!job->tf_enabled)
329 return;
330
331 set_foreach(job->tf_write_prscs, entry) {
332 struct pipe_resource *prsc = (struct pipe_resource *)entry->key;
333 for (int s = 0; s < PIPE_SHADER_COMPUTE; s++) {
334 /* Fragment shaders can only start executing after all
335 * binning (and thus TF) is complete.
336 *
337 * XXX: For VS/GS/TES, if the binning shader does not
338 * read the resource then we could also avoid emitting
339 * the wait.
340 */
341 if (s == PIPE_SHADER_FRAGMENT)
342 continue;
343
344 if (v3d_state_reads_resource(v3d, prsc, s)) {
345 v3d_emit_wait_for_tf(job);
346 return;
347 }
348 }
349 }
350 }
351
352 struct vpm_config {
353 uint32_t As;
354 uint32_t Vc;
355 uint32_t Gs;
356 uint32_t Gd;
357 uint32_t Gv;
358 uint32_t Ve;
359 uint32_t gs_width;
360 };
361
362 #if V3D_VERSION >= 41
363 static void
364 v3d_emit_gs_state_record(struct v3d_job *job,
365 struct v3d_compiled_shader *gs_bin,
366 struct v3d_cl_reloc gs_bin_uniforms,
367 struct v3d_compiled_shader *gs,
368 struct v3d_cl_reloc gs_render_uniforms)
369 {
370 cl_emit(&job->indirect, GEOMETRY_SHADER_STATE_RECORD, shader) {
371 shader.geometry_bin_mode_shader_code_address =
372 cl_address(v3d_resource(gs_bin->resource)->bo,
373 gs_bin->offset);
374 shader.geometry_bin_mode_shader_4_way_threadable =
375 gs_bin->prog_data.gs->base.threads == 4;
376 shader.geometry_bin_mode_shader_start_in_final_thread_section =
377 gs_bin->prog_data.gs->base.single_seg;
378 shader.geometry_bin_mode_shader_propagate_nans = true;
379 shader.geometry_bin_mode_shader_uniforms_address =
380 gs_bin_uniforms;
381
382 shader.geometry_render_mode_shader_code_address =
383 cl_address(v3d_resource(gs->resource)->bo, gs->offset);
384 shader.geometry_render_mode_shader_4_way_threadable =
385 gs->prog_data.gs->base.threads == 4;
386 shader.geometry_render_mode_shader_start_in_final_thread_section =
387 gs->prog_data.gs->base.single_seg;
388 shader.geometry_render_mode_shader_propagate_nans = true;
389 shader.geometry_render_mode_shader_uniforms_address =
390 gs_render_uniforms;
391 }
392 }
393
394 static uint8_t
395 v3d_gs_output_primitive(uint32_t prim_type)
396 {
397 switch (prim_type) {
398 case GL_POINTS:
399 return GEOMETRY_SHADER_POINTS;
400 case GL_LINE_STRIP:
401 return GEOMETRY_SHADER_LINE_STRIP;
402 case GL_TRIANGLE_STRIP:
403 return GEOMETRY_SHADER_TRI_STRIP;
404 default:
405 unreachable("Unsupported primitive type");
406 }
407 }
408
409 static void
410 v3d_emit_tes_gs_common_params(struct v3d_job *job,
411 uint8_t gs_out_prim_type,
412 uint8_t gs_num_invocations)
413 {
414 /* This, and v3d_emit_tes_gs_shader_params below, fill in default
415 * values for tessellation fields even though we don't support
416 * tessellation yet because our packing functions (and the simulator)
417 * complain if we don't.
418 */
419 cl_emit(&job->indirect, TESSELLATION_GEOMETRY_COMMON_PARAMS, shader) {
420 shader.tessellation_type = TESSELLATION_TYPE_TRIANGLE;
421 shader.tessellation_point_mode = false;
422 shader.tessellation_edge_spacing = TESSELLATION_EDGE_SPACING_EVEN;
423 shader.tessellation_clockwise = true;
424 shader.tessellation_invocations = 1;
425
426 shader.geometry_shader_output_format =
427 v3d_gs_output_primitive(gs_out_prim_type);
428 shader.geometry_shader_instances = gs_num_invocations & 0x1F;
429 }
430 }
431
432 static uint8_t
433 simd_width_to_gs_pack_mode(uint32_t width)
434 {
435 switch (width) {
436 case 16:
437 return V3D_PACK_MODE_16_WAY;
438 case 8:
439 return V3D_PACK_MODE_8_WAY;
440 case 4:
441 return V3D_PACK_MODE_4_WAY;
442 case 1:
443 return V3D_PACK_MODE_1_WAY;
444 default:
445 unreachable("Invalid SIMD width");
446 };
447 }
448
449 static void
450 v3d_emit_tes_gs_shader_params(struct v3d_job *job,
451 uint32_t gs_simd,
452 uint32_t gs_vpm_output_size,
453 uint32_t gs_max_vpm_input_size_per_batch)
454 {
455 cl_emit(&job->indirect, TESSELLATION_GEOMETRY_SHADER_PARAMS, shader) {
456 shader.tcs_batch_flush_mode = V3D_TCS_FLUSH_MODE_FULLY_PACKED;
457 shader.per_patch_data_column_depth = 1;
458 shader.tcs_output_segment_size_in_sectors = 1;
459 shader.tcs_output_segment_pack_mode = V3D_PACK_MODE_16_WAY;
460 shader.tes_output_segment_size_in_sectors = 1;
461 shader.tes_output_segment_pack_mode = V3D_PACK_MODE_16_WAY;
462 shader.gs_output_segment_size_in_sectors = gs_vpm_output_size;
463 shader.gs_output_segment_pack_mode =
464 simd_width_to_gs_pack_mode(gs_simd);
465 shader.tbg_max_patches_per_tcs_batch = 1;
466 shader.tbg_max_extra_vertex_segs_for_patches_after_first = 0;
467 shader.tbg_min_tcs_output_segments_required_in_play = 1;
468 shader.tbg_min_per_patch_data_segments_required_in_play = 1;
469 shader.tpg_max_patches_per_tes_batch = 1;
470 shader.tpg_max_vertex_segments_per_tes_batch = 0;
471 shader.tpg_max_tcs_output_segments_per_tes_batch = 1;
472 shader.tpg_min_tes_output_segments_required_in_play = 1;
473 shader.gbg_max_tes_output_vertex_segments_per_gs_batch =
474 gs_max_vpm_input_size_per_batch;
475 shader.gbg_min_gs_output_segments_required_in_play = 1;
476 }
477 }
478
479 static inline uint32_t
480 compute_vpm_size_in_sectors(const struct v3d_device_info *devinfo)
481 {
482 assert(devinfo->vpm_size > 0);
483 const uint32_t sector_size = V3D_CHANNELS * sizeof(uint32_t) * 8;
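/* Assuming 16 channels, a sector is 16 * 4 * 8 = 512 bytes, so a
 * hypothetical 16kb VPM would report 32 sectors (channel count and VPM
 * size are device-dependent).
 */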
484 return devinfo->vpm_size / sector_size;
485 }
486
487 /* Computes various parameters affecting VPM memory configuration for programs
488 * involving geometry shaders to ensure the program fits in memory and honors
489 * requirements described in section "VPM usage" of the programming manual.
490 */
491 static void
492 compute_vpm_config_gs(struct v3d_device_info *devinfo,
493 struct v3d_vs_prog_data *vs,
494 struct v3d_gs_prog_data *gs,
495 struct vpm_config *vpm_cfg_out)
496 {
497 const uint32_t A = vs->separate_segments ? 1 : 0;
498 const uint32_t Ad = vs->vpm_input_size;
499 const uint32_t Vd = vs->vpm_output_size;
500
501 const uint32_t vpm_size = compute_vpm_size_in_sectors(devinfo);
502
503 /* Try to fit the program into our VPM memory budget by adjusting
504 * configurable parameters iteratively. We do this in two phases:
505 * the first phase tries to fit the program into the total available
506 * VPM memory. If we succeed at that, then the second phase attempts
507 * to fit the program into half of that budget so we can run bin and
508 * render programs in parallel.
509 */
510 struct vpm_config vpm_cfg[2];
511 struct vpm_config *final_vpm_cfg = NULL;
512 uint32_t phase = 0;
513
514 vpm_cfg[phase].As = 1;
515 vpm_cfg[phase].Gs = 1;
516 vpm_cfg[phase].Gd = gs->vpm_output_size;
517 vpm_cfg[phase].gs_width = gs->simd_width;
518
519 /* While there is a requirement that Vc >= [Vn / 16], this is
520 * always the case when tessellation is not present because in that
521 * case Vn can only be 6 at most (when input primitive is triangles
522 * with adjacency).
523 *
524 * We always choose Vc=2. We can't go lower than this due to GFXH-1744,
525 * and Broadcom has not found it worth it to increase it beyond this
526 * in general. Increasing Vc also increases VPM memory pressure, which
527 * can end up being detrimental to performance in some scenarios.
528 */
529 vpm_cfg[phase].Vc = 2;
530
531 /* Gv constrains the hardware not to exceed the specified number
532 * of vertex segments per GS batch. If adding a
533 * new primitive to a GS batch would result in a range of more
534 * than Gv vertex segments being referenced by the batch, then
535 * the hardware will flush the batch and start a new one. This
536 * means that we can choose any value we want, we just need to
537 * be aware that larger values improve GS batch utilization
538 * at the expense of more VPM memory pressure (which can affect
539 * other performance aspects, such as GS dispatch width).
540 * We start with the largest value, and will reduce it if we
541 * find that total memory pressure is too high.
542 */
543 vpm_cfg[phase].Gv = 3;
544 do {
545 /* When GS is present in the absence of TES, we need to satisfy
546 * Ve >= Gv. We go with the smallest value of Ve to avoid
547 * increasing memory pressure.
548 */
549 vpm_cfg[phase].Ve = vpm_cfg[phase].Gv;
550
551 uint32_t vpm_sectors =
552 A * vpm_cfg[phase].As * Ad +
553 (vpm_cfg[phase].Vc + vpm_cfg[phase].Ve) * Vd +
554 vpm_cfg[phase].Gs * vpm_cfg[phase].Gd;
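/* Sketch of the budget check with made-up sizes: a separate-segments
 * VS with Ad = 2 and Vd = 2 and a GS with Gd = 4 gives, on the first
 * iteration (As = 1, Vc = 2, Ve = Gv = 3, Gs = 1),
 * 1*1*2 + (2+3)*2 + 1*4 = 16 sectors, which fits in half of a
 * hypothetical 32-sector VPM, so that configuration would be accepted.
 */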
555
556 /* Ideally we want to use no more than half of the available
557 * memory so we can execute a bin and render program in parallel
558 * without stalls. If we achieved that then we are done.
559 */
560 if (vpm_sectors <= vpm_size / 2) {
561 final_vpm_cfg = &vpm_cfg[phase];
562 break;
563 }
564
565 /* At the very least, we should not allocate more than the
566 * total available VPM memory. If we have a configuration that
567 * succeeds at this, we save it and continue to see if we can
568 * meet the half-memory-use criterion too.
569 */
570 if (phase == 0 && vpm_sectors <= vpm_size) {
571 vpm_cfg[1] = vpm_cfg[0];
572 phase = 1;
573 }
574
575 /* Try lowering Gv */
576 if (vpm_cfg[phase].Gv > 0) {
577 vpm_cfg[phase].Gv--;
578 continue;
579 }
580
581 /* Try lowering GS dispatch width */
582 if (vpm_cfg[phase].gs_width > 1) {
583 do {
584 vpm_cfg[phase].gs_width >>= 1;
585 vpm_cfg[phase].Gd =
586 align(vpm_cfg[phase].Gd, 2) / 2;
587 } while (vpm_cfg[phase].gs_width == 2);
588
589 /* Reset Gv to max after dropping dispatch width */
590 vpm_cfg[phase].Gv = 3;
591 continue;
592 }
593
594 /* We ran out of options to reduce memory pressure. If we
595 * are at phase 1 we have at least a valid configuration, so
596 * we use that.
597 */
598 if (phase == 1)
599 final_vpm_cfg = &vpm_cfg[0];
600 break;
601 } while (true);
602
603 if (!final_vpm_cfg) {
604 /* FIXME: maybe return a boolean to indicate failure and use
605 * that to stop the submission for this draw call.
606 */
607 fprintf(stderr, "Failed to allocate VPM memory.\n");
608 abort();
609 }
610
611 assert(final_vpm_cfg);
612 assert(final_vpm_cfg->Gd <= 16);
613 assert(final_vpm_cfg->Gv < 4);
614 assert(final_vpm_cfg->Ve < 4);
615 assert(final_vpm_cfg->Vc >= 2 && final_vpm_cfg->Vc <= 4);
616 assert(final_vpm_cfg->gs_width == 1 ||
617 final_vpm_cfg->gs_width == 4 ||
618 final_vpm_cfg->gs_width == 8 ||
619 final_vpm_cfg->gs_width == 16);
620
621 *vpm_cfg_out = *final_vpm_cfg;
622 }
623 #endif
624
625 static void
626 v3d_emit_gl_shader_state(struct v3d_context *v3d,
627 const struct pipe_draw_info *info)
628 {
629 struct v3d_job *job = v3d->job;
630 /* VC5_DIRTY_VTXSTATE */
631 struct v3d_vertex_stateobj *vtx = v3d->vtx;
632 /* VC5_DIRTY_VTXBUF */
633 struct v3d_vertexbuf_stateobj *vertexbuf = &v3d->vertexbuf;
634
635 /* Upload the uniforms to the indirect CL first */
636 struct v3d_cl_reloc fs_uniforms =
637 v3d_write_uniforms(v3d, job, v3d->prog.fs,
638 PIPE_SHADER_FRAGMENT);
639
640 struct v3d_cl_reloc gs_uniforms = { NULL, 0 };
641 struct v3d_cl_reloc gs_bin_uniforms = { NULL, 0 };
642 if (v3d->prog.gs) {
643 gs_uniforms = v3d_write_uniforms(v3d, job, v3d->prog.gs,
644 PIPE_SHADER_GEOMETRY);
645 }
646 if (v3d->prog.gs_bin) {
647 gs_bin_uniforms = v3d_write_uniforms(v3d, job, v3d->prog.gs_bin,
648 PIPE_SHADER_GEOMETRY);
649 }
650
651 struct v3d_cl_reloc vs_uniforms =
652 v3d_write_uniforms(v3d, job, v3d->prog.vs,
653 PIPE_SHADER_VERTEX);
654 struct v3d_cl_reloc cs_uniforms =
655 v3d_write_uniforms(v3d, job, v3d->prog.cs,
656 PIPE_SHADER_VERTEX);
657
658 /* Update the cache dirty flag based on the shader progs data */
659 job->tmu_dirty_rcl |= v3d->prog.cs->prog_data.vs->base.tmu_dirty_rcl;
660 job->tmu_dirty_rcl |= v3d->prog.vs->prog_data.vs->base.tmu_dirty_rcl;
661 if (v3d->prog.gs_bin) {
662 job->tmu_dirty_rcl |=
663 v3d->prog.gs_bin->prog_data.gs->base.tmu_dirty_rcl;
664 }
665 if (v3d->prog.gs) {
666 job->tmu_dirty_rcl |=
667 v3d->prog.gs->prog_data.gs->base.tmu_dirty_rcl;
668 }
669 job->tmu_dirty_rcl |= v3d->prog.fs->prog_data.fs->base.tmu_dirty_rcl;
670
671 /* See GFXH-930 workaround below */
672 uint32_t num_elements_to_emit = MAX2(vtx->num_elements, 1);
673
674 uint32_t shader_state_record_length =
675 cl_packet_length(GL_SHADER_STATE_RECORD);
676 #if V3D_VERSION >= 41
677 if (v3d->prog.gs) {
678 shader_state_record_length +=
679 cl_packet_length(GEOMETRY_SHADER_STATE_RECORD) +
680 cl_packet_length(TESSELLATION_GEOMETRY_COMMON_PARAMS) +
681 2 * cl_packet_length(TESSELLATION_GEOMETRY_SHADER_PARAMS);
682 }
683 #endif
684
685 uint32_t shader_rec_offset =
686 v3d_cl_ensure_space(&job->indirect,
687 shader_state_record_length +
688 num_elements_to_emit *
689 cl_packet_length(GL_SHADER_STATE_ATTRIBUTE_RECORD),
690 32);
691
692 /* XXX perf: We should move most of the SHADER_STATE_RECORD setup to
693 * compile time, so that we mostly just have to OR the VS and FS
694 * records together at draw time.
695 */
696
697 struct vpm_config vpm_cfg_bin, vpm_cfg;
698
699 assert(v3d->screen->devinfo.ver >= 41 || !v3d->prog.gs);
700 if (!v3d->prog.gs) {
701 vpm_cfg_bin.As = 1;
702 vpm_cfg_bin.Ve = 0;
703 vpm_cfg_bin.Vc = v3d->prog.cs->prog_data.vs->vcm_cache_size;
704
705 vpm_cfg.As = 1;
706 vpm_cfg.Ve = 0;
707 vpm_cfg.Vc = v3d->prog.vs->prog_data.vs->vcm_cache_size;
708 }
709 #if V3D_VERSION >= 41
710 else {
711 v3d_emit_gs_state_record(v3d->job,
712 v3d->prog.gs_bin, gs_bin_uniforms,
713 v3d->prog.gs, gs_uniforms);
714
715 struct v3d_gs_prog_data *gs = v3d->prog.gs->prog_data.gs;
716 struct v3d_gs_prog_data *gs_bin = v3d->prog.gs_bin->prog_data.gs;
717
718 v3d_emit_tes_gs_common_params(v3d->job,
719 gs->out_prim_type,
720 gs->num_invocations);
721
722 /* Bin Tes/Gs params */
723 struct v3d_vs_prog_data *vs_bin = v3d->prog.cs->prog_data.vs;
724 compute_vpm_config_gs(&v3d->screen->devinfo,
725 vs_bin, gs_bin, &vpm_cfg_bin);
726
727 v3d_emit_tes_gs_shader_params(v3d->job,
728 vpm_cfg_bin.gs_width,
729 vpm_cfg_bin.Gd,
730 vpm_cfg_bin.Gv);
731
732 /* Render Tes/Gs params */
733 struct v3d_vs_prog_data *vs = v3d->prog.vs->prog_data.vs;
734 compute_vpm_config_gs(&v3d->screen->devinfo,
735 vs, gs, &vpm_cfg);
736
737 v3d_emit_tes_gs_shader_params(v3d->job,
738 vpm_cfg.gs_width,
739 vpm_cfg.Gd,
740 vpm_cfg.Gv);
741 }
742 #endif
743
744 cl_emit(&job->indirect, GL_SHADER_STATE_RECORD, shader) {
745 shader.enable_clipping = true;
746 /* VC5_DIRTY_PRIM_MODE | VC5_DIRTY_RASTERIZER */
747 shader.point_size_in_shaded_vertex_data =
748 (info->mode == PIPE_PRIM_POINTS &&
749 v3d->rasterizer->base.point_size_per_vertex);
750
751 /* Must be set if the shader modifies Z, discards, or modifies
752 * the sample mask. For any of these cases, the fragment
753 * shader needs to write the Z value (even if it only discards).
754 */
755 shader.fragment_shader_does_z_writes =
756 v3d->prog.fs->prog_data.fs->writes_z;
757 /* Set if the EZ test must be disabled (due to shader side
758 * effects and the early_z flag not being present in the
759 * shader).
760 */
761 shader.turn_off_early_z_test =
762 v3d->prog.fs->prog_data.fs->disable_ez;
763
764 shader.fragment_shader_uses_real_pixel_centre_w_in_addition_to_centroid_w2 =
765 v3d->prog.fs->prog_data.fs->uses_center_w;
766
767 #if V3D_VERSION >= 41
768 shader.any_shader_reads_hardware_written_primitive_id =
769 v3d->prog.gs ? v3d->prog.gs->prog_data.gs->uses_pid :
770 false;
771 #endif
772
773 #if V3D_VERSION >= 40
774 shader.do_scoreboard_wait_on_first_thread_switch =
775 v3d->prog.fs->prog_data.fs->lock_scoreboard_on_first_thrsw;
776 shader.disable_implicit_point_line_varyings =
777 !v3d->prog.fs->prog_data.fs->uses_implicit_point_line_varyings;
778 #endif
779
780 shader.number_of_varyings_in_fragment_shader =
781 v3d->prog.fs->prog_data.fs->num_inputs;
782
783 shader.coordinate_shader_propagate_nans = true;
784 shader.vertex_shader_propagate_nans = true;
785 shader.fragment_shader_propagate_nans = true;
786
787 shader.coordinate_shader_code_address =
788 cl_address(v3d_resource(v3d->prog.cs->resource)->bo,
789 v3d->prog.cs->offset);
790 shader.vertex_shader_code_address =
791 cl_address(v3d_resource(v3d->prog.vs->resource)->bo,
792 v3d->prog.vs->offset);
793 shader.fragment_shader_code_address =
794 cl_address(v3d_resource(v3d->prog.fs->resource)->bo,
795 v3d->prog.fs->offset);
796
797 /* XXX: Use combined input/output size flag in the common
798 * case.
799 */
800 shader.coordinate_shader_has_separate_input_and_output_vpm_blocks =
801 v3d->prog.cs->prog_data.vs->separate_segments;
802 shader.vertex_shader_has_separate_input_and_output_vpm_blocks =
803 v3d->prog.vs->prog_data.vs->separate_segments;
804
805 shader.coordinate_shader_input_vpm_segment_size =
806 v3d->prog.cs->prog_data.vs->separate_segments ?
807 v3d->prog.cs->prog_data.vs->vpm_input_size : 1;
808 shader.vertex_shader_input_vpm_segment_size =
809 v3d->prog.vs->prog_data.vs->separate_segments ?
810 v3d->prog.vs->prog_data.vs->vpm_input_size : 1;
811
812 shader.coordinate_shader_output_vpm_segment_size =
813 v3d->prog.cs->prog_data.vs->vpm_output_size;
814 shader.vertex_shader_output_vpm_segment_size =
815 v3d->prog.vs->prog_data.vs->vpm_output_size;
816
817 shader.coordinate_shader_uniforms_address = cs_uniforms;
818 shader.vertex_shader_uniforms_address = vs_uniforms;
819 shader.fragment_shader_uniforms_address = fs_uniforms;
820
821 #if V3D_VERSION >= 41
822 shader.min_coord_shader_input_segments_required_in_play =
823 vpm_cfg_bin.As;
824 shader.min_vertex_shader_input_segments_required_in_play =
825 vpm_cfg.As;
826
827 shader.min_coord_shader_output_segments_required_in_play_in_addition_to_vcm_cache_size =
828 vpm_cfg_bin.Ve;
829 shader.min_vertex_shader_output_segments_required_in_play_in_addition_to_vcm_cache_size =
830 vpm_cfg.Ve;
831
832 shader.coordinate_shader_4_way_threadable =
833 v3d->prog.cs->prog_data.vs->base.threads == 4;
834 shader.vertex_shader_4_way_threadable =
835 v3d->prog.vs->prog_data.vs->base.threads == 4;
836 shader.fragment_shader_4_way_threadable =
837 v3d->prog.fs->prog_data.fs->base.threads == 4;
838
839 shader.coordinate_shader_start_in_final_thread_section =
840 v3d->prog.cs->prog_data.vs->base.single_seg;
841 shader.vertex_shader_start_in_final_thread_section =
842 v3d->prog.vs->prog_data.vs->base.single_seg;
843 shader.fragment_shader_start_in_final_thread_section =
844 v3d->prog.fs->prog_data.fs->base.single_seg;
845 #else
846 shader.coordinate_shader_4_way_threadable =
847 v3d->prog.cs->prog_data.vs->base.threads == 4;
848 shader.coordinate_shader_2_way_threadable =
849 v3d->prog.cs->prog_data.vs->base.threads == 2;
850 shader.vertex_shader_4_way_threadable =
851 v3d->prog.vs->prog_data.vs->base.threads == 4;
852 shader.vertex_shader_2_way_threadable =
853 v3d->prog.vs->prog_data.vs->base.threads == 2;
854 shader.fragment_shader_4_way_threadable =
855 v3d->prog.fs->prog_data.fs->base.threads == 4;
856 shader.fragment_shader_2_way_threadable =
857 v3d->prog.fs->prog_data.fs->base.threads == 2;
858 #endif
859
860 shader.vertex_id_read_by_coordinate_shader =
861 v3d->prog.cs->prog_data.vs->uses_vid;
862 shader.instance_id_read_by_coordinate_shader =
863 v3d->prog.cs->prog_data.vs->uses_iid;
864 shader.vertex_id_read_by_vertex_shader =
865 v3d->prog.vs->prog_data.vs->uses_vid;
866 shader.instance_id_read_by_vertex_shader =
867 v3d->prog.vs->prog_data.vs->uses_iid;
868
869 shader.address_of_default_attribute_values =
870 cl_address(v3d_resource(vtx->defaults)->bo,
871 vtx->defaults_offset);
872 }
873
874 bool cs_loaded_any = false;
875 for (int i = 0; i < vtx->num_elements; i++) {
876 struct pipe_vertex_element *elem = &vtx->pipe[i];
877 struct pipe_vertex_buffer *vb =
878 &vertexbuf->vb[elem->vertex_buffer_index];
879 struct v3d_resource *rsc = v3d_resource(vb->buffer.resource);
880
881 const uint32_t size =
882 cl_packet_length(GL_SHADER_STATE_ATTRIBUTE_RECORD);
883 cl_emit_with_prepacked(&job->indirect,
884 GL_SHADER_STATE_ATTRIBUTE_RECORD,
885 &vtx->attrs[i * size], attr) {
886 attr.stride = vb->stride;
887 attr.address = cl_address(rsc->bo,
888 vb->buffer_offset +
889 elem->src_offset);
890 attr.number_of_values_read_by_coordinate_shader =
891 v3d->prog.cs->prog_data.vs->vattr_sizes[i];
892 attr.number_of_values_read_by_vertex_shader =
893 v3d->prog.vs->prog_data.vs->vattr_sizes[i];
894
895 /* GFXH-930: At least one attribute must be enabled
896 * and read by CS and VS. If we have attributes being
897 * consumed by the VS but not the CS, then set up a
898 * dummy load of the last attribute into the CS's VPM
899 * inputs. (Since CS is just dead-code-elimination
900 * compared to VS, we can't have CS loading but not
901 * VS).
902 */
903 if (v3d->prog.cs->prog_data.vs->vattr_sizes[i])
904 cs_loaded_any = true;
905 if (i == vtx->num_elements - 1 && !cs_loaded_any) {
906 attr.number_of_values_read_by_coordinate_shader = 1;
907 }
908 #if V3D_VERSION >= 41
909 attr.maximum_index = 0xffffff;
910 #endif
911 }
912 STATIC_ASSERT(sizeof(vtx->attrs) >= V3D_MAX_VS_INPUTS / 4 * size);
913 }
914
915 if (vtx->num_elements == 0) {
916 /* GFXH-930: At least one attribute must be enabled and read
917 * by CS and VS. If we have no attributes being consumed by
918 * the shader, set up a dummy to be loaded into the VPM.
919 */
920 cl_emit(&job->indirect, GL_SHADER_STATE_ATTRIBUTE_RECORD, attr) {
921 /* Valid address of data whose value will be unused. */
922 attr.address = cl_address(job->indirect.bo, 0);
923
924 attr.type = ATTRIBUTE_FLOAT;
925 attr.stride = 0;
926 attr.vec_size = 1;
927
928 attr.number_of_values_read_by_coordinate_shader = 1;
929 attr.number_of_values_read_by_vertex_shader = 1;
930 }
931 }
932
933 cl_emit(&job->bcl, VCM_CACHE_SIZE, vcm) {
934 vcm.number_of_16_vertex_batches_for_binning = vpm_cfg_bin.Vc;
935 vcm.number_of_16_vertex_batches_for_rendering = vpm_cfg.Vc;
936 }
937
938 #if V3D_VERSION >= 41
939 if (v3d->prog.gs) {
940 cl_emit(&job->bcl, GL_SHADER_STATE_INCLUDING_GS, state) {
941 state.address = cl_address(job->indirect.bo,
942 shader_rec_offset);
943 state.number_of_attribute_arrays = num_elements_to_emit;
944 }
945 } else {
946 cl_emit(&job->bcl, GL_SHADER_STATE, state) {
947 state.address = cl_address(job->indirect.bo,
948 shader_rec_offset);
949 state.number_of_attribute_arrays = num_elements_to_emit;
950 }
951 }
952 #else
953 assert(!v3d->prog.gs);
954 cl_emit(&job->bcl, GL_SHADER_STATE, state) {
955 state.address = cl_address(job->indirect.bo, shader_rec_offset);
956 state.number_of_attribute_arrays = num_elements_to_emit;
957 }
958 #endif
959
960 v3d_bo_unreference(&cs_uniforms.bo);
961 v3d_bo_unreference(&vs_uniforms.bo);
962 if (gs_uniforms.bo)
963 v3d_bo_unreference(&gs_uniforms.bo);
964 if (gs_bin_uniforms.bo)
965 v3d_bo_unreference(&gs_bin_uniforms.bo);
966 v3d_bo_unreference(&fs_uniforms.bo);
967 }
968
969 /**
970 * Updates the number of primitives generated from the number of vertices
971 * to draw. This only works when no GS is present, since otherwise the number
972 * of primitives generated cannot be determined in advance and we would need
973 * to use the PRIMITIVE_COUNTS_FEEDBACK command instead. However, that requires
974 * a sync wait for the draw to complete, so we only use it when a GS is present.
975 */
976 static void
977 v3d_update_primitives_generated_counter(struct v3d_context *v3d,
978 const struct pipe_draw_info *info)
979 {
980 assert(!v3d->prog.gs);
981
982 if (!v3d->active_queries)
983 return;
984
985 uint32_t prims = u_prims_for_vertices(info->mode, info->count);
986 v3d->prims_generated += prims;
987 }
988
989 static void
990 v3d_update_job_ez(struct v3d_context *v3d, struct v3d_job *job)
991 {
992 switch (v3d->zsa->ez_state) {
993 case VC5_EZ_UNDECIDED:
994 /* If the Z/S state didn't pick a direction but didn't
995 * disable, then go along with the current EZ state. This
996 * allows EZ optimization for Z func == EQUAL or NEVER.
997 */
998 break;
999
1000 case VC5_EZ_LT_LE:
1001 case VC5_EZ_GT_GE:
1002 /* If the Z/S state picked a direction, then it needs to match
1003 * the current direction if we've decided on one.
1004 */
1005 if (job->ez_state == VC5_EZ_UNDECIDED)
1006 job->ez_state = v3d->zsa->ez_state;
1007 else if (job->ez_state != v3d->zsa->ez_state)
1008 job->ez_state = VC5_EZ_DISABLED;
1009 break;
1010
1011 case VC5_EZ_DISABLED:
1012 /* If the current Z/S state disables EZ because of a bad Z
1013 * func or stencil operation, then we can't do any more EZ in
1014 * this frame.
1015 */
1016 job->ez_state = VC5_EZ_DISABLED;
1017 break;
1018 }
1019
1020 /* If the FS affects the Z of the pixels, then it may update against
1021 * the chosen EZ direction (though we could use
1022 * ARB_conservative_depth's hints to avoid this).
1023 */
1024 if (v3d->prog.fs->prog_data.fs->writes_z) {
1025 job->ez_state = VC5_EZ_DISABLED;
1026 }
1027
1028 if (job->first_ez_state == VC5_EZ_UNDECIDED &&
1029 (job->ez_state != VC5_EZ_DISABLED || job->draw_calls_queued == 0))
1030 job->first_ez_state = job->ez_state;
1031 }
1032
1033 static uint32_t
1034 v3d_hw_prim_type(enum pipe_prim_type prim_type)
1035 {
1036 switch (prim_type) {
1037 case PIPE_PRIM_POINTS:
1038 case PIPE_PRIM_LINES:
1039 case PIPE_PRIM_LINE_LOOP:
1040 case PIPE_PRIM_LINE_STRIP:
1041 case PIPE_PRIM_TRIANGLES:
1042 case PIPE_PRIM_TRIANGLE_STRIP:
1043 case PIPE_PRIM_TRIANGLE_FAN:
1044 return prim_type;
1045
1046 case PIPE_PRIM_LINES_ADJACENCY:
1047 case PIPE_PRIM_LINE_STRIP_ADJACENCY:
1048 case PIPE_PRIM_TRIANGLES_ADJACENCY:
1049 case PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY:
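/* The adjacency variants are consecutive in gallium's enum, so this
 * maps LINES_ADJACENCY..TRIANGLE_STRIP_ADJACENCY onto the hardware's
 * 8..11 range.
 */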
1050 return 8 + (prim_type - PIPE_PRIM_LINES_ADJACENCY);
1051
1052 default:
1053 unreachable("Unsupported primitive type");
1054 }
1055 }
1056
1057 static bool
1058 v3d_check_compiled_shaders(struct v3d_context *v3d)
1059 {
1060 static bool warned[5] = { 0 };
1061
1062 uint32_t failed_stage = MESA_SHADER_NONE;
1063 if (!v3d->prog.vs->resource || !v3d->prog.cs->resource) {
1064 failed_stage = MESA_SHADER_VERTEX;
1065 } else if ((v3d->prog.gs_bin && !v3d->prog.gs_bin->resource) ||
1066 (v3d->prog.gs && !v3d->prog.gs->resource)) {
1067 failed_stage = MESA_SHADER_GEOMETRY;
1068 } else if (v3d->prog.fs && !v3d->prog.fs->resource) {
1069 failed_stage = MESA_SHADER_FRAGMENT;
1070 }
1071
1072 if (likely(failed_stage == MESA_SHADER_NONE))
1073 return true;
1074
1075 if (!warned[failed_stage]) {
1076 fprintf(stderr,
1077 "%s shader failed to compile. Expect corruption.\n",
1078 _mesa_shader_stage_to_string(failed_stage));
1079 warned[failed_stage] = true;
1080 }
1081 return false;
1082 }
1083
1084 static void
1085 v3d_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
1086 {
1087 struct v3d_context *v3d = v3d_context(pctx);
1088
1089 if (!info->count_from_stream_output && !info->indirect &&
1090 !info->primitive_restart &&
1091 !u_trim_pipe_prim(info->mode, (unsigned*)&info->count))
1092 return;
1093
1094 /* Fall back for weird desktop GL primitive restart values. */
1095 if (info->primitive_restart &&
1096 info->index_size) {
1097 uint32_t mask = ~0;
1098
1099 switch (info->index_size) {
1100 case 2:
1101 mask = 0xffff;
1102 break;
1103 case 1:
1104 mask = 0xff;
1105 break;
1106 }
1107
1108 if (info->restart_index != mask) {
1109 util_draw_vbo_without_prim_restart(pctx, info);
1110 return;
1111 }
1112 }
1113
1114 if (info->mode >= PIPE_PRIM_QUADS && info->mode <= PIPE_PRIM_POLYGON) {
1115 util_primconvert_save_rasterizer_state(v3d->primconvert, &v3d->rasterizer->base);
1116 util_primconvert_draw_vbo(v3d->primconvert, info);
1117 perf_debug("Fallback conversion for %d %s vertices\n",
1118 info->count, u_prim_name(info->mode));
1119 return;
1120 }
1121
1122 /* Before setting up the draw, flush anything writing to the resources
1123 * that we read from or reading from resources we write to.
1124 */
1125 for (int s = 0; s < PIPE_SHADER_COMPUTE; s++)
1126 v3d_predraw_check_stage_inputs(pctx, s);
1127
1128 if (info->indirect) {
1129 v3d_flush_jobs_writing_resource(v3d, info->indirect->buffer,
1130 V3D_FLUSH_DEFAULT, false);
1131 }
1132
1133 v3d_predraw_check_outputs(pctx);
1134
1135 /* If transform feedback is active and we are switching primitive type
1136 * we need to submit the job before drawing and update the vertex count
1137 * written to TF based on the primitive type since we will need to
1138 * know the exact vertex count if the application decides to call
1139 * glDrawTransformFeedback() later.
1140 */
1141 if (v3d->streamout.num_targets > 0 &&
1142 u_base_prim_type(info->mode) != u_base_prim_type(v3d->prim_mode)) {
1143 v3d_update_primitive_counters(v3d);
1144 }
1145
1146 struct v3d_job *job = v3d_get_job_for_fbo(v3d);
1147
1148 /* If vertex texturing depends on the output of rendering, we need to
1149 * ensure that that rendering is complete before we run a coordinate
1150 * shader that depends on it.
1151 *
1152 * Given that doing that is unusual, for now we just block the binner
1153 * on the last submitted render, rather than tracking the last
1154 * rendering to each texture's BO.
1155 */
1156 if (v3d->tex[PIPE_SHADER_VERTEX].num_textures || info->indirect) {
1157 perf_debug("Blocking binner on last render "
1158 "due to vertex texturing or indirect drawing.\n");
1159 job->submit.in_sync_bcl = v3d->out_sync;
1160 }
1161
1162 /* We also need to ensure that compute is complete when render depends
1163 * on resources written by it.
1164 */
1165 if (v3d->sync_on_last_compute_job) {
1166 job->submit.in_sync_bcl = v3d->out_sync;
1167 v3d->sync_on_last_compute_job = false;
1168 }
1169
1170 /* Mark SSBOs and images as being written. We don't actually know
1171 * which ones are read vs written, so just assume the worst.
1172 */
1173 for (int s = 0; s < PIPE_SHADER_COMPUTE; s++) {
1174 foreach_bit(i, v3d->ssbo[s].enabled_mask) {
1175 v3d_job_add_write_resource(job,
1176 v3d->ssbo[s].sb[i].buffer);
1177 job->tmu_dirty_rcl = true;
1178 }
1179
1180 foreach_bit(i, v3d->shaderimg[s].enabled_mask) {
1181 v3d_job_add_write_resource(job,
1182 v3d->shaderimg[s].si[i].base.resource);
1183 job->tmu_dirty_rcl = true;
1184 }
1185 }
1186
1187 /* Get space to emit our draw call into the BCL, using a branch to
1188 * jump to a new BO if necessary.
1189 */
1190 v3d_cl_ensure_space_with_branch(&job->bcl, 256 /* XXX */);
1191
1192 if (v3d->prim_mode != info->mode) {
1193 v3d->prim_mode = info->mode;
1194 v3d->dirty |= VC5_DIRTY_PRIM_MODE;
1195 }
1196
1197 v3d_start_draw(v3d);
1198 v3d_update_compiled_shaders(v3d, info->mode);
1199 if (!v3d_check_compiled_shaders(v3d))
1200 return;
1201 v3d_update_job_ez(v3d, job);
1202
1203 /* If this job was writing to transform feedback buffers before this
1204 * draw and we are reading from them here, then we need to wait for TF
1205 * to complete before we emit this draw.
1206 *
1207 * Notice this check needs to happen before we emit state for the
1208 * current draw call, where we update job->tf_enabled, so we can ensure
1209 * that we only check TF writes for prior draws.
1210 */
1211 v3d_emit_wait_for_tf_if_needed(v3d, job);
1212
1213 #if V3D_VERSION >= 41
1214 v3d41_emit_state(pctx);
1215 #else
1216 v3d33_emit_state(pctx);
1217 #endif
1218
1219 if (v3d->dirty & (VC5_DIRTY_VTXBUF |
1220 VC5_DIRTY_VTXSTATE |
1221 VC5_DIRTY_PRIM_MODE |
1222 VC5_DIRTY_RASTERIZER |
1223 VC5_DIRTY_COMPILED_CS |
1224 VC5_DIRTY_COMPILED_VS |
1225 VC5_DIRTY_COMPILED_GS_BIN |
1226 VC5_DIRTY_COMPILED_GS |
1227 VC5_DIRTY_COMPILED_FS |
1228 v3d->prog.cs->uniform_dirty_bits |
1229 v3d->prog.vs->uniform_dirty_bits |
1230 (v3d->prog.gs_bin ?
1231 v3d->prog.gs_bin->uniform_dirty_bits : 0) |
1232 (v3d->prog.gs ?
1233 v3d->prog.gs->uniform_dirty_bits : 0) |
1234 v3d->prog.fs->uniform_dirty_bits)) {
1235 v3d_emit_gl_shader_state(v3d, info);
1236 }
1237
1238 v3d->dirty = 0;
1239
1240 /* The Base Vertex/Base Instance packet sets those values to nonzero
1241 * for the next draw call only.
1242 */
1243 if (info->index_bias || info->start_instance) {
1244 cl_emit(&job->bcl, BASE_VERTEX_BASE_INSTANCE, base) {
1245 base.base_instance = info->start_instance;
1246 base.base_vertex = info->index_bias;
1247 }
1248 }
1249
1250 uint32_t prim_tf_enable = 0;
1251 #if V3D_VERSION < 40
1252 /* V3D 3.x: The HW only processes transform feedback on primitives
1253 * with the flag set.
1254 */
1255 if (v3d->streamout.num_targets)
1256 prim_tf_enable = (V3D_PRIM_POINTS_TF - V3D_PRIM_POINTS);
1257 #endif
1258
1259 if (!v3d->prog.gs)
1260 v3d_update_primitives_generated_counter(v3d, info);
1261
1262 uint32_t hw_prim_type = v3d_hw_prim_type(info->mode);
1263 if (info->index_size) {
1264 uint32_t index_size = info->index_size;
1265 uint32_t offset = info->start * index_size;
1266 struct pipe_resource *prsc;
1267 if (info->has_user_indices) {
1268 prsc = NULL;
1269 u_upload_data(v3d->uploader, 0,
1270 info->count * info->index_size, 4,
1271 info->index.user,
1272 &offset, &prsc);
1273 } else {
1274 prsc = info->index.resource;
1275 }
1276 struct v3d_resource *rsc = v3d_resource(prsc);
1277
1278 #if V3D_VERSION >= 40
1279 cl_emit(&job->bcl, INDEX_BUFFER_SETUP, ib) {
1280 ib.address = cl_address(rsc->bo, 0);
1281 ib.size = rsc->bo->size;
1282 }
1283 #endif
1284
1285 if (info->indirect) {
1286 cl_emit(&job->bcl, INDIRECT_INDEXED_INSTANCED_PRIM_LIST, prim) {
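/* ffs(index_size) - 1 turns the 1/2/4-byte index sizes into the
 * 0/1/2 values expected by the index_type field.
 */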
1287 prim.index_type = ffs(info->index_size) - 1;
1288 #if V3D_VERSION < 40
1289 prim.address_of_indices_list =
1290 cl_address(rsc->bo, offset);
1291 #endif /* V3D_VERSION < 40 */
1292 prim.mode = hw_prim_type | prim_tf_enable;
1293 prim.enable_primitive_restarts = info->primitive_restart;
1294
1295 prim.number_of_draw_indirect_indexed_records = info->indirect->draw_count;
1296
1297 prim.stride_in_multiples_of_4_bytes = info->indirect->stride >> 2;
1298 prim.address = cl_address(v3d_resource(info->indirect->buffer)->bo,
1299 info->indirect->offset);
1300 }
1301 } else if (info->instance_count > 1) {
1302 cl_emit(&job->bcl, INDEXED_INSTANCED_PRIM_LIST, prim) {
1303 prim.index_type = ffs(info->index_size) - 1;
1304 #if V3D_VERSION >= 40
1305 prim.index_offset = offset;
1306 #else /* V3D_VERSION < 40 */
1307 prim.maximum_index = (1u << 31) - 1; /* XXX */
1308 prim.address_of_indices_list =
1309 cl_address(rsc->bo, offset);
1310 #endif /* V3D_VERSION < 40 */
1311 prim.mode = hw_prim_type | prim_tf_enable;
1312 prim.enable_primitive_restarts = info->primitive_restart;
1313
1314 prim.number_of_instances = info->instance_count;
1315 prim.instance_length = info->count;
1316 }
1317 } else {
1318 cl_emit(&job->bcl, INDEXED_PRIM_LIST, prim) {
1319 prim.index_type = ffs(info->index_size) - 1;
1320 prim.length = info->count;
1321 #if V3D_VERSION >= 40
1322 prim.index_offset = offset;
1323 #else /* V3D_VERSION < 40 */
1324 prim.maximum_index = (1u << 31) - 1; /* XXX */
1325 prim.address_of_indices_list =
1326 cl_address(rsc->bo, offset);
1327 #endif /* V3D_VERSION < 40 */
1328 prim.mode = hw_prim_type | prim_tf_enable;
1329 prim.enable_primitive_restarts = info->primitive_restart;
1330 }
1331 }
1332
1333 if (info->has_user_indices)
1334 pipe_resource_reference(&prsc, NULL);
1335 } else {
1336 if (info->indirect) {
1337 cl_emit(&job->bcl, INDIRECT_VERTEX_ARRAY_INSTANCED_PRIMS, prim) {
1338 prim.mode = hw_prim_type | prim_tf_enable;
1339 prim.number_of_draw_indirect_array_records = info->indirect->draw_count;
1340
1341 prim.stride_in_multiples_of_4_bytes = info->indirect->stride >> 2;
1342 prim.address = cl_address(v3d_resource(info->indirect->buffer)->bo,
1343 info->indirect->offset);
1344 }
1345 } else if (info->instance_count > 1) {
1346 struct pipe_stream_output_target *so =
1347 info->count_from_stream_output;
1348 uint32_t vert_count = so ?
1349 v3d_stream_output_target_get_vertex_count(so) :
1350 info->count;
1351 cl_emit(&job->bcl, VERTEX_ARRAY_INSTANCED_PRIMS, prim) {
1352 prim.mode = hw_prim_type | prim_tf_enable;
1353 prim.index_of_first_vertex = info->start;
1354 prim.number_of_instances = info->instance_count;
1355 prim.instance_length = vert_count;
1356 }
1357 } else {
1358 struct pipe_stream_output_target *so =
1359 info->count_from_stream_output;
1360 uint32_t vert_count = so ?
1361 v3d_stream_output_target_get_vertex_count(so) :
1362 info->count;
1363 cl_emit(&job->bcl, VERTEX_ARRAY_PRIMS, prim) {
1364 prim.mode = hw_prim_type | prim_tf_enable;
1365 prim.length = vert_count;
1366 prim.index_of_first_vertex = info->start;
1367 }
1368 }
1369 }
1370
1371 /* A flush is required in between a TF draw and any following TF specs
1372 * packet, or the GPU may hang. Just flush each time for now.
1373 */
1374 if (v3d->streamout.num_targets)
1375 cl_emit(&job->bcl, TRANSFORM_FEEDBACK_FLUSH_AND_COUNT, flush);
1376
1377 job->draw_calls_queued++;
1378 if (v3d->streamout.num_targets)
1379 job->tf_draw_calls_queued++;
1380
1381 /* Increment the TF offsets by how many verts we wrote. XXX: This
1382 * needs some clamping to the buffer size.
1383 */
1384 for (int i = 0; i < v3d->streamout.num_targets; i++)
1385 v3d->streamout.offsets[i] += info->count;
1386
1387 if (v3d->zsa && job->zsbuf && v3d->zsa->base.depth.enabled) {
1388 struct v3d_resource *rsc = v3d_resource(job->zsbuf->texture);
1389 v3d_job_add_bo(job, rsc->bo);
1390
1391 job->load |= PIPE_CLEAR_DEPTH & ~job->clear;
1392 if (v3d->zsa->base.depth.writemask)
1393 job->store |= PIPE_CLEAR_DEPTH;
1394 rsc->initialized_buffers = PIPE_CLEAR_DEPTH;
1395 }
1396
1397 if (v3d->zsa && job->zsbuf && v3d->zsa->base.stencil[0].enabled) {
1398 struct v3d_resource *rsc = v3d_resource(job->zsbuf->texture);
1399 if (rsc->separate_stencil)
1400 rsc = rsc->separate_stencil;
1401
1402 v3d_job_add_bo(job, rsc->bo);
1403
1404 job->load |= PIPE_CLEAR_STENCIL & ~job->clear;
1405 if (v3d->zsa->base.stencil[0].writemask ||
1406 v3d->zsa->base.stencil[1].writemask) {
1407 job->store |= PIPE_CLEAR_STENCIL;
1408 }
1409 rsc->initialized_buffers |= PIPE_CLEAR_STENCIL;
1410 }
1411
1412 for (int i = 0; i < V3D_MAX_DRAW_BUFFERS; i++) {
1413 uint32_t bit = PIPE_CLEAR_COLOR0 << i;
1414 int blend_rt = v3d->blend->base.independent_blend_enable ? i : 0;
1415
1416 if (job->store & bit || !job->cbufs[i])
1417 continue;
1418 struct v3d_resource *rsc = v3d_resource(job->cbufs[i]->texture);
1419
1420 job->load |= bit & ~job->clear;
1421 if (v3d->blend->base.rt[blend_rt].colormask)
1422 job->store |= bit;
1423 v3d_job_add_bo(job, rsc->bo);
1424 }
1425
1426 if (job->referenced_size > 768 * 1024 * 1024) {
1427 perf_debug("Flushing job with %dkb to try to free up memory\n",
1428 job->referenced_size / 1024);
1429 v3d_flush(pctx);
1430 }
1431
1432 if (V3D_DEBUG & V3D_DEBUG_ALWAYS_FLUSH)
1433 v3d_flush(pctx);
1434 }
1435
1436 #if V3D_VERSION >= 41
1437 #define V3D_CSD_CFG012_WG_COUNT_SHIFT 16
1438 #define V3D_CSD_CFG012_WG_OFFSET_SHIFT 0
1439 /* Allow this dispatch to start while the last one is still running. */
1440 #define V3D_CSD_CFG3_OVERLAP_WITH_PREV (1 << 26)
1441 /* Maximum supergroup ID. 6 bits. */
1442 #define V3D_CSD_CFG3_MAX_SG_ID_SHIFT 20
1443 /* Batches per supergroup minus 1. 8 bits. */
1444 #define V3D_CSD_CFG3_BATCHES_PER_SG_M1_SHIFT 12
1445 /* Workgroups per supergroup, 0 means 16 */
1446 #define V3D_CSD_CFG3_WGS_PER_SG_SHIFT 8
1447 #define V3D_CSD_CFG3_WG_SIZE_SHIFT 0
1448
1449 #define V3D_CSD_CFG5_PROPAGATE_NANS (1 << 2)
1450 #define V3D_CSD_CFG5_SINGLE_SEG (1 << 1)
1451 #define V3D_CSD_CFG5_THREADING (1 << 0)
1452
1453 static void
1454 v3d_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info)
1455 {
1456 struct v3d_context *v3d = v3d_context(pctx);
1457 struct v3d_screen *screen = v3d->screen;
1458
1459 v3d_predraw_check_stage_inputs(pctx, PIPE_SHADER_COMPUTE);
1460
1461 v3d_update_compiled_cs(v3d);
1462
1463 if (!v3d->prog.compute->resource) {
1464 static bool warned = false;
1465 if (!warned) {
1466 fprintf(stderr,
1467 "Compute shader failed to compile. "
1468 "Expect corruption.\n");
1469 warned = true;
1470 }
1471 return;
1472 }
1473
1474 /* Some of the units of scale:
1475 *
1476 * - Batches of 16 work items (shader invocations) that will be queued
1477 * to run on a QPU at once.
1478 *
1479 * - Workgroups composed of work items based on the shader's layout
1480 * declaration.
1481 *
1482 * - Supergroups of 1-16 workgroups. There can only be 16 supergroups
1483 * running at a time on the core, so we want to keep them large to
1484 * keep the QPUs busy, but a whole supergroup will sync at a barrier
1485 * so we want to keep them small if one is present.
1486 */
1487 struct drm_v3d_submit_csd submit = { 0 };
1488 struct v3d_job *job = v3d_job_create(v3d);
1489
1490 /* Set up the actual number of workgroups, synchronously mapping the
1491 * indirect buffer if necessary to get the dimensions.
1492 */
1493 if (info->indirect) {
1494 struct pipe_transfer *transfer;
1495 uint32_t *map = pipe_buffer_map_range(pctx, info->indirect,
1496 info->indirect_offset,
1497 3 * sizeof(uint32_t),
1498 PIPE_TRANSFER_READ,
1499 &transfer);
1500 memcpy(v3d->compute_num_workgroups, map, 3 * sizeof(uint32_t));
1501 pipe_buffer_unmap(pctx, transfer);
1502
1503 if (v3d->compute_num_workgroups[0] == 0 ||
1504 v3d->compute_num_workgroups[1] == 0 ||
1505 v3d->compute_num_workgroups[2] == 0) {
1506 /* Nothing to dispatch, so skip the draw (CSD can't
1507 * handle 0 workgroups).
1508 */
1509 return;
1510 }
1511 } else {
1512 v3d->compute_num_workgroups[0] = info->grid[0];
1513 v3d->compute_num_workgroups[1] = info->grid[1];
1514 v3d->compute_num_workgroups[2] = info->grid[2];
1515 }
1516
1517 for (int i = 0; i < 3; i++) {
1518 submit.cfg[i] |= (v3d->compute_num_workgroups[i] <<
1519 V3D_CSD_CFG012_WG_COUNT_SHIFT);
1520 }
1521
1522 perf_debug("CSD only using single WG per SG currently, "
1523 "should increase that when possible.");
1524 int wgs_per_sg = 1;
1525 int wg_size = info->block[0] * info->block[1] * info->block[2];
1526 submit.cfg[3] |= wgs_per_sg << V3D_CSD_CFG3_WGS_PER_SG_SHIFT;
1527 submit.cfg[3] |= ((DIV_ROUND_UP(wgs_per_sg * wg_size, 16) - 1) <<
1528 V3D_CSD_CFG3_BATCHES_PER_SG_M1_SHIFT);
1529 submit.cfg[3] |= (wg_size & 0xff) << V3D_CSD_CFG3_WG_SIZE_SHIFT;
1530
1531 int batches_per_wg = DIV_ROUND_UP(wg_size, 16);
1532 /* Number of batches the dispatch will invoke (minus 1). */
1533 submit.cfg[4] = batches_per_wg * (v3d->compute_num_workgroups[0] *
1534 v3d->compute_num_workgroups[1] *
1535 v3d->compute_num_workgroups[2]) - 1;
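/* Example with assumed dispatch parameters: a local size of 8x8x1
 * gives wg_size = 64, so each workgroup takes DIV_ROUND_UP(64, 16) = 4
 * batches; with wgs_per_sg = 1, cfg[3] packs 3 (batches per SG minus
 * one), 1 and 64, and a 4x4x1 grid yields cfg[4] = 4 * 16 - 1 = 63.
 */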
1536
1537 /* Make sure we didn't accidentally underflow. */
1538 assert(submit.cfg[4] != ~0);
1539
1540 v3d_job_add_bo(job, v3d_resource(v3d->prog.compute->resource)->bo);
1541 submit.cfg[5] = (v3d_resource(v3d->prog.compute->resource)->bo->offset +
1542 v3d->prog.compute->offset);
1543 submit.cfg[5] |= V3D_CSD_CFG5_PROPAGATE_NANS;
1544 if (v3d->prog.compute->prog_data.base->single_seg)
1545 submit.cfg[5] |= V3D_CSD_CFG5_SINGLE_SEG;
1546 if (v3d->prog.compute->prog_data.base->threads == 4)
1547 submit.cfg[5] |= V3D_CSD_CFG5_THREADING;
1548
1549 if (v3d->prog.compute->prog_data.compute->shared_size) {
1550 v3d->compute_shared_memory =
1551 v3d_bo_alloc(v3d->screen,
1552 v3d->prog.compute->prog_data.compute->shared_size *
1553 wgs_per_sg,
1554 "shared_vars");
1555 }
1556
1557 struct v3d_cl_reloc uniforms = v3d_write_uniforms(v3d, job,
1558 v3d->prog.compute,
1559 PIPE_SHADER_COMPUTE);
1560 v3d_job_add_bo(job, uniforms.bo);
1561 submit.cfg[6] = uniforms.bo->offset + uniforms.offset;
1562
1563 /* Pull some job state that was stored in a SUBMIT_CL struct out to
1564 * our SUBMIT_CSD struct
1565 */
1566 submit.bo_handles = job->submit.bo_handles;
1567 submit.bo_handle_count = job->submit.bo_handle_count;
1568
1569 /* Serialize this in the rest of our command stream. */
1570 submit.in_sync = v3d->out_sync;
1571 submit.out_sync = v3d->out_sync;
1572
1573 if (!(V3D_DEBUG & V3D_DEBUG_NORAST)) {
1574 int ret = v3d_ioctl(screen->fd, DRM_IOCTL_V3D_SUBMIT_CSD,
1575 &submit);
1576 static bool warned = false;
1577 if (ret && !warned) {
1578 fprintf(stderr, "CSD submit call returned %s. "
1579 "Expect corruption.\n", strerror(errno));
1580 warned = true;
1581 }
1582 }
1583
1584 v3d_job_free(v3d, job);
1585
1586 /* Mark SSBOs and images as being written. We don't actually know which
1587 * ones are read vs. written, so just assume the worst.
1588 */
1589 foreach_bit(i, v3d->ssbo[PIPE_SHADER_COMPUTE].enabled_mask) {
1590 struct v3d_resource *rsc = v3d_resource(
1591 v3d->ssbo[PIPE_SHADER_COMPUTE].sb[i].buffer);
1592 rsc->writes++;
1593 rsc->compute_written = true;
1594 }
1595
1596 foreach_bit(i, v3d->shaderimg[PIPE_SHADER_COMPUTE].enabled_mask) {
1597 struct v3d_resource *rsc = v3d_resource(
1598 v3d->shaderimg[PIPE_SHADER_COMPUTE].si[i].base.resource);
1599 rsc->writes++;
1600 rsc->compute_written = true;
1601 }
1602
1603 v3d_bo_unreference(&uniforms.bo);
1604 v3d_bo_unreference(&v3d->compute_shared_memory);
1605 }
1606 #endif
1607
1608 /**
1609 * Implements gallium's clear() hook (glClear()) by drawing a pair of triangles.
1610 */
1611 static void
1612 v3d_draw_clear(struct v3d_context *v3d,
1613 unsigned buffers,
1614 const union pipe_color_union *color,
1615 double depth, unsigned stencil)
1616 {
1617 static const union pipe_color_union dummy_color = {};
1618
1619 /* The blitter util dereferences the color regardless, even though the
1620 * gallium clear API may not pass one in when only Z/S are cleared.
1621 */
1622 if (!color)
1623 color = &dummy_color;
1624
1625 v3d_blitter_save(v3d);
1626 util_blitter_clear(v3d->blitter,
1627 v3d->framebuffer.width,
1628 v3d->framebuffer.height,
1629 util_framebuffer_get_num_layers(&v3d->framebuffer),
1630 buffers, color, depth, stencil,
1631 util_framebuffer_get_num_samples(&v3d->framebuffer) > 1);
1632 }
1633
1634 /**
1635 * Attempts to perform the GL clear by using the TLB's fast clear at the start
1636 * of the frame.
1637 */
1638 static unsigned
1639 v3d_tlb_clear(struct v3d_job *job, unsigned buffers,
1640 const union pipe_color_union *color,
1641 double depth, unsigned stencil)
1642 {
1643 struct v3d_context *v3d = job->v3d;
1644
1645 if (job->draw_calls_queued) {
1646 /* If anything in the CL has drawn using the buffer, then the
1647 * TLB clear we're trying to add now would happen before that
1648 * drawing.
1649 */
1650 buffers &= ~(job->load | job->store);
1651 }
1652
1653 /* GFXH-1461: If we were to emit a load of just depth or just stencil,
1654 * then the clear for the other may get lost. We need to decide now
1655 * if it would be possible to need to emit a load of just one after
1656 * we've set up our TLB clears.
1657 */
1658 if (buffers & PIPE_CLEAR_DEPTHSTENCIL &&
1659 (buffers & PIPE_CLEAR_DEPTHSTENCIL) != PIPE_CLEAR_DEPTHSTENCIL &&
1660 job->zsbuf &&
1661 util_format_is_depth_and_stencil(job->zsbuf->texture->format)) {
1662 buffers &= ~PIPE_CLEAR_DEPTHSTENCIL;
1663 }
1664
1665 for (int i = 0; i < V3D_MAX_DRAW_BUFFERS; i++) {
1666 uint32_t bit = PIPE_CLEAR_COLOR0 << i;
1667 if (!(buffers & bit))
1668 continue;
1669
1670 struct pipe_surface *psurf = v3d->framebuffer.cbufs[i];
1671 struct v3d_surface *surf = v3d_surface(psurf);
1672 struct v3d_resource *rsc = v3d_resource(psurf->texture);
1673
1674 union util_color uc;
1675 uint32_t internal_size = 4 << surf->internal_bpp;
1676
1677 static union pipe_color_union swapped_color;
1678 if (v3d->swap_color_rb & (1 << i)) {
1679 swapped_color.f[0] = color->f[2];
1680 swapped_color.f[1] = color->f[1];
1681 swapped_color.f[2] = color->f[0];
1682 swapped_color.f[3] = color->f[3];
1683 color = &swapped_color;
1684 }
1685
1686 switch (surf->internal_type) {
1687 case V3D_INTERNAL_TYPE_8:
1688 util_pack_color(color->f, PIPE_FORMAT_R8G8B8A8_UNORM,
1689 &uc);
1690 memcpy(job->clear_color[i], uc.ui, internal_size);
1691 break;
1692 case V3D_INTERNAL_TYPE_8I:
1693 case V3D_INTERNAL_TYPE_8UI:
1694 job->clear_color[i][0] = ((color->ui[0] & 0xff) |
1695 (color->ui[1] & 0xff) << 8 |
1696 (color->ui[2] & 0xff) << 16 |
1697 (color->ui[3] & 0xff) << 24);
1698 break;
1699 case V3D_INTERNAL_TYPE_16F:
1700 util_pack_color(color->f, PIPE_FORMAT_R16G16B16A16_FLOAT,
1701 &uc);
1702 memcpy(job->clear_color[i], uc.ui, internal_size);
1703 break;
1704 case V3D_INTERNAL_TYPE_16I:
1705 case V3D_INTERNAL_TYPE_16UI:
1706 job->clear_color[i][0] = ((color->ui[0] & 0xffff) |
1707 color->ui[1] << 16);
1708 job->clear_color[i][1] = ((color->ui[2] & 0xffff) |
1709 color->ui[3] << 16);
1710 break;
1711 case V3D_INTERNAL_TYPE_32F:
1712 case V3D_INTERNAL_TYPE_32I:
1713 case V3D_INTERNAL_TYPE_32UI:
1714 memcpy(job->clear_color[i], color->ui, internal_size);
1715 break;
1716 }
1717
1718 rsc->initialized_buffers |= bit;
1719 }
1720
1721 unsigned zsclear = buffers & PIPE_CLEAR_DEPTHSTENCIL;
1722 if (zsclear) {
1723 struct v3d_resource *rsc =
1724 v3d_resource(v3d->framebuffer.zsbuf->texture);
1725
1726 if (zsclear & PIPE_CLEAR_DEPTH)
1727 job->clear_z = depth;
1728 if (zsclear & PIPE_CLEAR_STENCIL)
1729 job->clear_s = stencil;
1730
1731 rsc->initialized_buffers |= zsclear;
1732 }
1733
1734 job->draw_min_x = 0;
1735 job->draw_min_y = 0;
1736 job->draw_max_x = v3d->framebuffer.width;
1737 job->draw_max_y = v3d->framebuffer.height;
1738 job->clear |= buffers;
1739 job->store |= buffers;
1740
1741 v3d_start_draw(v3d);
1742
1743 return buffers;
1744 }
1745
1746 static void
1747 v3d_clear(struct pipe_context *pctx, unsigned buffers, const struct pipe_scissor_state *scissor_state,
1748 const union pipe_color_union *color, double depth, unsigned stencil)
1749 {
1750 struct v3d_context *v3d = v3d_context(pctx);
1751 struct v3d_job *job = v3d_get_job_for_fbo(v3d);
1752
1753 buffers &= ~v3d_tlb_clear(job, buffers, color, depth, stencil);
1754
1755 if (buffers)
1756 v3d_draw_clear(v3d, buffers, color, depth, stencil);
1757 }
1758
1759 static void
1760 v3d_clear_render_target(struct pipe_context *pctx, struct pipe_surface *ps,
1761 const union pipe_color_union *color,
1762 unsigned x, unsigned y, unsigned w, unsigned h,
1763 bool render_condition_enabled)
1764 {
1765 fprintf(stderr, "unimpl: clear RT\n");
1766 }
1767
1768 static void
1769 v3d_clear_depth_stencil(struct pipe_context *pctx, struct pipe_surface *ps,
1770 unsigned buffers, double depth, unsigned stencil,
1771 unsigned x, unsigned y, unsigned w, unsigned h,
1772 bool render_condition_enabled)
1773 {
1774 fprintf(stderr, "unimpl: clear DS\n");
1775 }
1776
1777 void
1778 v3dX(draw_init)(struct pipe_context *pctx)
1779 {
1780 pctx->draw_vbo = v3d_draw_vbo;
1781 pctx->clear = v3d_clear;
1782 pctx->clear_render_target = v3d_clear_render_target;
1783 pctx->clear_depth_stencil = v3d_clear_depth_stencil;
1784 #if V3D_VERSION >= 41
1785 if (v3d_context(pctx)->screen->has_csd)
1786 pctx->launch_grid = v3d_launch_grid;
1787 #endif
1788 }