/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 * Copyright © 2018 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
28 #include "pipe/p_state.h"
29 #include "util/u_string.h"
30 #include "util/u_memory.h"
31 #include "util/u_helpers.h"
32 #include "util/format/u_format.h"
33 #include "util/u_viewport.h"
35 #include "freedreno_log.h"
36 #include "freedreno_resource.h"
37 #include "freedreno_state.h"
38 #include "freedreno_query_hw.h"
39 #include "common/freedreno_guardband.h"
42 #include "fd6_blend.h"
43 #include "fd6_const.h"
44 #include "fd6_context.h"
45 #include "fd6_image.h"
47 #include "fd6_program.h"
48 #include "fd6_rasterizer.h"
49 #include "fd6_texture.h"
50 #include "fd6_format.h"
/* Border color layout is diff from a4xx/a5xx.. if it turns out to be
 * the same as a6xx then move this somewhere common ;-)
 *
 * Entry layout looks like (total size, 0x60 bytes):
 */

struct PACKED bcolor_entry {
	uint32_t fp32[4];
	uint16_t ui16[4];
	int16_t  si16[4];
	uint16_t fp16[4];
	uint16_t rgb565;
	uint16_t rgb5a1;
	uint16_t rgba4;
	uint8_t  __pad0[2];
	uint8_t  ui8[4];
	int8_t   si8[4];
	uint32_t rgb10a2;
	uint32_t z24; /* also s8? */
	uint16_t srgb[4];      /* appears to duplicate fp16[], but clamped, used for srgb */
	uint8_t  __pad1[24];
};

#define FD6_BORDER_COLOR_SIZE        sizeof(struct bcolor_entry)
#define FD6_BORDER_COLOR_UPLOAD_SIZE (2 * PIPE_MAX_SAMPLERS * FD6_BORDER_COLOR_SIZE)
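/* The upload buffer holds two consecutive sets of PIPE_MAX_SAMPLERS entries:
 * one for the VS samplers followed by one for the FS samplers (see
 * emit_border_color() below).  The per-sampler bcolor_offset indexes into
 * this combined table.
 */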
static void
setup_border_colors(struct fd_texture_stateobj *tex, struct bcolor_entry *entries)
{
	unsigned i, j;

	STATIC_ASSERT(sizeof(struct bcolor_entry) == FD6_BORDER_COLOR_SIZE);

	for (i = 0; i < tex->num_samplers; i++) {
		struct bcolor_entry *e = &entries[i];
		struct pipe_sampler_state *sampler = tex->samplers[i];
		union pipe_color_union *bc;

		if (!sampler)
			continue;

		bc = &sampler->border_color;

		/*
		 * The border colors need to be swizzled in a particular
		 * format-dependent order. Even though samplers don't know about
		 * formats, we can assume that with a GL state tracker, there's a
		 * 1:1 correspondence between sampler and texture. Take advantage
		 * of that knowledge.
		 */
		if ((i >= tex->num_textures) || !tex->textures[i])
			continue;

		struct pipe_sampler_view *view = tex->textures[i];
		enum pipe_format format = view->format;
		const struct util_format_description *desc =
				util_format_description(format);

		e->rgb565 = 0;
		e->rgb5a1 = 0;
		e->rgba4 = 0;
		e->rgb10a2 = 0;
		e->z24 = 0;

		unsigned char swiz[4];

		fd6_tex_swiz(format, swiz,
				view->swizzle_r, view->swizzle_g,
				view->swizzle_b, view->swizzle_a);

		for (j = 0; j < 4; j++) {
			int c = swiz[j];
			int cd = c;

			/*
			 * HACK: for PIPE_FORMAT_X24S8_UINT we end up w/ the
			 * stencil border color value in bc->ui[0] but according
			 * to desc->swizzle and desc->channel, the .x/.w component
			 * is NONE and the stencil value is in the y component.
			 * Meanwhile the hardware wants this in the .w component
			 * for x24s8 and the .x component for x32_s8x24.
			 */
			if ((format == PIPE_FORMAT_X24S8_UINT) ||
					(format == PIPE_FORMAT_X32_S8X24_UINT)) {
				c = 1;
				cd = (format == PIPE_FORMAT_X32_S8X24_UINT) ? 0 : 3;
			} else if (c >= 4) {
				continue;
			}

			if (desc->channel[c].pure_integer) {
				uint16_t clamped;
				switch (desc->channel[c].size) {
				case 2:
					assert(desc->channel[c].type == UTIL_FORMAT_TYPE_UNSIGNED);
					clamped = CLAMP(bc->ui[j], 0, 0x3);
					break;
				case 8:
					if (desc->channel[c].type == UTIL_FORMAT_TYPE_SIGNED)
						clamped = CLAMP(bc->i[j], -128, 127);
					else
						clamped = CLAMP(bc->ui[j], 0, 255);
					break;
				case 10:
					assert(desc->channel[c].type == UTIL_FORMAT_TYPE_UNSIGNED);
					clamped = CLAMP(bc->ui[j], 0, 0x3ff);
					break;
				case 16:
					if (desc->channel[c].type == UTIL_FORMAT_TYPE_SIGNED)
						clamped = CLAMP(bc->i[j], -32768, 32767);
					else
						clamped = CLAMP(bc->ui[j], 0, 65535);
					break;
				default:
					assert(!"Unexpected bit size");
				case 32:
					clamped = 0;
					break;
				}
				e->fp32[cd] = bc->ui[j];
				e->fp16[cd] = clamped;
			} else {
				float f = bc->f[j];
				float f_u = CLAMP(f, 0, 1);
				float f_s = CLAMP(f, -1, 1);

				e->fp32[c] = fui(f);
				e->fp16[c] = util_float_to_half(f);
				e->srgb[c] = util_float_to_half(f_u);
				e->ui16[c] = f_u * 0xffff;
				e->si16[c] = f_s * 0x7fff;
				e->ui8[c]  = f_u * 0xff;
				e->si8[c]  = f_s * 0x7f;
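				/* For the packed formats below, each float channel is
				 * quantized into its bitfield, e.g. a white border color
				 * (1.0, 1.0, 1.0, 1.0) packs to
				 * rgb565 = 0x1f | (0x3f << 5) | (0x1f << 11) = 0xffff.
				 */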
				if (c == 1)
					e->rgb565 |= (int)(f_u * 0x3f) << 5;
				else if (c < 3)
					e->rgb565 |= (int)(f_u * 0x1f) << (c ? 11 : 0);
				if (c == 3)
					e->rgb5a1 |= (f_u > 0.5) ? 0x8000 : 0;
				else
					e->rgb5a1 |= (int)(f_u * 0x1f) << (c * 5);
				if (c == 3)
					e->rgb10a2 |= (int)(f_u * 0x3) << 30;
				else
					e->rgb10a2 |= (int)(f_u * 0x3ff) << (c * 10);
				e->rgba4 |= (int)(f_u * 0xf) << (c * 4);
				if (c == 0)
					e->z24 = f_u * 0xffffff;
			}
		}

		memset(&e->__pad0, 0, sizeof(e->__pad0));
		memset(&e->__pad1, 0, sizeof(e->__pad1));
	}
}
static void
emit_border_color(struct fd_context *ctx, struct fd_ringbuffer *ring)
{
	struct fd6_context *fd6_ctx = fd6_context(ctx);
	struct bcolor_entry *entries;
	unsigned off;
	void *ptr;

	STATIC_ASSERT(sizeof(struct bcolor_entry) == FD6_BORDER_COLOR_SIZE);

	u_upload_alloc(fd6_ctx->border_color_uploader,
			0, FD6_BORDER_COLOR_UPLOAD_SIZE,
			FD6_BORDER_COLOR_UPLOAD_SIZE, &off,
			&fd6_ctx->border_color_buf,
			&ptr);

	entries = ptr;

	setup_border_colors(&ctx->tex[PIPE_SHADER_VERTEX], &entries[0]);
	setup_border_colors(&ctx->tex[PIPE_SHADER_FRAGMENT],
			&entries[ctx->tex[PIPE_SHADER_VERTEX].num_samplers]);

	OUT_PKT4(ring, REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR_LO, 2);
	OUT_RELOC(ring, fd_resource(fd6_ctx->border_color_buf)->bo, off, 0, 0);

	u_upload_unmap(fd6_ctx->border_color_uploader);
}
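/* Emit a texture descriptor aliasing the current color buffer while it
 * lives in GMEM, used for framebuffer-fetch.  TEX_CONST_2 is emitted via
 * OUT_RINGP() so it can be patched later, once the GMEM layout of the
 * batch is actually known.
 */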
static void
fd6_emit_fb_tex(struct fd_ringbuffer *state, struct fd_context *ctx)
{
	struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
	struct pipe_surface *psurf = pfb->cbufs[0];
	struct fd_resource *rsc = fd_resource(psurf->texture);

	uint32_t texconst0 = fd6_tex_const_0(psurf->texture, psurf->u.tex.level,
			psurf->format, PIPE_SWIZZLE_X, PIPE_SWIZZLE_Y,
			PIPE_SWIZZLE_Z, PIPE_SWIZZLE_W);

	/* always TILE6_2 mode in GMEM.. which also means no swap: */
	texconst0 &= ~(A6XX_TEX_CONST_0_SWAP__MASK | A6XX_TEX_CONST_0_TILE_MODE__MASK);
	texconst0 |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);

	OUT_RING(state, texconst0);
	OUT_RING(state, A6XX_TEX_CONST_1_WIDTH(pfb->width) |
			A6XX_TEX_CONST_1_HEIGHT(pfb->height));
	OUT_RINGP(state, A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D),
			&ctx->batch->fb_read_patches);
	OUT_RING(state, A6XX_TEX_CONST_3_ARRAY_PITCH(rsc->layout.layer_size));

	OUT_RING(state, A6XX_TEX_CONST_4_BASE_LO(ctx->screen->gmem_base));
	OUT_RING(state, A6XX_TEX_CONST_5_BASE_HI(ctx->screen->gmem_base >> 32) |
			A6XX_TEX_CONST_5_DEPTH(1));
	OUT_RING(state, 0);   /* texconst6 */
	OUT_RING(state, 0);   /* texconst7 */
	OUT_RING(state, 0);   /* texconst8 */
	OUT_RING(state, 0);   /* texconst9 */
	OUT_RING(state, 0);   /* texconst10 */
	OUT_RING(state, 0);   /* texconst11 */
	OUT_RING(state, 0);
	OUT_RING(state, 0);
	OUT_RING(state, 0);
	OUT_RING(state, 0);
}
bool
fd6_emit_textures(struct fd_pipe *pipe, struct fd_ringbuffer *ring,
		enum pipe_shader_type type, struct fd_texture_stateobj *tex,
		unsigned bcolor_offset,
		/* can be NULL if no image/SSBO/fb state to merge in: */
		const struct ir3_shader_variant *v, struct fd_context *ctx)
{
	bool needs_border = false;
	unsigned opcode, tex_samp_reg, tex_const_reg, tex_count_reg;
	enum a6xx_state_block sb;

	switch (type) {
	case PIPE_SHADER_VERTEX:
		sb = SB6_VS_TEX;
		opcode = CP_LOAD_STATE6_GEOM;
		tex_samp_reg = REG_A6XX_SP_VS_TEX_SAMP_LO;
		tex_const_reg = REG_A6XX_SP_VS_TEX_CONST_LO;
		tex_count_reg = REG_A6XX_SP_VS_TEX_COUNT;
		break;
	case PIPE_SHADER_TESS_CTRL:
		sb = SB6_HS_TEX;
		opcode = CP_LOAD_STATE6_GEOM;
		tex_samp_reg = REG_A6XX_SP_HS_TEX_SAMP_LO;
		tex_const_reg = REG_A6XX_SP_HS_TEX_CONST_LO;
		tex_count_reg = REG_A6XX_SP_HS_TEX_COUNT;
		break;
	case PIPE_SHADER_TESS_EVAL:
		sb = SB6_DS_TEX;
		opcode = CP_LOAD_STATE6_GEOM;
		tex_samp_reg = REG_A6XX_SP_DS_TEX_SAMP_LO;
		tex_const_reg = REG_A6XX_SP_DS_TEX_CONST_LO;
		tex_count_reg = REG_A6XX_SP_DS_TEX_COUNT;
		break;
	case PIPE_SHADER_GEOMETRY:
		sb = SB6_GS_TEX;
		opcode = CP_LOAD_STATE6_GEOM;
		tex_samp_reg = REG_A6XX_SP_GS_TEX_SAMP_LO;
		tex_const_reg = REG_A6XX_SP_GS_TEX_CONST_LO;
		tex_count_reg = REG_A6XX_SP_GS_TEX_COUNT;
		break;
	case PIPE_SHADER_FRAGMENT:
		sb = SB6_FS_TEX;
		opcode = CP_LOAD_STATE6_FRAG;
		tex_samp_reg = REG_A6XX_SP_FS_TEX_SAMP_LO;
		tex_const_reg = REG_A6XX_SP_FS_TEX_CONST_LO;
		tex_count_reg = REG_A6XX_SP_FS_TEX_COUNT;
		break;
	case PIPE_SHADER_COMPUTE:
		sb = SB6_CS_TEX;
		opcode = CP_LOAD_STATE6_FRAG;
		tex_samp_reg = REG_A6XX_SP_CS_TEX_SAMP_LO;
		tex_const_reg = REG_A6XX_SP_CS_TEX_CONST_LO;
		tex_count_reg = REG_A6XX_SP_CS_TEX_COUNT;
		break;
	default:
		unreachable("bad state block");
	}
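	/* Sampler and texture state are each built into a separate stateobj,
	 * which is then referenced twice: once via CP_LOAD_STATE6 (w/ the
	 * SS6_INDIRECT src) so the CP loads it into the stage's state memory,
	 * and once via the SP_xx_TEX_SAMP/_TEX_CONST address registers, both
	 * pointing at the same buffer.
	 */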
	if (tex->num_samplers > 0) {
		struct fd_ringbuffer *state =
			fd_ringbuffer_new_object(pipe, tex->num_samplers * 4 * 4);
		for (unsigned i = 0; i < tex->num_samplers; i++) {
			static const struct fd6_sampler_stateobj dummy_sampler = {};
			const struct fd6_sampler_stateobj *sampler = tex->samplers[i] ?
				fd6_sampler_stateobj(tex->samplers[i]) : &dummy_sampler;
			OUT_RING(state, sampler->texsamp0);
			OUT_RING(state, sampler->texsamp1);
			OUT_RING(state, sampler->texsamp2 |
				A6XX_TEX_SAMP_2_BCOLOR(i + bcolor_offset));
			OUT_RING(state, sampler->texsamp3);
			needs_border |= sampler->needs_border;
		}

		/* output sampler state: */
		OUT_PKT7(ring, opcode, 3);
		OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(0) |
			CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
			CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
			CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
			CP_LOAD_STATE6_0_NUM_UNIT(tex->num_samplers));
		OUT_RB(ring, state); /* SRC_ADDR_LO/HI */

		OUT_PKT4(ring, tex_samp_reg, 2);
		OUT_RB(ring, state); /* SRC_ADDR_LO/HI */

		fd_ringbuffer_del(state);
	}
	unsigned num_merged_textures = tex->num_textures;
	unsigned num_textures = tex->num_textures;
	if (v) {
		num_merged_textures += v->image_mapping.num_tex;

		if (v->fb_read)
			num_merged_textures++;

		/* There could be more bound textures than what the shader uses.
		 * Which isn't known at shader compile time.  So in the case we
		 * are merging tex state, only emit the textures that the shader
		 * uses (since the image/SSBO related tex state comes immediately
		 * after):
		 */
		num_textures = v->image_mapping.tex_base;
	}
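	/* Each texture descriptor is 16 dwords; any image/SSBO (and fb_read)
	 * descriptors are appended after the shader's own textures, matching
	 * the tex_base offset from the image_mapping set up at compile time.
	 */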
	if (num_merged_textures > 0) {
		struct fd_ringbuffer *state =
			fd_ringbuffer_new_object(pipe, num_merged_textures * 16 * 4);
		for (unsigned i = 0; i < num_textures; i++) {
			static const struct fd6_pipe_sampler_view dummy_view = {};
			const struct fd6_pipe_sampler_view *view = tex->textures[i] ?
				fd6_pipe_sampler_view(tex->textures[i]) : &dummy_view;
			struct fd_resource *rsc = NULL;

			if (view->base.texture)
				rsc = fd_resource(view->base.texture);

			OUT_RING(state, view->texconst0);
			OUT_RING(state, view->texconst1);
			OUT_RING(state, view->texconst2);
			OUT_RING(state, view->texconst3);

			if (rsc) {
				if (view->base.format == PIPE_FORMAT_X32_S8X24_UINT)
					rsc = rsc->stencil;
				OUT_RELOC(state, rsc->bo, view->offset,
					(uint64_t)view->texconst5 << 32, 0);
			} else {
				OUT_RING(state, 0x00000000);
				OUT_RING(state, view->texconst5);
			}

			OUT_RING(state, view->texconst6);

			if (rsc && view->ubwc_enabled) {
				OUT_RELOC(state, rsc->bo, view->ubwc_offset, 0, 0);
			} else {
				OUT_RING(state, 0);
				OUT_RING(state, 0);
			}

			OUT_RING(state, view->texconst9);
			OUT_RING(state, view->texconst10);
			OUT_RING(state, view->texconst11);
			OUT_RING(state, 0);
			OUT_RING(state, 0);
			OUT_RING(state, 0);
			OUT_RING(state, 0);
		}

		if (v) {
			const struct ir3_ibo_mapping *mapping = &v->image_mapping;
			struct fd_shaderbuf_stateobj *buf = &ctx->shaderbuf[type];
			struct fd_shaderimg_stateobj *img = &ctx->shaderimg[type];

			for (unsigned i = 0; i < mapping->num_tex; i++) {
				unsigned idx = mapping->tex_to_image[i];
				if (idx & IBO_SSBO) {
					fd6_emit_ssbo_tex(state, &buf->sb[idx & ~IBO_SSBO]);
				} else {
					fd6_emit_image_tex(state, &img->si[idx]);
				}
			}

			if (v->fb_read) {
				fd6_emit_fb_tex(state, ctx);
			}
		}

		/* emit texture state: */
		OUT_PKT7(ring, opcode, 3);
		OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(0) |
			CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
			CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
			CP_LOAD_STATE6_0_STATE_BLOCK(sb) |
			CP_LOAD_STATE6_0_NUM_UNIT(num_merged_textures));
		OUT_RB(ring, state); /* SRC_ADDR_LO/HI */

		OUT_PKT4(ring, tex_const_reg, 2);
		OUT_RB(ring, state); /* SRC_ADDR_LO/HI */

		fd_ringbuffer_del(state);
	}

	OUT_PKT4(ring, tex_count_reg, 1);
	OUT_RING(ring, num_merged_textures);

	return needs_border;
}
/* Emits combined texture state, which also includes any Image/SSBO
 * related texture state merged in (because we must have all texture
 * state for a given stage in a single buffer).  In the fast-path, if
 * we don't need to merge in any image/ssbo related texture state, we
 * just use cached texture stateobj.  Otherwise we generate a single-
 * use stateobj.
 *
 * TODO Is there some sane way we can still use cached texture stateobj
 * with image/ssbo in use?
 *
 * returns whether border_color is required:
 */
static bool
fd6_emit_combined_textures(struct fd_ringbuffer *ring, struct fd6_emit *emit,
		enum pipe_shader_type type, const struct ir3_shader_variant *v)
{
	struct fd_context *ctx = emit->ctx;
	bool needs_border = false;

	static const struct {
		enum fd6_state_id state_id;
		unsigned enable_mask;
	} s[PIPE_SHADER_TYPES] = {
		[PIPE_SHADER_VERTEX]    = { FD6_GROUP_VS_TEX, ENABLE_ALL },
		[PIPE_SHADER_TESS_CTRL] = { FD6_GROUP_HS_TEX, ENABLE_ALL },
		[PIPE_SHADER_TESS_EVAL] = { FD6_GROUP_DS_TEX, ENABLE_ALL },
		[PIPE_SHADER_GEOMETRY]  = { FD6_GROUP_GS_TEX, ENABLE_ALL },
		[PIPE_SHADER_FRAGMENT]  = { FD6_GROUP_FS_TEX, ENABLE_DRAW },
	};

	debug_assert(s[type].state_id);

	if (!v->image_mapping.num_tex && !v->fb_read) {
		/* in the fast-path, when we don't have to mix in any image/SSBO
		 * related texture state, we can just lookup the stateobj and
		 * re-emit that:
		 *
		 * Also, framebuffer-read is a slow-path because an extra
		 * texture needs to be inserted.
		 *
		 * TODO we can probably simplify things if we also treated
		 * border_color as a slow-path.. this way the tex state key
		 * wouldn't depend on bcolor_offset.. but fb_read might rather
		 * be *somehow* a fast-path if we eventually used it for PLS.
		 * I suppose there would be no harm in just *always* inserting
		 * an fb_read texture?
		 */
		if ((ctx->dirty_shader[type] & FD_DIRTY_SHADER_TEX) &&
				ctx->tex[type].num_textures > 0) {
			struct fd6_texture_state *tex = fd6_texture_state(ctx,
					type, &ctx->tex[type]);

			needs_border |= tex->needs_border;

			fd6_emit_add_group(emit, tex->stateobj, s[type].state_id,
					s[type].enable_mask);
		}
	} else {
		/* In the slow-path, create a one-shot texture state object
		 * if either TEX|PROG|SSBO|IMAGE state is dirty:
		 */
		if ((ctx->dirty_shader[type] &
				(FD_DIRTY_SHADER_TEX | FD_DIRTY_SHADER_PROG |
				 FD_DIRTY_SHADER_IMAGE | FD_DIRTY_SHADER_SSBO)) ||
				v->fb_read) {
			struct fd_texture_stateobj *tex = &ctx->tex[type];
			struct fd_ringbuffer *stateobj =
				fd_submit_new_ringbuffer(ctx->batch->submit,
					0x1000, FD_RINGBUFFER_STREAMING);
			unsigned bcolor_offset =
				fd6_border_color_offset(ctx, type, tex);

			needs_border |= fd6_emit_textures(ctx->pipe, stateobj, type, tex,
					bcolor_offset, v, ctx);

			fd6_emit_take_group(emit, stateobj, s[type].state_id,
					s[type].enable_mask);
		}
	}

	return needs_border;
}
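/* Build the vertex buffer state: one 4-dword VFD_FETCH entry (base lo/hi,
 * size, stride) per bound vertex buffer.
 */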
static struct fd_ringbuffer *
build_vbo_state(struct fd6_emit *emit)
{
	const struct fd_vertex_state *vtx = emit->vtx;

	struct fd_ringbuffer *ring = fd_submit_new_ringbuffer(emit->ctx->batch->submit,
			4 * (1 + vtx->vertexbuf.count * 4), FD_RINGBUFFER_STREAMING);

	OUT_PKT4(ring, REG_A6XX_VFD_FETCH(0), 4 * vtx->vertexbuf.count);
	for (int32_t j = 0; j < vtx->vertexbuf.count; j++) {
		const struct pipe_vertex_buffer *vb = &vtx->vertexbuf.vb[j];
		struct fd_resource *rsc = fd_resource(vb->buffer.resource);
		if (rsc == NULL) {
			OUT_RING(ring, 0);
			OUT_RING(ring, 0);
			OUT_RING(ring, 0);
			OUT_RING(ring, 0);
		} else {
			uint32_t off = vb->buffer_offset;
			uint32_t size = fd_bo_size(rsc->bo) - off;

			OUT_RELOC(ring, rsc->bo, off, 0, 0);
			OUT_RING(ring, size);           /* VFD_FETCH[j].SIZE */
			OUT_RING(ring, vb->stride);     /* VFD_FETCH[j].STRIDE */
		}
	}

	return ring;
}
static enum a6xx_ztest_mode
compute_ztest_mode(struct fd6_emit *emit, bool lrz_valid)
{
	struct fd_context *ctx = emit->ctx;
	struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
	struct fd6_zsa_stateobj *zsa = fd6_zsa_stateobj(ctx->zsa);
	const struct ir3_shader_variant *fs = emit->fs;

	if (fs->shader->nir->info.fs.early_fragment_tests)
		return A6XX_EARLY_Z;

	if (fs->no_earlyz || fs->writes_pos || !zsa->base.depth.enabled) {
		return A6XX_LATE_Z;
	} else if ((fs->has_kill || zsa->alpha_test) &&
			(zsa->base.depth.writemask || !pfb->zsbuf)) {
		/* Slightly odd, but seems like the hw wants us to select
		 * LATE_Z mode if there is no depth buffer + discard.  Either
		 * that, or when occlusion query is enabled.  See:
		 *
		 * dEQP-GLES31.functional.fbo.no_attachments.*
		 */
		return lrz_valid ? A6XX_EARLY_LRZ_LATE_Z : A6XX_LATE_Z;
	} else {
		return A6XX_EARLY_Z;
	}
}
/**
 * Calculate normalized LRZ state based on zsa/prog/blend state, updating
 * the zsbuf's lrz state as necessary to detect the cases where we need
 * to invalidate lrz.
 */
static struct fd6_lrz_state
compute_lrz_state(struct fd6_emit *emit, bool binning_pass)
{
	struct fd_context *ctx = emit->ctx;
	struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
	const struct ir3_shader_variant *fs = emit->fs;
	struct fd6_lrz_state lrz;

	if (!pfb->zsbuf) {
		memset(&lrz, 0, sizeof(lrz));
		if (!binning_pass) {
			lrz.z_mode = compute_ztest_mode(emit, false);
		}
		return lrz;
	}

	struct fd6_blend_stateobj *blend = fd6_blend_stateobj(ctx->blend);
	struct fd6_zsa_stateobj *zsa = fd6_zsa_stateobj(ctx->zsa);
	struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);

	lrz = zsa->lrz;

	/* normalize lrz state: */
	if (blend->reads_dest || fs->writes_pos || fs->no_earlyz || fs->has_kill) {
		lrz.write = false;
		if (binning_pass)
			lrz.enable = false;
	}

	/* if we change depthfunc direction, bail out on using LRZ.  The
	 * LRZ buffer encodes a min/max depth value per block, but if
	 * we switch from GT/GE <-> LT/LE, those values cannot be
	 * interpreted properly.
	 */
	if (zsa->base.depth.enabled &&
			(rsc->lrz_direction != FD_LRZ_UNKNOWN) &&
			(rsc->lrz_direction != lrz.direction)) {
		rsc->lrz_valid = false;
	}

	if (zsa->invalidate_lrz || !rsc->lrz_valid) {
		rsc->lrz_valid = false;
		memset(&lrz, 0, sizeof(lrz));
	}

	if (fs->no_earlyz || fs->writes_pos) {
		lrz.enable = false;
		lrz.write = false;
		lrz.test = false;
	}

	if (!binning_pass) {
		lrz.z_mode = compute_ztest_mode(emit, rsc->lrz_valid);
	}

	/* Once we start writing to the real depth buffer, we lock in the
	 * direction for LRZ.. if we have to skip a LRZ write for any
	 * reason, it is still safe to have LRZ until there is a direction
	 * reversal.  Prior to the reversal, since we disabled LRZ writes
	 * in the "unsafe" cases, this just means that the LRZ test may
	 * not early-discard some things that end up not passing a later
	 * test (ie. be overly conservative).  But once you have a reversal
	 * of direction, it is possible to increase/decrease the z value
	 * to the point where the overly-conservative test is incorrect.
	 */
	if (zsa->base.depth.writemask) {
		rsc->lrz_direction = lrz.direction;
	}

	return lrz;
}
static struct fd_ringbuffer *
build_lrz(struct fd6_emit *emit, bool binning_pass)
{
	struct fd_context *ctx = emit->ctx;
	struct fd6_context *fd6_ctx = fd6_context(ctx);
	struct fd6_lrz_state lrz =
			compute_lrz_state(emit, binning_pass);

	/* If the LRZ state has not changed, we can skip the emit: */
	if (!ctx->last.dirty &&
			!memcmp(&fd6_ctx->last.lrz[binning_pass], &lrz, sizeof(lrz)))
		return NULL;

	fd6_ctx->last.lrz[binning_pass] = lrz;

	struct fd_ringbuffer *ring = fd_submit_new_ringbuffer(ctx->batch->submit,
			8*4, FD_RINGBUFFER_STREAMING);

	OUT_REG(ring, A6XX_GRAS_LRZ_CNTL(
			.enable        = lrz.enable,
			.lrz_write     = lrz.write,
			.greater       = lrz.direction == FD_LRZ_GREATER,
			.z_test_enable = lrz.test,
		));
	OUT_REG(ring, A6XX_RB_LRZ_CNTL(
			.enable = lrz.enable,
		));

	OUT_REG(ring, A6XX_RB_DEPTH_PLANE_CNTL(
			.z_mode = lrz.z_mode,
		));

	OUT_REG(ring, A6XX_GRAS_SU_DEPTH_PLANE_CNTL(
			.z_mode = lrz.z_mode,
		));

	return ring;
}
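/* Emit streamout buffer/offset setup.  When a target was freshly set
 * (so->reset), the append offset is programmed directly; otherwise it is
 * reloaded from the flush_base control buffer that the hw wrote back on
 * the previous flush (CP_MEM_TO_REG, with what is presumably a unit
 * conversion via SHIFT_BY_2).
 */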
static void
fd6_emit_streamout(struct fd_ringbuffer *ring, struct fd6_emit *emit,
		struct ir3_stream_output_info *info)
{
	struct fd_context *ctx = emit->ctx;
	const struct fd6_program_state *prog = fd6_emit_get_prog(emit);
	struct fd_streamout_stateobj *so = &ctx->streamout;

	emit->streamout_mask = 0;

	for (unsigned i = 0; i < so->num_targets; i++) {
		struct pipe_stream_output_target *target = so->targets[i];

		if (!target)
			continue;

		OUT_PKT4(ring, REG_A6XX_VPC_SO_BUFFER_BASE_LO(i), 3);
		/* VPC_SO[i].BUFFER_BASE_LO: */
		OUT_RELOC(ring, fd_resource(target->buffer)->bo, target->buffer_offset, 0, 0);
		OUT_RING(ring, target->buffer_size - target->buffer_offset);

		if (so->reset & (1 << i)) {
			unsigned offset = (so->offsets[i] * info->stride[i] * 4);
			OUT_PKT4(ring, REG_A6XX_VPC_SO_BUFFER_OFFSET(i), 1);
			OUT_RING(ring, offset);
		} else {
			OUT_PKT7(ring, CP_MEM_TO_REG, 3);
			OUT_RING(ring, CP_MEM_TO_REG_0_REG(REG_A6XX_VPC_SO_BUFFER_OFFSET(i)) |
					CP_MEM_TO_REG_0_SHIFT_BY_2 | CP_MEM_TO_REG_0_UNK31 |
					CP_MEM_TO_REG_0_CNT(0));
			OUT_RELOC(ring, control_ptr(fd6_context(ctx), flush_base[i].offset));
		}

		OUT_PKT4(ring, REG_A6XX_VPC_SO_FLUSH_BASE_LO(i), 2);
		OUT_RELOC(ring, control_ptr(fd6_context(ctx), flush_base[i]));

		so->reset &= ~(1 << i);

		emit->streamout_mask |= (1 << i);
	}

	if (emit->streamout_mask) {
		fd6_emit_add_group(emit, prog->streamout_stateobj, FD6_GROUP_SO, ENABLE_ALL);
	} else if (ctx->last.streamout_mask != 0) {
		/* If we transition from a draw with streamout to one without, turn
		 * off streamout.
		 */
		struct fd_ringbuffer *obj = fd_submit_new_ringbuffer(emit->ctx->batch->submit,
				5 * 4, FD_RINGBUFFER_STREAMING);

		OUT_PKT7(obj, CP_CONTEXT_REG_BUNCH, 4);
		OUT_RING(obj, REG_A6XX_VPC_SO_CNTL);
		OUT_RING(obj, 0);
		OUT_RING(obj, REG_A6XX_VPC_SO_BUF_CNTL);
		OUT_RING(obj, 0);

		fd6_emit_take_group(emit, obj, FD6_GROUP_SO, ENABLE_ALL);
	}

	ctx->last.streamout_mask = emit->streamout_mask;
}
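/* Emit the streaming (one-shot) state for a draw, collecting dirty state
 * groups which get dispatched to the hw via CP_SET_DRAW_STATE at the end.
 */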
void
fd6_emit_state(struct fd_ringbuffer *ring, struct fd6_emit *emit)
{
	struct fd_context *ctx = emit->ctx;
	struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
	const struct fd6_program_state *prog = fd6_emit_get_prog(emit);
	const struct ir3_shader_variant *vs = emit->vs;
	const struct ir3_shader_variant *hs = emit->hs;
	const struct ir3_shader_variant *ds = emit->ds;
	const struct ir3_shader_variant *gs = emit->gs;
	const struct ir3_shader_variant *fs = emit->fs;
	const enum fd_dirty_3d_state dirty = emit->dirty;
	bool needs_border = false;

	emit_marker6(ring, 5);

	/* NOTE: we track fb_read differently than _BLEND_ENABLED since
	 * we might at some point decide to do sysmem in some cases when
	 * blend is enabled:
	 */
	if (fs->fb_read)
		ctx->batch->gmem_reason |= FD_GMEM_FB_READ;
	if (emit->dirty & FD_DIRTY_VTXSTATE) {
		struct fd6_vertex_stateobj *vtx = fd6_vertex_stateobj(ctx->vtx.vtx);

		fd6_emit_add_group(emit, vtx->stateobj, FD6_GROUP_VTXSTATE, ENABLE_ALL);
	}

	if (emit->dirty & FD_DIRTY_VTXBUF) {
		struct fd_ringbuffer *state;

		state = build_vbo_state(emit);
		fd6_emit_take_group(emit, state, FD6_GROUP_VBO, ENABLE_ALL);
	}

	if (dirty & (FD_DIRTY_ZSA | FD_DIRTY_RASTERIZER)) {
		struct fd_ringbuffer *state =
			fd6_zsa_state(ctx,
					util_format_is_pure_integer(pipe_surface_format(pfb->cbufs[0])),
					fd_depth_clamp_enabled(ctx));

		fd6_emit_add_group(emit, state, FD6_GROUP_ZSA, ENABLE_ALL);
	}

	if (dirty & (FD_DIRTY_ZSA | FD_DIRTY_BLEND | FD_DIRTY_PROG)) {
		struct fd_ringbuffer *state;

		state = build_lrz(emit, false);
		if (state) {
			fd6_emit_take_group(emit, state, FD6_GROUP_LRZ, ENABLE_DRAW);
		}

		state = build_lrz(emit, true);
		if (state) {
			fd6_emit_take_group(emit, state,
					FD6_GROUP_LRZ_BINNING, CP_SET_DRAW_STATE__0_BINNING);
		}
	}
& FD_DIRTY_STENCIL_REF
) {
852 struct pipe_stencil_ref
*sr
= &ctx
->stencil_ref
;
854 OUT_PKT4(ring
, REG_A6XX_RB_STENCILREF
, 1);
855 OUT_RING(ring
, A6XX_RB_STENCILREF_REF(sr
->ref_value
[0]) |
856 A6XX_RB_STENCILREF_BFREF(sr
->ref_value
[1]));
859 /* NOTE: scissor enabled bit is part of rasterizer state, but
860 * fd_rasterizer_state_bind() will mark scissor dirty if needed:
862 if (dirty
& FD_DIRTY_SCISSOR
) {
863 struct fd_ringbuffer
*ring
= fd_submit_new_ringbuffer(
864 emit
->ctx
->batch
->submit
, 3*4, FD_RINGBUFFER_STREAMING
);
865 struct pipe_scissor_state
*scissor
= fd_context_get_scissor(ctx
);
868 A6XX_GRAS_SC_SCREEN_SCISSOR_TL(0,
872 A6XX_GRAS_SC_SCREEN_SCISSOR_BR(0,
873 .x
= MAX2(scissor
->maxx
, 1) - 1,
874 .y
= MAX2(scissor
->maxy
, 1) - 1
878 fd6_emit_take_group(emit
, ring
, FD6_GROUP_SCISSOR
, ENABLE_ALL
);
880 ctx
->batch
->max_scissor
.minx
= MIN2(ctx
->batch
->max_scissor
.minx
, scissor
->minx
);
881 ctx
->batch
->max_scissor
.miny
= MIN2(ctx
->batch
->max_scissor
.miny
, scissor
->miny
);
882 ctx
->batch
->max_scissor
.maxx
= MAX2(ctx
->batch
->max_scissor
.maxx
, scissor
->maxx
);
883 ctx
->batch
->max_scissor
.maxy
= MAX2(ctx
->batch
->max_scissor
.maxy
, scissor
->maxy
);
	if (dirty & FD_DIRTY_VIEWPORT) {
		struct pipe_scissor_state *scissor = &ctx->viewport_scissor;

		OUT_REG(ring,
				A6XX_GRAS_CL_VPORT_XOFFSET(0, ctx->viewport.translate[0]),
				A6XX_GRAS_CL_VPORT_XSCALE(0, ctx->viewport.scale[0]),
				A6XX_GRAS_CL_VPORT_YOFFSET(0, ctx->viewport.translate[1]),
				A6XX_GRAS_CL_VPORT_YSCALE(0, ctx->viewport.scale[1]),
				A6XX_GRAS_CL_VPORT_ZOFFSET(0, ctx->viewport.translate[2]),
				A6XX_GRAS_CL_VPORT_ZSCALE(0, ctx->viewport.scale[2])
			);

		OUT_REG(ring,
				A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL(0,
					.x = scissor->minx,
					.y = scissor->miny
				),
				A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR(0,
					.x = MAX2(scissor->maxx, 1) - 1,
					.y = MAX2(scissor->maxy, 1) - 1
				));

		unsigned guardband_x =
			fd_calc_guardband(ctx->viewport.translate[0], ctx->viewport.scale[0],
					false);
		unsigned guardband_y =
			fd_calc_guardband(ctx->viewport.translate[1], ctx->viewport.scale[1],
					false);

		OUT_REG(ring, A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ(
					.horz = guardband_x,
					.vert = guardband_y
				));
	}

	/* The clamp ranges are only used when the rasterizer wants depth
	 * clamp.
	 */
	if ((dirty & (FD_DIRTY_VIEWPORT | FD_DIRTY_RASTERIZER)) &&
			fd_depth_clamp_enabled(ctx)) {
		float zmin, zmax;
		util_viewport_zmin_zmax(&ctx->viewport, ctx->rasterizer->clip_halfz,
				&zmin, &zmax);

		OUT_REG(ring,
				A6XX_GRAS_CL_Z_CLAMP_MIN(0, zmin),
				A6XX_GRAS_CL_Z_CLAMP_MAX(0, zmax));

		OUT_REG(ring,
				A6XX_RB_Z_CLAMP_MIN(zmin),
				A6XX_RB_Z_CLAMP_MAX(zmax));
	}
	if (dirty & FD_DIRTY_PROG) {
		fd6_emit_add_group(emit, prog->config_stateobj, FD6_GROUP_PROG_CONFIG, ENABLE_ALL);
		fd6_emit_add_group(emit, prog->stateobj, FD6_GROUP_PROG, ENABLE_DRAW);
		fd6_emit_add_group(emit, prog->binning_stateobj,
				FD6_GROUP_PROG_BINNING, CP_SET_DRAW_STATE__0_BINNING);

		/* emit remaining streaming program state, ie. what depends on
		 * other emit state, so cannot be pre-baked.
		 */
		struct fd_ringbuffer *streaming = fd6_program_interp_state(emit);

		fd6_emit_take_group(emit, streaming, FD6_GROUP_PROG_INTERP, ENABLE_DRAW);
	}
	if (dirty & FD_DIRTY_RASTERIZER) {
		struct fd_ringbuffer *stateobj =
			fd6_rasterizer_state(ctx, emit->primitive_restart);
		fd6_emit_add_group(emit, stateobj,
				FD6_GROUP_RASTERIZER, ENABLE_ALL);
	}

	if (dirty & (FD_DIRTY_FRAMEBUFFER | FD_DIRTY_RASTERIZER_DISCARD | FD_DIRTY_PROG)) {
		struct fd_ringbuffer *ring = fd_submit_new_ringbuffer(
				emit->ctx->batch->submit, 5 * 4, FD_RINGBUFFER_STREAMING);

		unsigned nr = pfb->nr_cbufs;

		if (ctx->rasterizer->rasterizer_discard)
			nr = 0;

		OUT_PKT4(ring, REG_A6XX_RB_FS_OUTPUT_CNTL0, 2);
		OUT_RING(ring, COND(fs->writes_pos, A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_Z) |
				COND(fs->writes_smask && pfb->samples > 1,
						A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_SAMPMASK));
		OUT_RING(ring, A6XX_RB_FS_OUTPUT_CNTL1_MRT(nr));

		OUT_PKT4(ring, REG_A6XX_SP_FS_OUTPUT_CNTL1, 1);
		OUT_RING(ring, A6XX_SP_FS_OUTPUT_CNTL1_MRT(nr));

		fd6_emit_take_group(emit, ring, FD6_GROUP_PROG_FB_RAST, ENABLE_DRAW);
	}
	fd6_emit_consts(emit);

	struct ir3_stream_output_info *info = &fd6_last_shader(prog)->shader->stream_output;
	if (info->num_outputs)
		fd6_emit_streamout(ring, emit, info);
	if (dirty & (FD_DIRTY_BLEND | FD_DIRTY_SAMPLE_MASK)) {
		struct fd6_blend_variant *blend = fd6_blend_variant(ctx->blend,
				pfb->samples, ctx->sample_mask);
		fd6_emit_add_group(emit, blend->stateobj, FD6_GROUP_BLEND, ENABLE_DRAW);
	}

	if (dirty & FD_DIRTY_BLEND_COLOR) {
		struct pipe_blend_color *bcolor = &ctx->blend_color;
		struct fd_ringbuffer *ring = fd_submit_new_ringbuffer(
				emit->ctx->batch->submit, 5*4, FD_RINGBUFFER_STREAMING);

		OUT_REG(ring,
				A6XX_RB_BLEND_RED_F32(bcolor->color[0]),
				A6XX_RB_BLEND_GREEN_F32(bcolor->color[1]),
				A6XX_RB_BLEND_BLUE_F32(bcolor->color[2]),
				A6XX_RB_BLEND_ALPHA_F32(bcolor->color[3])
			);

		fd6_emit_take_group(emit, ring, FD6_GROUP_BLEND_COLOR, ENABLE_DRAW);
	}
	needs_border |= fd6_emit_combined_textures(ring, emit, PIPE_SHADER_VERTEX, vs);
	if (hs) {
		needs_border |= fd6_emit_combined_textures(ring, emit, PIPE_SHADER_TESS_CTRL, hs);
		needs_border |= fd6_emit_combined_textures(ring, emit, PIPE_SHADER_TESS_EVAL, ds);
	}
	if (gs) {
		needs_border |= fd6_emit_combined_textures(ring, emit, PIPE_SHADER_GEOMETRY, gs);
	}
	needs_border |= fd6_emit_combined_textures(ring, emit, PIPE_SHADER_FRAGMENT, fs);

	if (needs_border)
		emit_border_color(ctx, ring);

	if (hs) {
		debug_assert(ir3_shader_nibo(hs) == 0);
		debug_assert(ir3_shader_nibo(ds) == 0);
	}
	if (gs) {
		debug_assert(ir3_shader_nibo(gs) == 0);
	}
#define DIRTY_IBO (FD_DIRTY_SHADER_SSBO | FD_DIRTY_SHADER_IMAGE | \
				   FD_DIRTY_SHADER_PROG)
	if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & DIRTY_IBO) {
		struct fd_ringbuffer *state =
			fd6_build_ibo_state(ctx, fs, PIPE_SHADER_FRAGMENT);
		struct fd_ringbuffer *obj = fd_submit_new_ringbuffer(
			ctx->batch->submit, 0x100, FD_RINGBUFFER_STREAMING);

		OUT_PKT7(obj, CP_LOAD_STATE6, 3);
		OUT_RING(obj, CP_LOAD_STATE6_0_DST_OFF(0) |
			CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
			CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
			CP_LOAD_STATE6_0_STATE_BLOCK(SB6_IBO) |
			CP_LOAD_STATE6_0_NUM_UNIT(ir3_shader_nibo(fs)));
		OUT_RB(obj, state);

		OUT_PKT4(obj, REG_A6XX_SP_IBO_LO, 2);
		OUT_RB(obj, state);

		/* TODO if we used CP_SET_DRAW_STATE for compute shaders, we could
		 * de-duplicate this from program->config_stateobj
		 */
		OUT_PKT4(obj, REG_A6XX_SP_IBO_COUNT, 1);
		OUT_RING(obj, ir3_shader_nibo(fs));

		fd6_emit_ibo_consts(emit, fs, PIPE_SHADER_FRAGMENT, ring);

		fd6_emit_take_group(emit, obj, FD6_GROUP_IBO, ENABLE_DRAW);
		fd_ringbuffer_del(state);
	}
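	/* Finally dispatch all accumulated state groups to the hw.  A group
	 * with no stateobj is emitted as a zero-count disable for that group
	 * id, and enable_mask controls which passes (binning vs draw) replay
	 * the group.
	 */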
	if (emit->num_groups > 0) {
		OUT_PKT7(ring, CP_SET_DRAW_STATE, 3 * emit->num_groups);
		for (unsigned i = 0; i < emit->num_groups; i++) {
			struct fd6_state_group *g = &emit->groups[i];
			unsigned n = g->stateobj ?
				fd_ringbuffer_size(g->stateobj) / 4 : 0;

			debug_assert((g->enable_mask & ~ENABLE_ALL) == 0);

			if (n == 0) {
				OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(0) |
						CP_SET_DRAW_STATE__0_DISABLE |
						g->enable_mask |
						CP_SET_DRAW_STATE__0_GROUP_ID(g->group_id));
				OUT_RING(ring, 0x00000000);
				OUT_RING(ring, 0x00000000);
			} else {
				OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(n) |
						g->enable_mask |
						CP_SET_DRAW_STATE__0_GROUP_ID(g->group_id));
				OUT_RB(ring, g->stateobj);
			}

			if (g->stateobj)
				fd_ringbuffer_del(g->stateobj);
		}
		emit->num_groups = 0;
	}
}
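/* Emit compute shader related state.  Note that the other stages' TEX_COUNT
 * registers are zeroed here, presumably so that stale 3d texture counts
 * can't interfere with the compute dispatch.
 */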
void
fd6_emit_cs_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
		struct ir3_shader_variant *cp)
{
	enum fd_dirty_shader_state dirty = ctx->dirty_shader[PIPE_SHADER_COMPUTE];

	if (dirty & (FD_DIRTY_SHADER_TEX | FD_DIRTY_SHADER_PROG |
			FD_DIRTY_SHADER_IMAGE | FD_DIRTY_SHADER_SSBO)) {
		struct fd_texture_stateobj *tex = &ctx->tex[PIPE_SHADER_COMPUTE];
		unsigned bcolor_offset = fd6_border_color_offset(ctx, PIPE_SHADER_COMPUTE, tex);

		bool needs_border = fd6_emit_textures(ctx->pipe, ring, PIPE_SHADER_COMPUTE, tex,
				bcolor_offset, cp, ctx);

		if (needs_border)
			emit_border_color(ctx, ring);

		OUT_PKT4(ring, REG_A6XX_SP_VS_TEX_COUNT, 1);
		OUT_RING(ring, 0);

		OUT_PKT4(ring, REG_A6XX_SP_HS_TEX_COUNT, 1);
		OUT_RING(ring, 0);

		OUT_PKT4(ring, REG_A6XX_SP_DS_TEX_COUNT, 1);
		OUT_RING(ring, 0);

		OUT_PKT4(ring, REG_A6XX_SP_GS_TEX_COUNT, 1);
		OUT_RING(ring, 0);

		OUT_PKT4(ring, REG_A6XX_SP_FS_TEX_COUNT, 1);
		OUT_RING(ring, 0);
	}

	if (dirty & (FD_DIRTY_SHADER_SSBO | FD_DIRTY_SHADER_IMAGE)) {
		struct fd_ringbuffer *state =
			fd6_build_ibo_state(ctx, cp, PIPE_SHADER_COMPUTE);

		OUT_PKT7(ring, CP_LOAD_STATE6_FRAG, 3);
		OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(0) |
			CP_LOAD_STATE6_0_STATE_TYPE(ST6_IBO) |
			CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
			CP_LOAD_STATE6_0_STATE_BLOCK(SB6_CS_SHADER) |
			CP_LOAD_STATE6_0_NUM_UNIT(ir3_shader_nibo(cp)));
		OUT_RB(ring, state);

		OUT_PKT4(ring, REG_A6XX_SP_CS_IBO_LO, 2);
		OUT_RB(ring, state);

		OUT_PKT4(ring, REG_A6XX_SP_CS_IBO_COUNT, 1);
		OUT_RING(ring, ir3_shader_nibo(cp));

		fd_ringbuffer_del(state);
	}
}
/* emit setup at begin of new cmdstream buffer (don't rely on previous
 * state, there could have been a context switch between ioctls):
 */
void
fd6_emit_restore(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	//struct fd_context *ctx = batch->ctx;
	fd_log(batch, "START RESTORE");

	fd6_cache_inv(batch, ring);

	OUT_REG(ring, A6XX_HLSQ_INVALIDATE_CMD(
			.vs_state = true,
			.hs_state = true,
			.ds_state = true,
			.gs_state = true,
			.fs_state = true,
			.cs_state = true,
			.gfx_ibo = true,
			.cs_ibo = true,
			.gfx_shared_const = true,
			.cs_shared_const = true,
			.gfx_bindless = 0x1f,
			.cs_bindless = 0x1f
		));

	WRITE(REG_A6XX_RB_UNKNOWN_8E04, 0x0);
	WRITE(REG_A6XX_SP_UNKNOWN_AE04, 0x8);
	WRITE(REG_A6XX_SP_UNKNOWN_AE00, 0);
	WRITE(REG_A6XX_SP_UNKNOWN_AE0F, 0x3f);
	WRITE(REG_A6XX_SP_UNKNOWN_B605, 0x44);
	WRITE(REG_A6XX_SP_UNKNOWN_B600, 0x100000);
	WRITE(REG_A6XX_HLSQ_UNKNOWN_BE00, 0x80);
	WRITE(REG_A6XX_HLSQ_UNKNOWN_BE01, 0);

	WRITE(REG_A6XX_VPC_UNKNOWN_9600, 0);
	WRITE(REG_A6XX_GRAS_UNKNOWN_8600, 0x880);
	WRITE(REG_A6XX_HLSQ_UNKNOWN_BE04, 0x80000);
	WRITE(REG_A6XX_SP_UNKNOWN_AE03, 0x1430);
	WRITE(REG_A6XX_SP_IBO_COUNT, 0);
	WRITE(REG_A6XX_SP_UNKNOWN_B182, 0);
	WRITE(REG_A6XX_HLSQ_SHARED_CONSTS, 0);
	WRITE(REG_A6XX_UCHE_UNKNOWN_0E12, 0x3200000);
	WRITE(REG_A6XX_UCHE_CLIENT_PF, 4);
	WRITE(REG_A6XX_RB_UNKNOWN_8E01, 0x1);
	WRITE(REG_A6XX_SP_MODE_CONTROL, A6XX_SP_MODE_CONTROL_CONSTANT_DEMOTION_ENABLE | 4);
	WRITE(REG_A6XX_VFD_ADD_OFFSET, A6XX_VFD_ADD_OFFSET_VERTEX);
	WRITE(REG_A6XX_RB_UNKNOWN_8811, 0x00000010);
	WRITE(REG_A6XX_PC_MODE_CNTL, 0x1f);

	WRITE(REG_A6XX_GRAS_UNKNOWN_8101, 0);
	WRITE(REG_A6XX_GRAS_SAMPLE_CNTL, 0);
	WRITE(REG_A6XX_GRAS_UNKNOWN_8110, 0x2);

	WRITE(REG_A6XX_RB_UNKNOWN_8818, 0);
	WRITE(REG_A6XX_RB_UNKNOWN_8819, 0);
	WRITE(REG_A6XX_RB_UNKNOWN_881A, 0);
	WRITE(REG_A6XX_RB_UNKNOWN_881B, 0);
	WRITE(REG_A6XX_RB_UNKNOWN_881C, 0);
	WRITE(REG_A6XX_RB_UNKNOWN_881D, 0);
	WRITE(REG_A6XX_RB_UNKNOWN_881E, 0);
	WRITE(REG_A6XX_RB_UNKNOWN_88F0, 0);

	WRITE(REG_A6XX_VPC_POINT_COORD_INVERT,
			A6XX_VPC_POINT_COORD_INVERT(0).value);
	WRITE(REG_A6XX_VPC_UNKNOWN_9300, 0);

	WRITE(REG_A6XX_VPC_SO_DISABLE, A6XX_VPC_SO_DISABLE(true).value);

	WRITE(REG_A6XX_PC_UNKNOWN_9980, 0);

	WRITE(REG_A6XX_PC_MULTIVIEW_CNTL, 0);

	WRITE(REG_A6XX_SP_UNKNOWN_A81B, 0);

	WRITE(REG_A6XX_SP_UNKNOWN_B183, 0);

	WRITE(REG_A6XX_GRAS_UNKNOWN_8099, 0);
	WRITE(REG_A6XX_GRAS_VS_LAYER_CNTL, 0);
	WRITE(REG_A6XX_GRAS_UNKNOWN_80A0, 2);
	WRITE(REG_A6XX_GRAS_UNKNOWN_80AF, 0);
	WRITE(REG_A6XX_VPC_UNKNOWN_9210, 0);
	WRITE(REG_A6XX_VPC_UNKNOWN_9211, 0);
	WRITE(REG_A6XX_VPC_UNKNOWN_9602, 0);
	WRITE(REG_A6XX_PC_UNKNOWN_9E72, 0);
	WRITE(REG_A6XX_SP_TP_SAMPLE_CONFIG, 0);
	/* NOTE blob seems to (mostly?) use 0xb2 for SP_TP_UNKNOWN_B309
	 * but this seems to kill texture gather offsets.
	 */
	WRITE(REG_A6XX_SP_TP_UNKNOWN_B309, 0xa2);
	WRITE(REG_A6XX_RB_SAMPLE_CONFIG, 0);
	WRITE(REG_A6XX_GRAS_SAMPLE_CONFIG, 0);
	WRITE(REG_A6XX_RB_Z_BOUNDS_MIN, 0);
	WRITE(REG_A6XX_RB_Z_BOUNDS_MAX, 0);
	WRITE(REG_A6XX_HLSQ_CONTROL_5_REG, 0xfc);

	emit_marker6(ring, 7);

	OUT_PKT4(ring, REG_A6XX_VFD_MODE_CNTL, 1);
	OUT_RING(ring, 0x00000000);   /* VFD_MODE_CNTL */

	WRITE(REG_A6XX_VFD_MULTIVIEW_CNTL, 0);

	OUT_PKT4(ring, REG_A6XX_PC_MODE_CNTL, 1);
	OUT_RING(ring, 0x0000001f);   /* PC_MODE_CNTL */

	/* we don't use this yet.. probably best to disable.. */
	OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
	OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(0) |
			CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
			CP_SET_DRAW_STATE__0_GROUP_ID(0));
	OUT_RING(ring, CP_SET_DRAW_STATE__1_ADDR_LO(0));
	OUT_RING(ring, CP_SET_DRAW_STATE__2_ADDR_HI(0));

	OUT_PKT4(ring, REG_A6XX_VPC_SO_BUF_CNTL, 1);
	OUT_RING(ring, 0x00000000);   /* VPC_SO_BUF_CNTL */

	OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_CNTL, 1);
	OUT_RING(ring, 0x00000000);

	OUT_PKT4(ring, REG_A6XX_RB_LRZ_CNTL, 1);
	OUT_RING(ring, 0x00000000);

	fd_log(batch, "END RESTORE");
}
static void
fd6_mem_to_mem(struct fd_ringbuffer *ring, struct pipe_resource *dst,
		unsigned dst_off, struct pipe_resource *src, unsigned src_off,
		unsigned sizedwords)
{
	struct fd_bo *src_bo = fd_resource(src)->bo;
	struct fd_bo *dst_bo = fd_resource(dst)->bo;
	unsigned i;

	for (i = 0; i < sizedwords; i++) {
		OUT_PKT7(ring, CP_MEM_TO_MEM, 5);
		OUT_RING(ring, 0x00000000);
		OUT_RELOC(ring, dst_bo, dst_off, 0, 0);
		OUT_RELOC(ring, src_bo, src_off, 0, 0);

		dst_off += 4;
		src_off += 4;
	}
}
/* this is *almost* the same as fd6_cache_flush().. which I guess
 * could be re-worked to be something a bit more generic w/ param
 * indicating what needs to be flushed..  although that would mean
 * figuring out which events trigger what state to flush..
 */
static void
fd6_framebuffer_barrier(struct fd_context *ctx)
{
	struct fd6_context *fd6_ctx = fd6_context(ctx);
	struct fd_batch *batch = ctx->batch;
	struct fd_ringbuffer *ring = batch->draw;
	unsigned seqno;

	seqno = fd6_event_write(batch, ring, RB_DONE_TS, true);

	OUT_PKT7(ring, CP_WAIT_REG_MEM, 6);
	OUT_RING(ring, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
			CP_WAIT_REG_MEM_0_POLL_MEMORY);
	OUT_RELOC(ring, control_ptr(fd6_ctx, seqno));
	OUT_RING(ring, CP_WAIT_REG_MEM_3_REF(seqno));
	OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(~0));
	OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));

	fd6_event_write(batch, ring, PC_CCU_FLUSH_COLOR_TS, true);
	fd6_event_write(batch, ring, PC_CCU_FLUSH_DEPTH_TS, true);

	seqno = fd6_event_write(batch, ring, CACHE_FLUSH_TS, true);

	fd6_event_write(batch, ring, 0x31, false);

	OUT_PKT7(ring, CP_WAIT_MEM_GTE, 4);
	OUT_RING(ring, CP_WAIT_MEM_GTE_0_RESERVED(0));
	OUT_RELOC(ring, control_ptr(fd6_ctx, seqno));
	OUT_RING(ring, CP_WAIT_MEM_GTE_3_REF(seqno));
}
void
fd6_emit_init_screen(struct pipe_screen *pscreen)
{
	struct fd_screen *screen = fd_screen(pscreen);
	screen->emit_ib = fd6_emit_ib;
	screen->mem_to_mem = fd6_mem_to_mem;
}

void
fd6_emit_init(struct pipe_context *pctx)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->framebuffer_barrier = fd6_framebuffer_barrier;
}