/*
 * Copyright © 2014-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
24 #include "util/u_format.h"
25 #include "util/u_half.h"
26 #include "v3d_context.h"
27 #include "broadcom/common/v3d_macros.h"
28 #include "broadcom/cle/v3dx_pack.h"
29 #include "broadcom/compiler/v3d_compiler.h"
32 v3d_factor(enum pipe_blendfactor factor
, bool dst_alpha_one
)
34 /* We may get a bad blendfactor when blending is disabled. */
36 return V3D_BLEND_FACTOR_ZERO
;
39 case PIPE_BLENDFACTOR_ZERO
:
40 return V3D_BLEND_FACTOR_ZERO
;
41 case PIPE_BLENDFACTOR_ONE
:
42 return V3D_BLEND_FACTOR_ONE
;
43 case PIPE_BLENDFACTOR_SRC_COLOR
:
44 return V3D_BLEND_FACTOR_SRC_COLOR
;
45 case PIPE_BLENDFACTOR_INV_SRC_COLOR
:
46 return V3D_BLEND_FACTOR_INV_SRC_COLOR
;
47 case PIPE_BLENDFACTOR_DST_COLOR
:
48 return V3D_BLEND_FACTOR_DST_COLOR
;
49 case PIPE_BLENDFACTOR_INV_DST_COLOR
:
50 return V3D_BLEND_FACTOR_INV_DST_COLOR
;
51 case PIPE_BLENDFACTOR_SRC_ALPHA
:
52 return V3D_BLEND_FACTOR_SRC_ALPHA
;
53 case PIPE_BLENDFACTOR_INV_SRC_ALPHA
:
54 return V3D_BLEND_FACTOR_INV_SRC_ALPHA
;
55 case PIPE_BLENDFACTOR_DST_ALPHA
:
56 return (dst_alpha_one
?
57 V3D_BLEND_FACTOR_ONE
:
58 V3D_BLEND_FACTOR_DST_ALPHA
);
59 case PIPE_BLENDFACTOR_INV_DST_ALPHA
:
60 return (dst_alpha_one
?
61 V3D_BLEND_FACTOR_ZERO
:
62 V3D_BLEND_FACTOR_INV_DST_ALPHA
);
63 case PIPE_BLENDFACTOR_CONST_COLOR
:
64 return V3D_BLEND_FACTOR_CONST_COLOR
;
65 case PIPE_BLENDFACTOR_INV_CONST_COLOR
:
66 return V3D_BLEND_FACTOR_INV_CONST_COLOR
;
67 case PIPE_BLENDFACTOR_CONST_ALPHA
:
68 return V3D_BLEND_FACTOR_CONST_ALPHA
;
69 case PIPE_BLENDFACTOR_INV_CONST_ALPHA
:
70 return V3D_BLEND_FACTOR_INV_CONST_ALPHA
;
71 case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE
:
72 return V3D_BLEND_FACTOR_SRC_ALPHA_SATURATE
;
74 unreachable("Bad blend factor");
78 static inline uint16_t
79 swizzled_border_color(const struct v3d_device_info
*devinfo
,
80 struct pipe_sampler_state
*sampler
,
81 struct v3d_sampler_view
*sview
,
84 const struct util_format_description
*desc
=
85 util_format_description(sview
->base
.format
);
88 /* If we're doing swizzling in the sampler, then only rearrange the
89 * border color for the mismatch between the VC5 texture format and
90 * the PIPE_FORMAT, since GL_ARB_texture_swizzle will be handled by
91 * the sampler's swizzle.
93 * For swizzling in the shader, we don't do any pre-swizzling of the
96 if (v3d_get_tex_return_size(devinfo
, sview
->base
.format
,
97 sampler
->compare_mode
) != 32)
98 swiz
= desc
->swizzle
[swiz
];
102 return util_float_to_half(0.0);
104 return util_float_to_half(1.0);
106 return util_float_to_half(sampler
->border_color
.f
[swiz
]);
112 translate_swizzle(unsigned char pipe_swizzle
)
114 switch (pipe_swizzle
) {
123 return 2 + pipe_swizzle
;
125 unreachable("unknown swizzle");
130 emit_one_texture(struct v3d_context
*v3d
, struct v3d_texture_stateobj
*stage_tex
,
133 struct v3d_job
*job
= v3d
->job
;
134 struct pipe_sampler_state
*psampler
= stage_tex
->samplers
[i
];
135 struct v3d_sampler_state
*sampler
= v3d_sampler_state(psampler
);
136 struct pipe_sampler_view
*psview
= stage_tex
->textures
[i
];
137 struct v3d_sampler_view
*sview
= v3d_sampler_view(psview
);
138 struct pipe_resource
*prsc
= psview
->texture
;
139 struct v3d_resource
*rsc
= v3d_resource(prsc
);
140 const struct v3d_device_info
*devinfo
= &v3d
->screen
->devinfo
;
142 stage_tex
->texture_state
[i
].offset
=
143 v3d_cl_ensure_space(&job
->indirect
,
144 cl_packet_length(TEXTURE_SHADER_STATE
),
146 v3d_bo_set_reference(&stage_tex
->texture_state
[i
].bo
,
149 uint32_t return_size
= v3d_get_tex_return_size(devinfo
, psview
->format
,
150 psampler
->compare_mode
);
152 struct V3D33_TEXTURE_SHADER_STATE unpacked
= {
154 .border_color_red
= swizzled_border_color(devinfo
, psampler
,
156 .border_color_green
= swizzled_border_color(devinfo
, psampler
,
158 .border_color_blue
= swizzled_border_color(devinfo
, psampler
,
160 .border_color_alpha
= swizzled_border_color(devinfo
, psampler
,
163 /* In the normal texturing path, the LOD gets clamped between
164 * min/max, and the base_level field (set in the sampler view
165 * from first_level) only decides where the min/mag switch
166 * happens, so we need to use the LOD clamps to keep us
167 * between min and max.
169 * For txf, the LOD clamp is still used, despite GL not
170 * wanting that. We will need to have a separate
171 * TEXTURE_SHADER_STATE that ignores psview->min/max_lod to
172 * support txf properly.
174 .min_level_of_detail
= MIN2(psview
->u
.tex
.first_level
+
175 MAX2(psampler
->min_lod
, 0),
176 psview
->u
.tex
.last_level
),
177 .max_level_of_detail
= MIN2(psview
->u
.tex
.first_level
+
179 psview
->u
.tex
.last_level
),
181 .texture_base_pointer
= cl_address(rsc
->bo
,
182 rsc
->slices
[0].offset
),
184 .output_32_bit
= return_size
== 32,
187 /* Set up the sampler swizzle if we're doing 16-bit sampling. For
188 * 32-bit, we leave swizzling up to the shader compiler.
190 * Note: Contrary to the docs, the swizzle still applies even if the
191 * return size is 32. It's just that you probably want to swizzle in
192 * the shader, because you need the Y/Z/W channels to be defined.
194 if (return_size
== 32) {
195 unpacked
.swizzle_r
= translate_swizzle(PIPE_SWIZZLE_X
);
196 unpacked
.swizzle_g
= translate_swizzle(PIPE_SWIZZLE_Y
);
197 unpacked
.swizzle_b
= translate_swizzle(PIPE_SWIZZLE_Z
);
198 unpacked
.swizzle_a
= translate_swizzle(PIPE_SWIZZLE_W
);
200 unpacked
.swizzle_r
= translate_swizzle(sview
->swizzle
[0]);
201 unpacked
.swizzle_g
= translate_swizzle(sview
->swizzle
[1]);
202 unpacked
.swizzle_b
= translate_swizzle(sview
->swizzle
[2]);
203 unpacked
.swizzle_a
= translate_swizzle(sview
->swizzle
[3]);
206 int min_img_filter
= psampler
->min_img_filter
;
207 int min_mip_filter
= psampler
->min_mip_filter
;
208 int mag_img_filter
= psampler
->mag_img_filter
;
210 if (return_size
== 32) {
211 min_mip_filter
= PIPE_TEX_MIPFILTER_NEAREST
;
212 min_img_filter
= PIPE_TEX_FILTER_NEAREST
;
213 mag_img_filter
= PIPE_TEX_FILTER_NEAREST
;
216 bool min_nearest
= min_img_filter
== PIPE_TEX_FILTER_NEAREST
;
217 switch (min_mip_filter
) {
218 case PIPE_TEX_MIPFILTER_NONE
:
219 unpacked
.filter
+= min_nearest
? 2 : 0;
221 case PIPE_TEX_MIPFILTER_NEAREST
:
222 unpacked
.filter
+= min_nearest
? 4 : 8;
224 case PIPE_TEX_MIPFILTER_LINEAR
:
225 unpacked
.filter
+= min_nearest
? 4 : 8;
226 unpacked
.filter
+= 2;
230 if (mag_img_filter
== PIPE_TEX_FILTER_NEAREST
)
233 if (psampler
->max_anisotropy
> 8)
234 unpacked
.filter
= V3D_TMU_FILTER_ANISOTROPIC_16_1
;
235 else if (psampler
->max_anisotropy
> 4)
236 unpacked
.filter
= V3D_TMU_FILTER_ANISOTROPIC_8_1
;
237 else if (psampler
->max_anisotropy
> 2)
238 unpacked
.filter
= V3D_TMU_FILTER_ANISOTROPIC_4_1
;
239 else if (psampler
->max_anisotropy
)
240 unpacked
.filter
= V3D_TMU_FILTER_ANISOTROPIC_2_1
;
242 uint8_t packed
[cl_packet_length(TEXTURE_SHADER_STATE
)];
243 cl_packet_pack(TEXTURE_SHADER_STATE
)(&job
->indirect
, packed
, &unpacked
);
245 for (int i
= 0; i
< ARRAY_SIZE(packed
); i
++)
246 packed
[i
] |= sview
->texture_shader_state
[i
] | sampler
->texture_shader_state
[i
];
248 /* TMU indirect structs need to be 32b aligned. */
249 v3d_cl_ensure_space(&job
->indirect
, ARRAY_SIZE(packed
), 32);
250 cl_emit_prepacked(&job
->indirect
, &packed
);
254 emit_textures(struct v3d_context
*v3d
, struct v3d_texture_stateobj
*stage_tex
)
256 for (int i
= 0; i
< stage_tex
->num_textures
; i
++) {
257 if (stage_tex
->textures
[i
])
258 emit_one_texture(v3d
, stage_tex
, i
);
261 #endif /* V3D_VERSION < 40 */
264 translate_colormask(struct v3d_context
*v3d
, uint32_t colormask
, int rt
)
266 if (v3d
->swap_color_rb
& (1 << rt
)) {
267 colormask
= ((colormask
& (2 | 8)) |
268 ((colormask
& 1) << 2) |
269 ((colormask
& 4) >> 2));
272 return (~colormask
) & 0xf;
276 emit_rt_blend(struct v3d_context
*v3d
, struct v3d_job
*job
,
277 struct pipe_blend_state
*blend
, int rt
)
279 struct pipe_rt_blend_state
*rtblend
= &blend
->rt
[rt
];
281 #if V3D_VERSION >= 40
282 /* We don't need to emit blend state for disabled RTs. */
283 if (!rtblend
->blend_enable
)
287 cl_emit(&job
->bcl
, BLEND_CONFIG
, config
) {
288 #if V3D_VERSION >= 40
289 config
.render_target_mask
= 1 << rt
;
294 config
.colour_blend_mode
= rtblend
->rgb_func
;
295 config
.colour_blend_dst_factor
=
296 v3d_factor(rtblend
->rgb_dst_factor
,
297 v3d
->blend_dst_alpha_one
);
298 config
.colour_blend_src_factor
=
299 v3d_factor(rtblend
->rgb_src_factor
,
300 v3d
->blend_dst_alpha_one
);
302 config
.alpha_blend_mode
= rtblend
->alpha_func
;
303 config
.alpha_blend_dst_factor
=
304 v3d_factor(rtblend
->alpha_dst_factor
,
305 v3d
->blend_dst_alpha_one
);
306 config
.alpha_blend_src_factor
=
307 v3d_factor(rtblend
->alpha_src_factor
,
308 v3d
->blend_dst_alpha_one
);
313 emit_flat_shade_flags(struct v3d_job
*job
,
316 enum V3DX(Varying_Flags_Action
) lower
,
317 enum V3DX(Varying_Flags_Action
) higher
)
319 cl_emit(&job
->bcl
, FLAT_SHADE_FLAGS
, flags
) {
320 flags
.varying_offset_v0
= varying_offset
;
321 flags
.flat_shade_flags_for_varyings_v024
= varyings
;
322 flags
.action_for_flat_shade_flags_of_lower_numbered_varyings
=
324 flags
.action_for_flat_shade_flags_of_higher_numbered_varyings
=
329 #if V3D_VERSION >= 40
331 emit_noperspective_flags(struct v3d_job
*job
,
334 enum V3DX(Varying_Flags_Action
) lower
,
335 enum V3DX(Varying_Flags_Action
) higher
)
337 cl_emit(&job
->bcl
, NON_PERSPECTIVE_FLAGS
, flags
) {
338 flags
.varying_offset_v0
= varying_offset
;
339 flags
.non_perspective_flags_for_varyings_v024
= varyings
;
340 flags
.action_for_non_perspective_flags_of_lower_numbered_varyings
=
342 flags
.action_for_non_perspective_flags_of_higher_numbered_varyings
=
348 emit_centroid_flags(struct v3d_job
*job
,
351 enum V3DX(Varying_Flags_Action
) lower
,
352 enum V3DX(Varying_Flags_Action
) higher
)
354 cl_emit(&job
->bcl
, CENTROID_FLAGS
, flags
) {
355 flags
.varying_offset_v0
= varying_offset
;
356 flags
.centroid_flags_for_varyings_v024
= varyings
;
357 flags
.action_for_centroid_flags_of_lower_numbered_varyings
=
359 flags
.action_for_centroid_flags_of_higher_numbered_varyings
=
363 #endif /* V3D_VERSION >= 40 */
366 emit_varying_flags(struct v3d_job
*job
, uint32_t *flags
,
367 void (*flag_emit_callback
)(struct v3d_job
*job
,
370 enum V3DX(Varying_Flags_Action
) lower
,
371 enum V3DX(Varying_Flags_Action
) higher
))
373 struct v3d_context
*v3d
= job
->v3d
;
374 bool emitted_any
= false;
376 for (int i
= 0; i
< ARRAY_SIZE(v3d
->prog
.fs
->prog_data
.fs
->flat_shade_flags
); i
++) {
381 flag_emit_callback(job
, i
, flags
[i
],
382 V3D_VARYING_FLAGS_ACTION_UNCHANGED
,
383 V3D_VARYING_FLAGS_ACTION_UNCHANGED
);
385 flag_emit_callback(job
, i
, flags
[i
],
386 V3D_VARYING_FLAGS_ACTION_UNCHANGED
,
387 V3D_VARYING_FLAGS_ACTION_ZEROED
);
389 flag_emit_callback(job
, i
, flags
[i
],
390 V3D_VARYING_FLAGS_ACTION_ZEROED
,
391 V3D_VARYING_FLAGS_ACTION_ZEROED
);
400 v3dX(emit_state
)(struct pipe_context
*pctx
)
402 struct v3d_context
*v3d
= v3d_context(pctx
);
403 struct v3d_job
*job
= v3d
->job
;
404 bool rasterizer_discard
= v3d
->rasterizer
->base
.rasterizer_discard
;
406 if (v3d
->dirty
& (VC5_DIRTY_SCISSOR
| VC5_DIRTY_VIEWPORT
|
407 VC5_DIRTY_RASTERIZER
)) {
408 float *vpscale
= v3d
->viewport
.scale
;
409 float *vptranslate
= v3d
->viewport
.translate
;
410 float vp_minx
= -fabsf(vpscale
[0]) + vptranslate
[0];
411 float vp_maxx
= fabsf(vpscale
[0]) + vptranslate
[0];
412 float vp_miny
= -fabsf(vpscale
[1]) + vptranslate
[1];
413 float vp_maxy
= fabsf(vpscale
[1]) + vptranslate
[1];
415 /* Clip to the scissor if it's enabled, but still clip to the
416 * drawable regardless since that controls where the binner
417 * tries to put things.
419 * Additionally, always clip the rendering to the viewport,
420 * since the hardware does guardband clipping, meaning
421 * primitives would rasterize outside of the view volume.
423 uint32_t minx
, miny
, maxx
, maxy
;
424 if (!v3d
->rasterizer
->base
.scissor
) {
425 minx
= MAX2(vp_minx
, 0);
426 miny
= MAX2(vp_miny
, 0);
427 maxx
= MIN2(vp_maxx
, job
->draw_width
);
428 maxy
= MIN2(vp_maxy
, job
->draw_height
);
430 minx
= MAX2(vp_minx
, v3d
->scissor
.minx
);
431 miny
= MAX2(vp_miny
, v3d
->scissor
.miny
);
432 maxx
= MIN2(vp_maxx
, v3d
->scissor
.maxx
);
433 maxy
= MIN2(vp_maxy
, v3d
->scissor
.maxy
);
436 cl_emit(&job
->bcl
, CLIP_WINDOW
, clip
) {
437 clip
.clip_window_left_pixel_coordinate
= minx
;
438 clip
.clip_window_bottom_pixel_coordinate
= miny
;
439 if (maxx
> minx
&& maxy
> miny
) {
440 clip
.clip_window_width_in_pixels
= maxx
- minx
;
441 clip
.clip_window_height_in_pixels
= maxy
- miny
;
442 } else if (V3D_VERSION
< 41) {
443 /* The HW won't entirely clip out when scissor
444 * w/h is 0. Just treat it the same as
445 * rasterizer discard.
447 rasterizer_discard
= true;
448 clip
.clip_window_width_in_pixels
= 1;
449 clip
.clip_window_height_in_pixels
= 1;
453 job
->draw_min_x
= MIN2(job
->draw_min_x
, minx
);
454 job
->draw_min_y
= MIN2(job
->draw_min_y
, miny
);
455 job
->draw_max_x
= MAX2(job
->draw_max_x
, maxx
);
456 job
->draw_max_y
= MAX2(job
->draw_max_y
, maxy
);
459 if (v3d
->dirty
& (VC5_DIRTY_RASTERIZER
|
462 VC5_DIRTY_COMPILED_FS
)) {
463 cl_emit(&job
->bcl
, CONFIGURATION_BITS
, config
) {
464 config
.enable_forward_facing_primitive
=
465 !rasterizer_discard
&&
466 !(v3d
->rasterizer
->base
.cull_face
&
468 config
.enable_reverse_facing_primitive
=
469 !rasterizer_discard
&&
470 !(v3d
->rasterizer
->base
.cull_face
&
472 /* This seems backwards, but it's what gets the
473 * clipflat test to pass.
475 config
.clockwise_primitives
=
476 v3d
->rasterizer
->base
.front_ccw
;
478 config
.enable_depth_offset
=
479 v3d
->rasterizer
->base
.offset_tri
;
481 /* V3D follows GL behavior where the sample mask only
482 * applies when MSAA is enabled. Gallium has sample
483 * mask apply anyway, and the MSAA blit shaders will
484 * set sample mask without explicitly setting
485 * rasterizer oversample. Just force it on here,
486 * since the blit shaders are the only way to have
487 * !multisample && samplemask != 0xf.
489 config
.rasterizer_oversample_mode
=
490 v3d
->rasterizer
->base
.multisample
||
491 v3d
->sample_mask
!= 0xf;
493 config
.direct3d_provoking_vertex
=
494 v3d
->rasterizer
->base
.flatshade_first
;
496 config
.blend_enable
= v3d
->blend
->blend_enables
;
498 /* Note: EZ state may update based on the compiled FS,
501 config
.early_z_updates_enable
=
502 (job
->ez_state
!= VC5_EZ_DISABLED
);
503 if (v3d
->zsa
->base
.depth
.enabled
) {
504 config
.z_updates_enable
=
505 v3d
->zsa
->base
.depth
.writemask
;
506 config
.early_z_enable
=
507 config
.early_z_updates_enable
;
508 config
.depth_test_function
=
509 v3d
->zsa
->base
.depth
.func
;
511 config
.depth_test_function
= PIPE_FUNC_ALWAYS
;
514 config
.stencil_enable
=
515 v3d
->zsa
->base
.stencil
[0].enabled
;
520 if (v3d
->dirty
& VC5_DIRTY_RASTERIZER
&&
521 v3d
->rasterizer
->base
.offset_tri
) {
522 cl_emit(&job
->bcl
, DEPTH_OFFSET
, depth
) {
523 depth
.depth_offset_factor
=
524 v3d
->rasterizer
->offset_factor
;
526 job
->zsbuf
->format
== PIPE_FORMAT_Z16_UNORM
) {
527 depth
.depth_offset_units
=
528 v3d
->rasterizer
->z16_offset_units
;
530 depth
.depth_offset_units
=
531 v3d
->rasterizer
->offset_units
;
536 if (v3d
->dirty
& VC5_DIRTY_RASTERIZER
) {
537 cl_emit(&job
->bcl
, POINT_SIZE
, point_size
) {
538 point_size
.point_size
= v3d
->rasterizer
->point_size
;
541 cl_emit(&job
->bcl
, LINE_WIDTH
, line_width
) {
542 line_width
.line_width
= v3d
->rasterizer
->base
.line_width
;
546 if (v3d
->dirty
& VC5_DIRTY_VIEWPORT
) {
547 cl_emit(&job
->bcl
, CLIPPER_XY_SCALING
, clip
) {
548 clip
.viewport_half_width_in_1_256th_of_pixel
=
549 v3d
->viewport
.scale
[0] * 256.0f
;
550 clip
.viewport_half_height_in_1_256th_of_pixel
=
551 v3d
->viewport
.scale
[1] * 256.0f
;
554 cl_emit(&job
->bcl
, CLIPPER_Z_SCALE_AND_OFFSET
, clip
) {
555 clip
.viewport_z_offset_zc_to_zs
=
556 v3d
->viewport
.translate
[2];
557 clip
.viewport_z_scale_zc_to_zs
=
558 v3d
->viewport
.scale
[2];
560 cl_emit(&job
->bcl
, CLIPPER_Z_MIN_MAX_CLIPPING_PLANES
, clip
) {
561 float z1
= (v3d
->viewport
.translate
[2] -
562 v3d
->viewport
.scale
[2]);
563 float z2
= (v3d
->viewport
.translate
[2] +
564 v3d
->viewport
.scale
[2]);
565 clip
.minimum_zw
= MIN2(z1
, z2
);
566 clip
.maximum_zw
= MAX2(z1
, z2
);
569 cl_emit(&job
->bcl
, VIEWPORT_OFFSET
, vp
) {
570 vp
.viewport_centre_x_coordinate
=
571 v3d
->viewport
.translate
[0];
572 vp
.viewport_centre_y_coordinate
=
573 v3d
->viewport
.translate
[1];
577 if (v3d
->dirty
& VC5_DIRTY_BLEND
) {
578 struct v3d_blend_state
*blend
= v3d
->blend
;
580 if (blend
->blend_enables
) {
581 #if V3D_VERSION >= 40
582 cl_emit(&job
->bcl
, BLEND_ENABLES
, enables
) {
583 enables
.mask
= blend
->blend_enables
;
587 if (blend
->base
.independent_blend_enable
) {
588 for (int i
= 0; i
< VC5_MAX_DRAW_BUFFERS
; i
++)
589 emit_rt_blend(v3d
, job
, &blend
->base
, i
);
591 emit_rt_blend(v3d
, job
, &blend
->base
, 0);
596 if (v3d
->dirty
& VC5_DIRTY_BLEND
) {
597 struct pipe_blend_state
*blend
= &v3d
->blend
->base
;
599 cl_emit(&job
->bcl
, COLOUR_WRITE_MASKS
, mask
) {
600 for (int i
= 0; i
< 4; i
++) {
601 int rt
= blend
->independent_blend_enable
? i
: 0;
602 int rt_mask
= blend
->rt
[rt
].colormask
;
604 mask
.mask
|= translate_colormask(v3d
, rt_mask
,
610 /* GFXH-1431: On V3D 3.x, writing BLEND_CONFIG resets the constant
613 if (v3d
->dirty
& VC5_DIRTY_BLEND_COLOR
||
614 (V3D_VERSION
< 41 && (v3d
->dirty
& VC5_DIRTY_BLEND
))) {
615 cl_emit(&job
->bcl
, BLEND_CONSTANT_COLOUR
, colour
) {
616 colour
.red_f16
= (v3d
->swap_color_rb
?
617 v3d
->blend_color
.hf
[2] :
618 v3d
->blend_color
.hf
[0]);
619 colour
.green_f16
= v3d
->blend_color
.hf
[1];
620 colour
.blue_f16
= (v3d
->swap_color_rb
?
621 v3d
->blend_color
.hf
[0] :
622 v3d
->blend_color
.hf
[2]);
623 colour
.alpha_f16
= v3d
->blend_color
.hf
[3];
627 if (v3d
->dirty
& (VC5_DIRTY_ZSA
| VC5_DIRTY_STENCIL_REF
)) {
628 struct pipe_stencil_state
*front
= &v3d
->zsa
->base
.stencil
[0];
629 struct pipe_stencil_state
*back
= &v3d
->zsa
->base
.stencil
[1];
631 if (front
->enabled
) {
632 cl_emit_with_prepacked(&job
->bcl
, STENCIL_CONFIG
,
633 v3d
->zsa
->stencil_front
, config
) {
634 config
.stencil_ref_value
=
635 v3d
->stencil_ref
.ref_value
[0];
640 cl_emit_with_prepacked(&job
->bcl
, STENCIL_CONFIG
,
641 v3d
->zsa
->stencil_back
, config
) {
642 config
.stencil_ref_value
=
643 v3d
->stencil_ref
.ref_value
[1];
649 /* Pre-4.x, we have texture state that depends on both the sampler and
650 * the view, so we merge them together at draw time.
652 if (v3d
->dirty
& VC5_DIRTY_FRAGTEX
)
653 emit_textures(v3d
, &v3d
->fragtex
);
655 if (v3d
->dirty
& VC5_DIRTY_VERTTEX
)
656 emit_textures(v3d
, &v3d
->verttex
);
659 if (v3d
->dirty
& VC5_DIRTY_FLAT_SHADE_FLAGS
) {
660 if (!emit_varying_flags(job
,
661 v3d
->prog
.fs
->prog_data
.fs
->flat_shade_flags
,
662 emit_flat_shade_flags
)) {
663 cl_emit(&job
->bcl
, ZERO_ALL_FLAT_SHADE_FLAGS
, flags
);
667 #if V3D_VERSION >= 40
668 if (v3d
->dirty
& VC5_DIRTY_NOPERSPECTIVE_FLAGS
) {
669 if (!emit_varying_flags(job
,
670 v3d
->prog
.fs
->prog_data
.fs
->noperspective_flags
,
671 emit_noperspective_flags
)) {
672 cl_emit(&job
->bcl
, ZERO_ALL_NON_PERSPECTIVE_FLAGS
, flags
);
676 if (v3d
->dirty
& VC5_DIRTY_CENTROID_FLAGS
) {
677 if (!emit_varying_flags(job
,
678 v3d
->prog
.fs
->prog_data
.fs
->centroid_flags
,
679 emit_centroid_flags
)) {
680 cl_emit(&job
->bcl
, ZERO_ALL_CENTROID_FLAGS
, flags
);
685 /* Set up the transform feedback data specs (which VPM entries to
686 * output to which buffers).
688 if (v3d
->dirty
& (VC5_DIRTY_STREAMOUT
|
689 VC5_DIRTY_RASTERIZER
|
690 VC5_DIRTY_PRIM_MODE
)) {
691 struct v3d_streamout_stateobj
*so
= &v3d
->streamout
;
693 if (so
->num_targets
) {
694 bool psiz_per_vertex
= (v3d
->prim_mode
== PIPE_PRIM_POINTS
&&
695 v3d
->rasterizer
->base
.point_size_per_vertex
);
696 uint16_t *tf_specs
= (psiz_per_vertex
?
697 v3d
->prog
.bind_vs
->tf_specs_psiz
:
698 v3d
->prog
.bind_vs
->tf_specs
);
700 #if V3D_VERSION >= 40
701 job
->tf_enabled
= (v3d
->prog
.bind_vs
->num_tf_specs
!= 0 &&
702 v3d
->active_queries
);
704 cl_emit(&job
->bcl
, TRANSFORM_FEEDBACK_SPECS
, tfe
) {
705 tfe
.number_of_16_bit_output_data_specs_following
=
706 v3d
->prog
.bind_vs
->num_tf_specs
;
707 tfe
.enable
= job
->tf_enabled
;
709 #else /* V3D_VERSION < 40 */
710 cl_emit(&job
->bcl
, TRANSFORM_FEEDBACK_ENABLE
, tfe
) {
711 tfe
.number_of_32_bit_output_buffer_address_following
=
713 tfe
.number_of_16_bit_output_data_specs_following
=
714 v3d
->prog
.bind_vs
->num_tf_specs
;
716 #endif /* V3D_VERSION < 40 */
717 for (int i
= 0; i
< v3d
->prog
.bind_vs
->num_tf_specs
; i
++) {
718 cl_emit_prepacked(&job
->bcl
, &tf_specs
[i
]);
720 } else if (job
->tf_enabled
) {
721 #if V3D_VERSION >= 40
722 cl_emit(&job
->bcl
, TRANSFORM_FEEDBACK_SPECS
, tfe
) {
725 job
->tf_enabled
= false;
726 #endif /* V3D_VERSION >= 40 */
730 /* Set up the trasnform feedback buffers. */
731 if (v3d
->dirty
& VC5_DIRTY_STREAMOUT
) {
732 struct v3d_streamout_stateobj
*so
= &v3d
->streamout
;
733 for (int i
= 0; i
< so
->num_targets
; i
++) {
734 const struct pipe_stream_output_target
*target
=
736 struct v3d_resource
*rsc
= target
?
737 v3d_resource(target
->buffer
) : NULL
;
738 struct pipe_shader_state
*vs
= &v3d
->prog
.bind_vs
->base
;
739 struct pipe_stream_output_info
*info
= &vs
->stream_output
;
740 uint32_t offset
= (v3d
->streamout
.offsets
[i
] *
741 info
->stride
[i
] * 4);
743 #if V3D_VERSION >= 40
747 cl_emit(&job
->bcl
, TRANSFORM_FEEDBACK_BUFFER
, output
) {
748 output
.buffer_address
=
750 target
->buffer_offset
+
752 output
.buffer_size_in_32_bit_words
=
753 (target
->buffer_size
- offset
) >> 2;
754 output
.buffer_number
= i
;
756 #else /* V3D_VERSION < 40 */
757 cl_emit(&job
->bcl
, TRANSFORM_FEEDBACK_OUTPUT_ADDRESS
, output
) {
761 target
->buffer_offset
+
765 #endif /* V3D_VERSION < 40 */
767 v3d_job_add_write_resource(v3d
->job
,
770 /* XXX: buffer_size? */
774 if (v3d
->dirty
& VC5_DIRTY_OQ
) {
775 cl_emit(&job
->bcl
, OCCLUSION_QUERY_COUNTER
, counter
) {
776 job
->oq_enabled
= v3d
->active_queries
&& v3d
->current_oq
;
777 if (job
->oq_enabled
) {
778 counter
.address
= cl_address(v3d
->current_oq
, 0);
783 #if V3D_VERSION >= 40
784 if (v3d
->dirty
& VC5_DIRTY_SAMPLE_STATE
) {
785 cl_emit(&job
->bcl
, SAMPLE_STATE
, state
) {
786 /* Note: SampleCoverage was handled at the
787 * state_tracker level by converting to sample_mask.
789 state
.coverage
= fui(1.0) >> 16;
790 state
.mask
= job
->msaa
? v3d
->sample_mask
: 0xf;