/*
 * Copyright (C) Intel Corp. 2006. All Rights Reserved.
 * Intel funded Tungsten Graphics to
 * develop this 3D driver.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
26 #include "brw_context.h"
28 #include "brw_state.h"
29 #include "main/enums.h"
30 #include "main/formats.h"
31 #include "main/fbobject.h"
32 #include "main/samplerobj.h"
33 #include "main/framebuffer.h"
34 #include "program/prog_parameter.h"
35 #include "program/program.h"
36 #include "intel_mipmap_tree.h"
37 #include "intel_image.h"
38 #include "intel_fbo.h"
39 #include "compiler/brw_nir.h"
40 #include "brw_program.h"
42 #include "util/ralloc.h"
43 #include "util/u_math.h"
46 assign_fs_binding_table_offsets(const struct gen_device_info
*devinfo
,
47 const struct gl_program
*prog
,
48 const struct brw_wm_prog_key
*key
,
49 struct brw_wm_prog_data
*prog_data
)
51 /* Render targets implicitly start at surface index 0. Even if there are
52 * no color regions, we still perform an FB write to a null render target,
53 * which will be surface 0.
55 uint32_t next_binding_table_offset
= MAX2(key
->nr_color_regions
, 1);
57 next_binding_table_offset
=
58 brw_assign_common_binding_table_offsets(devinfo
, prog
, &prog_data
->base
,
59 next_binding_table_offset
);
61 if (prog
->nir
->info
.outputs_read
&& !key
->coherent_fb_fetch
) {
62 prog_data
->binding_table
.render_target_read_start
=
63 next_binding_table_offset
;
64 next_binding_table_offset
+= key
->nr_color_regions
;
69 brw_wm_debug_recompile(struct brw_context
*brw
, struct gl_program
*prog
,
70 const struct brw_wm_prog_key
*key
)
72 perf_debug("Recompiling fragment shader for program %d\n", prog
->Id
);
75 const struct brw_wm_prog_key
*old_key
=
76 brw_find_previous_compile(&brw
->cache
, BRW_CACHE_FS_PROG
,
77 key
->program_string_id
);
80 perf_debug(" Didn't find previous compile in the shader cache for debug\n");
84 found
|= key_debug(brw
, "alphatest, computed depth, depth test, or "
86 old_key
->iz_lookup
, key
->iz_lookup
);
87 found
|= key_debug(brw
, "depth statistics",
88 old_key
->stats_wm
, key
->stats_wm
);
89 found
|= key_debug(brw
, "flat shading",
90 old_key
->flat_shade
, key
->flat_shade
);
91 found
|= key_debug(brw
, "number of color buffers",
92 old_key
->nr_color_regions
, key
->nr_color_regions
);
93 found
|= key_debug(brw
, "MRT alpha test or alpha-to-coverage",
94 old_key
->replicate_alpha
, key
->replicate_alpha
);
95 found
|= key_debug(brw
, "fragment color clamping",
96 old_key
->clamp_fragment_color
, key
->clamp_fragment_color
);
97 found
|= key_debug(brw
, "per-sample interpolation",
98 old_key
->persample_interp
, key
->persample_interp
);
99 found
|= key_debug(brw
, "multisampled FBO",
100 old_key
->multisample_fbo
, key
->multisample_fbo
);
101 found
|= key_debug(brw
, "frag coord adds sample pos",
102 old_key
->frag_coord_adds_sample_pos
,
103 key
->frag_coord_adds_sample_pos
);
104 found
|= key_debug(brw
, "line smoothing",
105 old_key
->line_aa
, key
->line_aa
);
106 found
|= key_debug(brw
, "high quality derivatives",
107 old_key
->high_quality_derivatives
,
108 key
->high_quality_derivatives
);
109 found
|= key_debug(brw
, "force dual color blending",
110 old_key
->force_dual_color_blend
,
111 key
->force_dual_color_blend
);
112 found
|= key_debug(brw
, "coherent fb fetch",
113 old_key
->coherent_fb_fetch
, key
->coherent_fb_fetch
);
115 found
|= key_debug(brw
, "input slots valid",
116 old_key
->input_slots_valid
, key
->input_slots_valid
);
117 found
|= key_debug(brw
, "mrt alpha test function",
118 old_key
->alpha_test_func
, key
->alpha_test_func
);
119 found
|= key_debug(brw
, "mrt alpha test reference value",
120 old_key
->alpha_test_ref
, key
->alpha_test_ref
);
122 found
|= brw_debug_recompile_sampler_key(brw
, &old_key
->tex
, &key
->tex
);
125 perf_debug(" Something else\n");
130 brw_codegen_wm_prog(struct brw_context
*brw
,
131 struct brw_program
*fp
,
132 struct brw_wm_prog_key
*key
,
133 struct brw_vue_map
*vue_map
)
135 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
136 void *mem_ctx
= ralloc_context(NULL
);
137 struct brw_wm_prog_data prog_data
;
138 const GLuint
*program
;
139 bool start_busy
= false;
140 double start_time
= 0;
142 nir_shader
*nir
= nir_shader_clone(mem_ctx
, fp
->program
.nir
);
144 memset(&prog_data
, 0, sizeof(prog_data
));
146 /* Use ALT floating point mode for ARB programs so that 0^0 == 1. */
147 if (fp
->program
.is_arb_asm
)
148 prog_data
.base
.use_alt_mode
= true;
150 assign_fs_binding_table_offsets(devinfo
, &fp
->program
, key
, &prog_data
);
152 if (!fp
->program
.is_arb_asm
) {
153 brw_nir_setup_glsl_uniforms(mem_ctx
, nir
, &fp
->program
,
154 &prog_data
.base
, true);
155 brw_nir_analyze_ubo_ranges(brw
->screen
->compiler
, nir
,
156 NULL
, prog_data
.base
.ubo_ranges
);
158 brw_nir_setup_arb_uniforms(mem_ctx
, nir
, &fp
->program
, &prog_data
.base
);
160 if (unlikely(INTEL_DEBUG
& DEBUG_WM
))
161 brw_dump_arb_asm("fragment", &fp
->program
);
164 if (unlikely(brw
->perf_debug
)) {
165 start_busy
= (brw
->batch
.last_bo
&&
166 brw_bo_busy(brw
->batch
.last_bo
));
167 start_time
= get_time();
170 int st_index8
= -1, st_index16
= -1, st_index32
= -1;
171 if (INTEL_DEBUG
& DEBUG_SHADER_TIME
) {
172 st_index8
= brw_get_shader_time_index(brw
, &fp
->program
, ST_FS8
,
173 !fp
->program
.is_arb_asm
);
174 st_index16
= brw_get_shader_time_index(brw
, &fp
->program
, ST_FS16
,
175 !fp
->program
.is_arb_asm
);
176 st_index32
= brw_get_shader_time_index(brw
, &fp
->program
, ST_FS32
,
177 !fp
->program
.is_arb_asm
);
180 char *error_str
= NULL
;
181 program
= brw_compile_fs(brw
->screen
->compiler
, brw
, mem_ctx
,
182 key
, &prog_data
, nir
,
183 &fp
->program
, st_index8
, st_index16
, st_index32
,
184 true, false, vue_map
,
187 if (program
== NULL
) {
188 if (!fp
->program
.is_arb_asm
) {
189 fp
->program
.sh
.data
->LinkStatus
= LINKING_FAILURE
;
190 ralloc_strcat(&fp
->program
.sh
.data
->InfoLog
, error_str
);
193 _mesa_problem(NULL
, "Failed to compile fragment shader: %s\n", error_str
);
195 ralloc_free(mem_ctx
);
199 if (unlikely(brw
->perf_debug
)) {
200 if (fp
->compiled_once
)
201 brw_wm_debug_recompile(brw
, &fp
->program
, key
);
202 fp
->compiled_once
= true;
204 if (start_busy
&& !brw_bo_busy(brw
->batch
.last_bo
)) {
205 perf_debug("FS compile took %.03f ms and stalled the GPU\n",
206 (get_time() - start_time
) * 1000);
210 brw_alloc_stage_scratch(brw
, &brw
->wm
.base
, prog_data
.base
.total_scratch
);
212 if (unlikely((INTEL_DEBUG
& DEBUG_WM
) && fp
->program
.is_arb_asm
))
213 fprintf(stderr
, "\n");
215 /* The param and pull_param arrays will be freed by the shader cache. */
216 ralloc_steal(NULL
, prog_data
.base
.param
);
217 ralloc_steal(NULL
, prog_data
.base
.pull_param
);
218 brw_upload_cache(&brw
->cache
, BRW_CACHE_FS_PROG
,
219 key
, sizeof(struct brw_wm_prog_key
),
220 program
, prog_data
.base
.program_size
,
221 &prog_data
, sizeof(prog_data
),
222 &brw
->wm
.base
.prog_offset
, &brw
->wm
.base
.prog_data
);
224 ralloc_free(mem_ctx
);
230 brw_debug_recompile_sampler_key(struct brw_context
*brw
,
231 const struct brw_sampler_prog_key_data
*old_key
,
232 const struct brw_sampler_prog_key_data
*key
)
236 for (unsigned int i
= 0; i
< MAX_SAMPLERS
; i
++) {
237 found
|= key_debug(brw
, "EXT_texture_swizzle or DEPTH_TEXTURE_MODE",
238 old_key
->swizzles
[i
], key
->swizzles
[i
]);
240 found
|= key_debug(brw
, "GL_CLAMP enabled on any texture unit's 1st coordinate",
241 old_key
->gl_clamp_mask
[0], key
->gl_clamp_mask
[0]);
242 found
|= key_debug(brw
, "GL_CLAMP enabled on any texture unit's 2nd coordinate",
243 old_key
->gl_clamp_mask
[1], key
->gl_clamp_mask
[1]);
244 found
|= key_debug(brw
, "GL_CLAMP enabled on any texture unit's 3rd coordinate",
245 old_key
->gl_clamp_mask
[2], key
->gl_clamp_mask
[2]);
246 found
|= key_debug(brw
, "gather channel quirk on any texture unit",
247 old_key
->gather_channel_quirk_mask
, key
->gather_channel_quirk_mask
);
248 found
|= key_debug(brw
, "compressed multisample layout",
249 old_key
->compressed_multisample_layout_mask
,
250 key
->compressed_multisample_layout_mask
);
251 found
|= key_debug(brw
, "16x msaa",
255 found
|= key_debug(brw
, "y_uv image bound",
256 old_key
->y_uv_image_mask
,
257 key
->y_uv_image_mask
);
258 found
|= key_debug(brw
, "y_u_v image bound",
259 old_key
->y_u_v_image_mask
,
260 key
->y_u_v_image_mask
);
261 found
|= key_debug(brw
, "yx_xuxv image bound",
262 old_key
->yx_xuxv_image_mask
,
263 key
->yx_xuxv_image_mask
);
264 found
|= key_debug(brw
, "xy_uxvx image bound",
265 old_key
->xy_uxvx_image_mask
,
266 key
->xy_uxvx_image_mask
);
267 found
|= key_debug(brw
, "ayuv image bound",
268 old_key
->ayuv_image_mask
,
269 key
->ayuv_image_mask
);
272 for (unsigned int i
= 0; i
< MAX_SAMPLERS
; i
++) {
273 found
|= key_debug(brw
, "textureGather workarounds",
274 old_key
->gen6_gather_wa
[i
], key
->gen6_gather_wa
[i
]);
281 gen6_gather_workaround(GLenum internalformat
)
283 switch (internalformat
) {
284 case GL_R8I
: return WA_SIGN
| WA_8BIT
;
285 case GL_R8UI
: return WA_8BIT
;
286 case GL_R16I
: return WA_SIGN
| WA_16BIT
;
287 case GL_R16UI
: return WA_16BIT
;
289 /* Note that even though GL_R32I and GL_R32UI have format overrides in
290 * the surface state, there is no shader w/a required.
297 brw_populate_sampler_prog_key_data(struct gl_context
*ctx
,
298 const struct gl_program
*prog
,
299 struct brw_sampler_prog_key_data
*key
)
301 struct brw_context
*brw
= brw_context(ctx
);
302 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
303 GLbitfield mask
= prog
->SamplersUsed
;
306 const int s
= u_bit_scan(&mask
);
308 key
->swizzles
[s
] = SWIZZLE_NOOP
;
310 int unit_id
= prog
->SamplerUnits
[s
];
311 const struct gl_texture_unit
*unit
= &ctx
->Texture
.Unit
[unit_id
];
313 if (unit
->_Current
&& unit
->_Current
->Target
!= GL_TEXTURE_BUFFER
) {
314 const struct gl_texture_object
*t
= unit
->_Current
;
315 const struct gl_texture_image
*img
= t
->Image
[0][t
->BaseLevel
];
316 struct gl_sampler_object
*sampler
= _mesa_get_samplerobj(ctx
, unit_id
);
318 const bool alpha_depth
= t
->DepthMode
== GL_ALPHA
&&
319 (img
->_BaseFormat
== GL_DEPTH_COMPONENT
||
320 img
->_BaseFormat
== GL_DEPTH_STENCIL
);
322 /* Haswell handles texture swizzling as surface format overrides
323 * (except for GL_ALPHA); all other platforms need MOVs in the shader.
325 if (alpha_depth
|| (devinfo
->gen
< 8 && !devinfo
->is_haswell
))
326 key
->swizzles
[s
] = brw_get_texture_swizzle(ctx
, t
);
328 if (devinfo
->gen
< 8 &&
329 sampler
->MinFilter
!= GL_NEAREST
&&
330 sampler
->MagFilter
!= GL_NEAREST
) {
331 if (sampler
->WrapS
== GL_CLAMP
)
332 key
->gl_clamp_mask
[0] |= 1 << s
;
333 if (sampler
->WrapT
== GL_CLAMP
)
334 key
->gl_clamp_mask
[1] |= 1 << s
;
335 if (sampler
->WrapR
== GL_CLAMP
)
336 key
->gl_clamp_mask
[2] |= 1 << s
;
339 /* gather4 for RG32* is broken in multiple ways on Gen7. */
340 if (devinfo
->gen
== 7 && prog
->info
.uses_texture_gather
) {
341 switch (img
->InternalFormat
) {
344 /* We have to override the format to R32G32_FLOAT_LD.
345 * This means that SCS_ALPHA and SCS_ONE will return 0x3f8
346 * (1.0) rather than integer 1. This needs shader hacks.
348 * On Ivybridge, we whack W (alpha) to ONE in our key's
349 * swizzle. On Haswell, we look at the original texture
350 * swizzle, and use XYZW with channels overridden to ONE,
351 * leaving normal texture swizzling to SCS.
353 unsigned src_swizzle
=
354 devinfo
->is_haswell
? t
->_Swizzle
: key
->swizzles
[s
];
355 for (int i
= 0; i
< 4; i
++) {
356 unsigned src_comp
= GET_SWZ(src_swizzle
, i
);
357 if (src_comp
== SWIZZLE_ONE
|| src_comp
== SWIZZLE_W
) {
358 key
->swizzles
[i
] &= ~(0x7 << (3 * i
));
359 key
->swizzles
[i
] |= SWIZZLE_ONE
<< (3 * i
);
365 /* The channel select for green doesn't work - we have to
366 * request blue. Haswell can use SCS for this, but Ivybridge
367 * needs a shader workaround.
369 if (!devinfo
->is_haswell
)
370 key
->gather_channel_quirk_mask
|= 1 << s
;
375 /* Gen6's gather4 is broken for UINT/SINT; we treat them as
376 * UNORM/FLOAT instead and fix it in the shader.
378 if (devinfo
->gen
== 6 && prog
->info
.uses_texture_gather
) {
379 key
->gen6_gather_wa
[s
] = gen6_gather_workaround(img
->InternalFormat
);
382 /* If this is a multisample sampler, and uses the CMS MSAA layout,
383 * then we need to emit slightly different code to first sample the
386 struct intel_texture_object
*intel_tex
=
387 intel_texture_object((struct gl_texture_object
*)t
);
389 /* From gen9 onwards some single sampled buffers can also be
390 * compressed. These don't need ld2dms sampling along with mcs fetch.
392 if (intel_tex
->mt
->aux_usage
== ISL_AUX_USAGE_MCS
) {
393 assert(devinfo
->gen
>= 7);
394 assert(intel_tex
->mt
->surf
.samples
> 1);
395 assert(intel_tex
->mt
->aux_buf
);
396 assert(intel_tex
->mt
->surf
.msaa_layout
== ISL_MSAA_LAYOUT_ARRAY
);
397 key
->compressed_multisample_layout_mask
|= 1 << s
;
399 if (intel_tex
->mt
->surf
.samples
>= 16) {
400 assert(devinfo
->gen
>= 9);
401 key
->msaa_16
|= 1 << s
;
405 if (t
->Target
== GL_TEXTURE_EXTERNAL_OES
&& intel_tex
->planar_format
) {
406 switch (intel_tex
->planar_format
->components
) {
407 case __DRI_IMAGE_COMPONENTS_Y_UV
:
408 key
->y_uv_image_mask
|= 1 << s
;
410 case __DRI_IMAGE_COMPONENTS_Y_U_V
:
411 key
->y_u_v_image_mask
|= 1 << s
;
413 case __DRI_IMAGE_COMPONENTS_Y_XUXV
:
414 key
->yx_xuxv_image_mask
|= 1 << s
;
416 case __DRI_IMAGE_COMPONENTS_Y_UXVX
:
417 key
->xy_uxvx_image_mask
|= 1 << s
;
419 case __DRI_IMAGE_COMPONENTS_AYUV
:
420 key
->ayuv_image_mask
|= 1 << s
;
432 brw_wm_state_dirty(const struct brw_context
*brw
)
434 return brw_state_dirty(brw
,
446 BRW_NEW_FRAGMENT_PROGRAM
|
447 BRW_NEW_REDUCED_PRIMITIVE
|
449 BRW_NEW_VUE_MAP_GEOM_OUT
);
453 brw_wm_populate_key(struct brw_context
*brw
, struct brw_wm_prog_key
*key
)
455 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
456 struct gl_context
*ctx
= &brw
->ctx
;
457 /* BRW_NEW_FRAGMENT_PROGRAM */
458 const struct gl_program
*prog
= brw
->programs
[MESA_SHADER_FRAGMENT
];
459 const struct brw_program
*fp
= brw_program_const(prog
);
463 memset(key
, 0, sizeof(*key
));
465 /* Build the index for table lookup
467 if (devinfo
->gen
< 6) {
468 struct intel_renderbuffer
*depth_irb
=
469 intel_get_renderbuffer(ctx
->DrawBuffer
, BUFFER_DEPTH
);
472 if (prog
->info
.fs
.uses_discard
|| ctx
->Color
.AlphaEnabled
) {
473 lookup
|= BRW_WM_IZ_PS_KILL_ALPHATEST_BIT
;
476 if (prog
->info
.outputs_written
& BITFIELD64_BIT(FRAG_RESULT_DEPTH
)) {
477 lookup
|= BRW_WM_IZ_PS_COMPUTES_DEPTH_BIT
;
481 if (depth_irb
&& ctx
->Depth
.Test
) {
482 lookup
|= BRW_WM_IZ_DEPTH_TEST_ENABLE_BIT
;
484 if (brw_depth_writes_enabled(brw
))
485 lookup
|= BRW_WM_IZ_DEPTH_WRITE_ENABLE_BIT
;
488 /* _NEW_STENCIL | _NEW_BUFFERS */
489 if (brw
->stencil_enabled
) {
490 lookup
|= BRW_WM_IZ_STENCIL_TEST_ENABLE_BIT
;
492 if (ctx
->Stencil
.WriteMask
[0] ||
493 ctx
->Stencil
.WriteMask
[ctx
->Stencil
._BackFace
])
494 lookup
|= BRW_WM_IZ_STENCIL_WRITE_ENABLE_BIT
;
496 key
->iz_lookup
= lookup
;
499 line_aa
= BRW_WM_AA_NEVER
;
501 /* _NEW_LINE, _NEW_POLYGON, BRW_NEW_REDUCED_PRIMITIVE */
502 if (ctx
->Line
.SmoothFlag
) {
503 if (brw
->reduced_primitive
== GL_LINES
) {
504 line_aa
= BRW_WM_AA_ALWAYS
;
506 else if (brw
->reduced_primitive
== GL_TRIANGLES
) {
507 if (ctx
->Polygon
.FrontMode
== GL_LINE
) {
508 line_aa
= BRW_WM_AA_SOMETIMES
;
510 if (ctx
->Polygon
.BackMode
== GL_LINE
||
511 (ctx
->Polygon
.CullFlag
&&
512 ctx
->Polygon
.CullFaceMode
== GL_BACK
))
513 line_aa
= BRW_WM_AA_ALWAYS
;
515 else if (ctx
->Polygon
.BackMode
== GL_LINE
) {
516 line_aa
= BRW_WM_AA_SOMETIMES
;
518 if ((ctx
->Polygon
.CullFlag
&&
519 ctx
->Polygon
.CullFaceMode
== GL_FRONT
))
520 line_aa
= BRW_WM_AA_ALWAYS
;
525 key
->line_aa
= line_aa
;
528 key
->high_quality_derivatives
=
529 prog
->info
.uses_fddx_fddy
&&
530 ctx
->Hint
.FragmentShaderDerivative
== GL_NICEST
;
532 if (devinfo
->gen
< 6)
533 key
->stats_wm
= brw
->stats_wm
;
537 (prog
->info
.inputs_read
& (VARYING_BIT_COL0
| VARYING_BIT_COL1
)) &&
538 (ctx
->Light
.ShadeModel
== GL_FLAT
);
540 /* _NEW_FRAG_CLAMP | _NEW_BUFFERS */
541 key
->clamp_fragment_color
= ctx
->Color
._ClampFragmentColor
;
544 brw_populate_sampler_prog_key_data(ctx
, prog
, &key
->tex
);
547 key
->nr_color_regions
= ctx
->DrawBuffer
->_NumColorDrawBuffers
;
550 key
->force_dual_color_blend
= brw
->dual_color_blend_by_location
&&
551 (ctx
->Color
.BlendEnabled
& 1) && ctx
->Color
.Blend
[0]._UsesDualSrc
;
553 /* _NEW_MULTISAMPLE, _NEW_COLOR, _NEW_BUFFERS */
554 key
->replicate_alpha
= ctx
->DrawBuffer
->_NumColorDrawBuffers
> 1 &&
555 (_mesa_is_alpha_test_enabled(ctx
) ||
556 _mesa_is_alpha_to_coverage_enabled(ctx
));
558 /* _NEW_BUFFERS _NEW_MULTISAMPLE */
559 /* Ignore sample qualifier while computing this flag. */
560 if (ctx
->Multisample
.Enabled
) {
561 key
->persample_interp
=
562 ctx
->Multisample
.SampleShading
&&
563 (ctx
->Multisample
.MinSampleShadingValue
*
564 _mesa_geometric_samples(ctx
->DrawBuffer
) > 1);
566 key
->multisample_fbo
= _mesa_geometric_samples(ctx
->DrawBuffer
) > 1;
569 /* BRW_NEW_VUE_MAP_GEOM_OUT */
570 if (devinfo
->gen
< 6 || util_bitcount64(prog
->info
.inputs_read
&
571 BRW_FS_VARYING_INPUT_MASK
) > 16) {
572 key
->input_slots_valid
= brw
->vue_map_geom_out
.slots_valid
;
575 /* _NEW_COLOR | _NEW_BUFFERS */
576 /* Pre-gen6, the hardware alpha test always used each render
577 * target's alpha to do alpha test, as opposed to render target 0's alpha
578 * like GL requires. Fix that by building the alpha test into the
579 * shader, and we'll skip enabling the fixed function alpha test.
581 if (devinfo
->gen
< 6 && ctx
->DrawBuffer
->_NumColorDrawBuffers
> 1 &&
582 ctx
->Color
.AlphaEnabled
) {
583 key
->alpha_test_func
= ctx
->Color
.AlphaFunc
;
584 key
->alpha_test_ref
= ctx
->Color
.AlphaRef
;
587 /* The unique fragment program ID */
588 key
->program_string_id
= fp
->id
;
590 /* Whether reads from the framebuffer should behave coherently. */
591 key
->coherent_fb_fetch
= ctx
->Extensions
.EXT_shader_framebuffer_fetch
;
595 brw_upload_wm_prog(struct brw_context
*brw
)
597 struct brw_wm_prog_key key
;
598 struct brw_program
*fp
=
599 (struct brw_program
*) brw
->programs
[MESA_SHADER_FRAGMENT
];
601 if (!brw_wm_state_dirty(brw
))
604 brw_wm_populate_key(brw
, &key
);
606 if (brw_search_cache(&brw
->cache
, BRW_CACHE_FS_PROG
, &key
, sizeof(key
),
607 &brw
->wm
.base
.prog_offset
, &brw
->wm
.base
.prog_data
,
611 if (brw_disk_cache_upload_program(brw
, MESA_SHADER_FRAGMENT
))
614 fp
= (struct brw_program
*) brw
->programs
[MESA_SHADER_FRAGMENT
];
615 fp
->id
= key
.program_string_id
;
617 MAYBE_UNUSED
bool success
= brw_codegen_wm_prog(brw
, fp
, &key
,
618 &brw
->vue_map_geom_out
);
623 brw_wm_populate_default_key(const struct gen_device_info
*devinfo
,
624 struct brw_wm_prog_key
*key
,
625 struct gl_program
*prog
)
627 memset(key
, 0, sizeof(*key
));
629 uint64_t outputs_written
= prog
->info
.outputs_written
;
631 if (devinfo
->gen
< 6) {
632 if (prog
->info
.fs
.uses_discard
)
633 key
->iz_lookup
|= BRW_WM_IZ_PS_KILL_ALPHATEST_BIT
;
635 if (outputs_written
& BITFIELD64_BIT(FRAG_RESULT_DEPTH
))
636 key
->iz_lookup
|= BRW_WM_IZ_PS_COMPUTES_DEPTH_BIT
;
638 /* Just assume depth testing. */
639 key
->iz_lookup
|= BRW_WM_IZ_DEPTH_TEST_ENABLE_BIT
;
640 key
->iz_lookup
|= BRW_WM_IZ_DEPTH_WRITE_ENABLE_BIT
;
643 if (devinfo
->gen
< 6 || util_bitcount64(prog
->info
.inputs_read
&
644 BRW_FS_VARYING_INPUT_MASK
) > 16) {
645 key
->input_slots_valid
= prog
->info
.inputs_read
| VARYING_BIT_POS
;
648 brw_setup_tex_for_precompile(devinfo
, &key
->tex
, prog
);
650 key
->nr_color_regions
= util_bitcount64(outputs_written
&
651 ~(BITFIELD64_BIT(FRAG_RESULT_DEPTH
) |
652 BITFIELD64_BIT(FRAG_RESULT_STENCIL
) |
653 BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK
)));
655 key
->program_string_id
= brw_program(prog
)->id
;
657 /* Whether reads from the framebuffer should behave coherently. */
658 key
->coherent_fb_fetch
= devinfo
->gen
>= 9;
662 brw_fs_precompile(struct gl_context
*ctx
, struct gl_program
*prog
)
664 struct brw_context
*brw
= brw_context(ctx
);
665 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
666 struct brw_wm_prog_key key
;
668 struct brw_program
*bfp
= brw_program(prog
);
670 brw_wm_populate_default_key(&brw
->screen
->devinfo
, &key
, prog
);
672 /* check brw_wm_populate_default_key coherent_fb_fetch setting */
673 assert(key
.coherent_fb_fetch
==
674 ctx
->Extensions
.EXT_shader_framebuffer_fetch
);
676 uint32_t old_prog_offset
= brw
->wm
.base
.prog_offset
;
677 struct brw_stage_prog_data
*old_prog_data
= brw
->wm
.base
.prog_data
;
679 struct brw_vue_map vue_map
;
680 if (devinfo
->gen
< 6) {
681 brw_compute_vue_map(&brw
->screen
->devinfo
, &vue_map
,
682 prog
->info
.inputs_read
| VARYING_BIT_POS
,
686 bool success
= brw_codegen_wm_prog(brw
, bfp
, &key
, &vue_map
);
688 brw
->wm
.base
.prog_offset
= old_prog_offset
;
689 brw
->wm
.base
.prog_data
= old_prog_data
;