/*
 * Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 * Intel funded Tungsten Graphics to
 * develop this 3D driver.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
26 #include "brw_context.h"
28 #include "brw_state.h"
29 #include "main/enums.h"
30 #include "main/formats.h"
31 #include "main/fbobject.h"
32 #include "main/samplerobj.h"
33 #include "main/framebuffer.h"
34 #include "program/prog_parameter.h"
35 #include "program/program.h"
36 #include "intel_mipmap_tree.h"
37 #include "intel_image.h"
38 #include "intel_fbo.h"
39 #include "compiler/brw_nir.h"
40 #include "brw_program.h"
42 #include "util/ralloc.h"
43 #include "util/u_math.h"
46 assign_fs_binding_table_offsets(const struct gen_device_info
*devinfo
,
47 const struct gl_program
*prog
,
48 const struct brw_wm_prog_key
*key
,
49 struct brw_wm_prog_data
*prog_data
)
51 /* Render targets implicitly start at surface index 0. Even if there are
52 * no color regions, we still perform an FB write to a null render target,
53 * which will be surface 0.
55 uint32_t next_binding_table_offset
= MAX2(key
->nr_color_regions
, 1);
57 next_binding_table_offset
=
58 brw_assign_common_binding_table_offsets(devinfo
, prog
, &prog_data
->base
,
59 next_binding_table_offset
);
61 if (prog
->nir
->info
.outputs_read
&& !key
->coherent_fb_fetch
) {
62 prog_data
->binding_table
.render_target_read_start
=
63 next_binding_table_offset
;
64 next_binding_table_offset
+= key
->nr_color_regions
;
69 brw_wm_debug_recompile(struct brw_context
*brw
, struct gl_program
*prog
,
70 const struct brw_wm_prog_key
*key
)
72 perf_debug("Recompiling fragment shader for program %d\n", prog
->Id
);
75 const struct brw_wm_prog_key
*old_key
=
76 brw_find_previous_compile(&brw
->cache
, BRW_CACHE_FS_PROG
,
77 key
->program_string_id
);
80 perf_debug(" Didn't find previous compile in the shader cache for debug\n");
84 found
|= key_debug(brw
, "alphatest, computed depth, depth test, or "
86 old_key
->iz_lookup
, key
->iz_lookup
);
87 found
|= key_debug(brw
, "depth statistics",
88 old_key
->stats_wm
, key
->stats_wm
);
89 found
|= key_debug(brw
, "flat shading",
90 old_key
->flat_shade
, key
->flat_shade
);
91 found
|= key_debug(brw
, "number of color buffers",
92 old_key
->nr_color_regions
, key
->nr_color_regions
);
93 found
|= key_debug(brw
, "MRT alpha test or alpha-to-coverage",
94 old_key
->replicate_alpha
, key
->replicate_alpha
);
95 found
|= key_debug(brw
, "fragment color clamping",
96 old_key
->clamp_fragment_color
, key
->clamp_fragment_color
);
97 found
|= key_debug(brw
, "per-sample interpolation",
98 old_key
->persample_interp
, key
->persample_interp
);
99 found
|= key_debug(brw
, "multisampled FBO",
100 old_key
->multisample_fbo
, key
->multisample_fbo
);
101 found
|= key_debug(brw
, "frag coord adds sample pos",
102 old_key
->frag_coord_adds_sample_pos
,
103 key
->frag_coord_adds_sample_pos
);
104 found
|= key_debug(brw
, "line smoothing",
105 old_key
->line_aa
, key
->line_aa
);
106 found
|= key_debug(brw
, "high quality derivatives",
107 old_key
->high_quality_derivatives
,
108 key
->high_quality_derivatives
);
109 found
|= key_debug(brw
, "force dual color blending",
110 old_key
->force_dual_color_blend
,
111 key
->force_dual_color_blend
);
112 found
|= key_debug(brw
, "coherent fb fetch",
113 old_key
->coherent_fb_fetch
, key
->coherent_fb_fetch
);
115 found
|= key_debug(brw
, "input slots valid",
116 old_key
->input_slots_valid
, key
->input_slots_valid
);
117 found
|= key_debug(brw
, "mrt alpha test function",
118 old_key
->alpha_test_func
, key
->alpha_test_func
);
119 found
|= key_debug(brw
, "mrt alpha test reference value",
120 old_key
->alpha_test_ref
, key
->alpha_test_ref
);
122 found
|= brw_debug_recompile_sampler_key(brw
, &old_key
->tex
, &key
->tex
);
125 perf_debug(" Something else\n");
130 brw_codegen_wm_prog(struct brw_context
*brw
,
131 struct brw_program
*fp
,
132 struct brw_wm_prog_key
*key
,
133 struct brw_vue_map
*vue_map
)
135 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
136 void *mem_ctx
= ralloc_context(NULL
);
137 struct brw_wm_prog_data prog_data
;
138 const GLuint
*program
;
139 bool start_busy
= false;
140 double start_time
= 0;
142 memset(&prog_data
, 0, sizeof(prog_data
));
144 /* Use ALT floating point mode for ARB programs so that 0^0 == 1. */
145 if (fp
->program
.is_arb_asm
)
146 prog_data
.base
.use_alt_mode
= true;
148 assign_fs_binding_table_offsets(devinfo
, &fp
->program
, key
, &prog_data
);
150 if (!fp
->program
.is_arb_asm
) {
151 brw_nir_setup_glsl_uniforms(mem_ctx
, fp
->program
.nir
, &fp
->program
,
152 &prog_data
.base
, true);
153 brw_nir_analyze_ubo_ranges(brw
->screen
->compiler
, fp
->program
.nir
,
154 NULL
, prog_data
.base
.ubo_ranges
);
156 brw_nir_setup_arb_uniforms(mem_ctx
, fp
->program
.nir
, &fp
->program
,
159 if (unlikely(INTEL_DEBUG
& DEBUG_WM
))
160 brw_dump_arb_asm("fragment", &fp
->program
);
163 if (unlikely(brw
->perf_debug
)) {
164 start_busy
= (brw
->batch
.last_bo
&&
165 brw_bo_busy(brw
->batch
.last_bo
));
166 start_time
= get_time();
169 int st_index8
= -1, st_index16
= -1, st_index32
= -1;
170 if (INTEL_DEBUG
& DEBUG_SHADER_TIME
) {
171 st_index8
= brw_get_shader_time_index(brw
, &fp
->program
, ST_FS8
,
172 !fp
->program
.is_arb_asm
);
173 st_index16
= brw_get_shader_time_index(brw
, &fp
->program
, ST_FS16
,
174 !fp
->program
.is_arb_asm
);
175 st_index32
= brw_get_shader_time_index(brw
, &fp
->program
, ST_FS32
,
176 !fp
->program
.is_arb_asm
);
179 char *error_str
= NULL
;
180 program
= brw_compile_fs(brw
->screen
->compiler
, brw
, mem_ctx
,
181 key
, &prog_data
, fp
->program
.nir
,
182 &fp
->program
, st_index8
, st_index16
, st_index32
,
183 true, false, vue_map
,
186 if (program
== NULL
) {
187 if (!fp
->program
.is_arb_asm
) {
188 fp
->program
.sh
.data
->LinkStatus
= LINKING_FAILURE
;
189 ralloc_strcat(&fp
->program
.sh
.data
->InfoLog
, error_str
);
192 _mesa_problem(NULL
, "Failed to compile fragment shader: %s\n", error_str
);
194 ralloc_free(mem_ctx
);
198 if (unlikely(brw
->perf_debug
)) {
199 if (fp
->compiled_once
)
200 brw_wm_debug_recompile(brw
, &fp
->program
, key
);
201 fp
->compiled_once
= true;
203 if (start_busy
&& !brw_bo_busy(brw
->batch
.last_bo
)) {
204 perf_debug("FS compile took %.03f ms and stalled the GPU\n",
205 (get_time() - start_time
) * 1000);
209 brw_alloc_stage_scratch(brw
, &brw
->wm
.base
, prog_data
.base
.total_scratch
);
211 if (unlikely((INTEL_DEBUG
& DEBUG_WM
) && fp
->program
.is_arb_asm
))
212 fprintf(stderr
, "\n");
214 /* The param and pull_param arrays will be freed by the shader cache. */
215 ralloc_steal(NULL
, prog_data
.base
.param
);
216 ralloc_steal(NULL
, prog_data
.base
.pull_param
);
217 brw_upload_cache(&brw
->cache
, BRW_CACHE_FS_PROG
,
218 key
, sizeof(struct brw_wm_prog_key
),
219 program
, prog_data
.base
.program_size
,
220 &prog_data
, sizeof(prog_data
),
221 &brw
->wm
.base
.prog_offset
, &brw
->wm
.base
.prog_data
);
223 ralloc_free(mem_ctx
);
229 brw_debug_recompile_sampler_key(struct brw_context
*brw
,
230 const struct brw_sampler_prog_key_data
*old_key
,
231 const struct brw_sampler_prog_key_data
*key
)
235 for (unsigned int i
= 0; i
< MAX_SAMPLERS
; i
++) {
236 found
|= key_debug(brw
, "EXT_texture_swizzle or DEPTH_TEXTURE_MODE",
237 old_key
->swizzles
[i
], key
->swizzles
[i
]);
239 found
|= key_debug(brw
, "GL_CLAMP enabled on any texture unit's 1st coordinate",
240 old_key
->gl_clamp_mask
[0], key
->gl_clamp_mask
[0]);
241 found
|= key_debug(brw
, "GL_CLAMP enabled on any texture unit's 2nd coordinate",
242 old_key
->gl_clamp_mask
[1], key
->gl_clamp_mask
[1]);
243 found
|= key_debug(brw
, "GL_CLAMP enabled on any texture unit's 3rd coordinate",
244 old_key
->gl_clamp_mask
[2], key
->gl_clamp_mask
[2]);
245 found
|= key_debug(brw
, "gather channel quirk on any texture unit",
246 old_key
->gather_channel_quirk_mask
, key
->gather_channel_quirk_mask
);
247 found
|= key_debug(brw
, "compressed multisample layout",
248 old_key
->compressed_multisample_layout_mask
,
249 key
->compressed_multisample_layout_mask
);
250 found
|= key_debug(brw
, "16x msaa",
254 found
|= key_debug(brw
, "y_uv image bound",
255 old_key
->y_uv_image_mask
,
256 key
->y_uv_image_mask
);
257 found
|= key_debug(brw
, "y_u_v image bound",
258 old_key
->y_u_v_image_mask
,
259 key
->y_u_v_image_mask
);
260 found
|= key_debug(brw
, "yx_xuxv image bound",
261 old_key
->yx_xuxv_image_mask
,
262 key
->yx_xuxv_image_mask
);
263 found
|= key_debug(brw
, "xy_uxvx image bound",
264 old_key
->xy_uxvx_image_mask
,
265 key
->xy_uxvx_image_mask
);
266 found
|= key_debug(brw
, "ayuv image bound",
267 old_key
->ayuv_image_mask
,
268 key
->ayuv_image_mask
);
271 for (unsigned int i
= 0; i
< MAX_SAMPLERS
; i
++) {
272 found
|= key_debug(brw
, "textureGather workarounds",
273 old_key
->gen6_gather_wa
[i
], key
->gen6_gather_wa
[i
]);
280 gen6_gather_workaround(GLenum internalformat
)
282 switch (internalformat
) {
283 case GL_R8I
: return WA_SIGN
| WA_8BIT
;
284 case GL_R8UI
: return WA_8BIT
;
285 case GL_R16I
: return WA_SIGN
| WA_16BIT
;
286 case GL_R16UI
: return WA_16BIT
;
288 /* Note that even though GL_R32I and GL_R32UI have format overrides in
289 * the surface state, there is no shader w/a required.
296 brw_populate_sampler_prog_key_data(struct gl_context
*ctx
,
297 const struct gl_program
*prog
,
298 struct brw_sampler_prog_key_data
*key
)
300 struct brw_context
*brw
= brw_context(ctx
);
301 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
302 GLbitfield mask
= prog
->SamplersUsed
;
305 const int s
= u_bit_scan(&mask
);
307 key
->swizzles
[s
] = SWIZZLE_NOOP
;
309 int unit_id
= prog
->SamplerUnits
[s
];
310 const struct gl_texture_unit
*unit
= &ctx
->Texture
.Unit
[unit_id
];
312 if (unit
->_Current
&& unit
->_Current
->Target
!= GL_TEXTURE_BUFFER
) {
313 const struct gl_texture_object
*t
= unit
->_Current
;
314 const struct gl_texture_image
*img
= t
->Image
[0][t
->BaseLevel
];
315 struct gl_sampler_object
*sampler
= _mesa_get_samplerobj(ctx
, unit_id
);
317 const bool alpha_depth
= t
->DepthMode
== GL_ALPHA
&&
318 (img
->_BaseFormat
== GL_DEPTH_COMPONENT
||
319 img
->_BaseFormat
== GL_DEPTH_STENCIL
);
321 /* Haswell handles texture swizzling as surface format overrides
322 * (except for GL_ALPHA); all other platforms need MOVs in the shader.
324 if (alpha_depth
|| (devinfo
->gen
< 8 && !devinfo
->is_haswell
))
325 key
->swizzles
[s
] = brw_get_texture_swizzle(ctx
, t
);
327 if (devinfo
->gen
< 8 &&
328 sampler
->MinFilter
!= GL_NEAREST
&&
329 sampler
->MagFilter
!= GL_NEAREST
) {
330 if (sampler
->WrapS
== GL_CLAMP
)
331 key
->gl_clamp_mask
[0] |= 1 << s
;
332 if (sampler
->WrapT
== GL_CLAMP
)
333 key
->gl_clamp_mask
[1] |= 1 << s
;
334 if (sampler
->WrapR
== GL_CLAMP
)
335 key
->gl_clamp_mask
[2] |= 1 << s
;
338 /* gather4 for RG32* is broken in multiple ways on Gen7. */
339 if (devinfo
->gen
== 7 && prog
->info
.uses_texture_gather
) {
340 switch (img
->InternalFormat
) {
343 /* We have to override the format to R32G32_FLOAT_LD.
344 * This means that SCS_ALPHA and SCS_ONE will return 0x3f8
345 * (1.0) rather than integer 1. This needs shader hacks.
347 * On Ivybridge, we whack W (alpha) to ONE in our key's
348 * swizzle. On Haswell, we look at the original texture
349 * swizzle, and use XYZW with channels overridden to ONE,
350 * leaving normal texture swizzling to SCS.
352 unsigned src_swizzle
=
353 devinfo
->is_haswell
? t
->_Swizzle
: key
->swizzles
[s
];
354 for (int i
= 0; i
< 4; i
++) {
355 unsigned src_comp
= GET_SWZ(src_swizzle
, i
);
356 if (src_comp
== SWIZZLE_ONE
|| src_comp
== SWIZZLE_W
) {
357 key
->swizzles
[i
] &= ~(0x7 << (3 * i
));
358 key
->swizzles
[i
] |= SWIZZLE_ONE
<< (3 * i
);
364 /* The channel select for green doesn't work - we have to
365 * request blue. Haswell can use SCS for this, but Ivybridge
366 * needs a shader workaround.
368 if (!devinfo
->is_haswell
)
369 key
->gather_channel_quirk_mask
|= 1 << s
;
374 /* Gen6's gather4 is broken for UINT/SINT; we treat them as
375 * UNORM/FLOAT instead and fix it in the shader.
377 if (devinfo
->gen
== 6 && prog
->info
.uses_texture_gather
) {
378 key
->gen6_gather_wa
[s
] = gen6_gather_workaround(img
->InternalFormat
);
381 /* If this is a multisample sampler, and uses the CMS MSAA layout,
382 * then we need to emit slightly different code to first sample the
385 struct intel_texture_object
*intel_tex
=
386 intel_texture_object((struct gl_texture_object
*)t
);
388 /* From gen9 onwards some single sampled buffers can also be
389 * compressed. These don't need ld2dms sampling along with mcs fetch.
391 if (intel_tex
->mt
->aux_usage
== ISL_AUX_USAGE_MCS
) {
392 assert(devinfo
->gen
>= 7);
393 assert(intel_tex
->mt
->surf
.samples
> 1);
394 assert(intel_tex
->mt
->aux_buf
);
395 assert(intel_tex
->mt
->surf
.msaa_layout
== ISL_MSAA_LAYOUT_ARRAY
);
396 key
->compressed_multisample_layout_mask
|= 1 << s
;
398 if (intel_tex
->mt
->surf
.samples
>= 16) {
399 assert(devinfo
->gen
>= 9);
400 key
->msaa_16
|= 1 << s
;
404 if (t
->Target
== GL_TEXTURE_EXTERNAL_OES
&& intel_tex
->planar_format
) {
405 switch (intel_tex
->planar_format
->components
) {
406 case __DRI_IMAGE_COMPONENTS_Y_UV
:
407 key
->y_uv_image_mask
|= 1 << s
;
409 case __DRI_IMAGE_COMPONENTS_Y_U_V
:
410 key
->y_u_v_image_mask
|= 1 << s
;
412 case __DRI_IMAGE_COMPONENTS_Y_XUXV
:
413 key
->yx_xuxv_image_mask
|= 1 << s
;
415 case __DRI_IMAGE_COMPONENTS_Y_UXVX
:
416 key
->xy_uxvx_image_mask
|= 1 << s
;
418 case __DRI_IMAGE_COMPONENTS_AYUV
:
419 key
->ayuv_image_mask
|= 1 << s
;
431 brw_wm_state_dirty(const struct brw_context
*brw
)
433 return brw_state_dirty(brw
,
445 BRW_NEW_FRAGMENT_PROGRAM
|
446 BRW_NEW_REDUCED_PRIMITIVE
|
448 BRW_NEW_VUE_MAP_GEOM_OUT
);
452 brw_wm_populate_key(struct brw_context
*brw
, struct brw_wm_prog_key
*key
)
454 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
455 struct gl_context
*ctx
= &brw
->ctx
;
456 /* BRW_NEW_FRAGMENT_PROGRAM */
457 const struct gl_program
*prog
= brw
->programs
[MESA_SHADER_FRAGMENT
];
458 const struct brw_program
*fp
= brw_program_const(prog
);
462 memset(key
, 0, sizeof(*key
));
464 /* Build the index for table lookup
466 if (devinfo
->gen
< 6) {
467 struct intel_renderbuffer
*depth_irb
=
468 intel_get_renderbuffer(ctx
->DrawBuffer
, BUFFER_DEPTH
);
471 if (prog
->info
.fs
.uses_discard
|| ctx
->Color
.AlphaEnabled
) {
472 lookup
|= BRW_WM_IZ_PS_KILL_ALPHATEST_BIT
;
475 if (prog
->info
.outputs_written
& BITFIELD64_BIT(FRAG_RESULT_DEPTH
)) {
476 lookup
|= BRW_WM_IZ_PS_COMPUTES_DEPTH_BIT
;
480 if (depth_irb
&& ctx
->Depth
.Test
) {
481 lookup
|= BRW_WM_IZ_DEPTH_TEST_ENABLE_BIT
;
483 if (brw_depth_writes_enabled(brw
))
484 lookup
|= BRW_WM_IZ_DEPTH_WRITE_ENABLE_BIT
;
487 /* _NEW_STENCIL | _NEW_BUFFERS */
488 if (brw
->stencil_enabled
) {
489 lookup
|= BRW_WM_IZ_STENCIL_TEST_ENABLE_BIT
;
491 if (ctx
->Stencil
.WriteMask
[0] ||
492 ctx
->Stencil
.WriteMask
[ctx
->Stencil
._BackFace
])
493 lookup
|= BRW_WM_IZ_STENCIL_WRITE_ENABLE_BIT
;
495 key
->iz_lookup
= lookup
;
498 line_aa
= BRW_WM_AA_NEVER
;
500 /* _NEW_LINE, _NEW_POLYGON, BRW_NEW_REDUCED_PRIMITIVE */
501 if (ctx
->Line
.SmoothFlag
) {
502 if (brw
->reduced_primitive
== GL_LINES
) {
503 line_aa
= BRW_WM_AA_ALWAYS
;
505 else if (brw
->reduced_primitive
== GL_TRIANGLES
) {
506 if (ctx
->Polygon
.FrontMode
== GL_LINE
) {
507 line_aa
= BRW_WM_AA_SOMETIMES
;
509 if (ctx
->Polygon
.BackMode
== GL_LINE
||
510 (ctx
->Polygon
.CullFlag
&&
511 ctx
->Polygon
.CullFaceMode
== GL_BACK
))
512 line_aa
= BRW_WM_AA_ALWAYS
;
514 else if (ctx
->Polygon
.BackMode
== GL_LINE
) {
515 line_aa
= BRW_WM_AA_SOMETIMES
;
517 if ((ctx
->Polygon
.CullFlag
&&
518 ctx
->Polygon
.CullFaceMode
== GL_FRONT
))
519 line_aa
= BRW_WM_AA_ALWAYS
;
524 key
->line_aa
= line_aa
;
527 key
->high_quality_derivatives
=
528 prog
->info
.uses_fddx_fddy
&&
529 ctx
->Hint
.FragmentShaderDerivative
== GL_NICEST
;
531 if (devinfo
->gen
< 6)
532 key
->stats_wm
= brw
->stats_wm
;
536 (prog
->info
.inputs_read
& (VARYING_BIT_COL0
| VARYING_BIT_COL1
)) &&
537 (ctx
->Light
.ShadeModel
== GL_FLAT
);
539 /* _NEW_FRAG_CLAMP | _NEW_BUFFERS */
540 key
->clamp_fragment_color
= ctx
->Color
._ClampFragmentColor
;
543 brw_populate_sampler_prog_key_data(ctx
, prog
, &key
->tex
);
546 key
->nr_color_regions
= ctx
->DrawBuffer
->_NumColorDrawBuffers
;
549 key
->force_dual_color_blend
= brw
->dual_color_blend_by_location
&&
550 (ctx
->Color
.BlendEnabled
& 1) && ctx
->Color
.Blend
[0]._UsesDualSrc
;
552 /* _NEW_MULTISAMPLE, _NEW_COLOR, _NEW_BUFFERS */
553 key
->replicate_alpha
= ctx
->DrawBuffer
->_NumColorDrawBuffers
> 1 &&
554 (_mesa_is_alpha_test_enabled(ctx
) ||
555 _mesa_is_alpha_to_coverage_enabled(ctx
));
557 /* _NEW_BUFFERS _NEW_MULTISAMPLE */
558 /* Ignore sample qualifier while computing this flag. */
559 if (ctx
->Multisample
.Enabled
) {
560 key
->persample_interp
=
561 ctx
->Multisample
.SampleShading
&&
562 (ctx
->Multisample
.MinSampleShadingValue
*
563 _mesa_geometric_samples(ctx
->DrawBuffer
) > 1);
565 key
->multisample_fbo
= _mesa_geometric_samples(ctx
->DrawBuffer
) > 1;
568 /* BRW_NEW_VUE_MAP_GEOM_OUT */
569 if (devinfo
->gen
< 6 || util_bitcount64(prog
->info
.inputs_read
&
570 BRW_FS_VARYING_INPUT_MASK
) > 16) {
571 key
->input_slots_valid
= brw
->vue_map_geom_out
.slots_valid
;
574 /* _NEW_COLOR | _NEW_BUFFERS */
575 /* Pre-gen6, the hardware alpha test always used each render
576 * target's alpha to do alpha test, as opposed to render target 0's alpha
577 * like GL requires. Fix that by building the alpha test into the
578 * shader, and we'll skip enabling the fixed function alpha test.
580 if (devinfo
->gen
< 6 && ctx
->DrawBuffer
->_NumColorDrawBuffers
> 1 &&
581 ctx
->Color
.AlphaEnabled
) {
582 key
->alpha_test_func
= ctx
->Color
.AlphaFunc
;
583 key
->alpha_test_ref
= ctx
->Color
.AlphaRef
;
586 /* The unique fragment program ID */
587 key
->program_string_id
= fp
->id
;
589 /* Whether reads from the framebuffer should behave coherently. */
590 key
->coherent_fb_fetch
= ctx
->Extensions
.EXT_shader_framebuffer_fetch
;
594 brw_upload_wm_prog(struct brw_context
*brw
)
596 struct brw_wm_prog_key key
;
597 struct brw_program
*fp
=
598 (struct brw_program
*) brw
->programs
[MESA_SHADER_FRAGMENT
];
600 if (!brw_wm_state_dirty(brw
))
603 brw_wm_populate_key(brw
, &key
);
605 if (brw_search_cache(&brw
->cache
, BRW_CACHE_FS_PROG
, &key
, sizeof(key
),
606 &brw
->wm
.base
.prog_offset
, &brw
->wm
.base
.prog_data
,
610 if (brw_disk_cache_upload_program(brw
, MESA_SHADER_FRAGMENT
))
613 fp
= (struct brw_program
*) brw
->programs
[MESA_SHADER_FRAGMENT
];
614 fp
->id
= key
.program_string_id
;
616 MAYBE_UNUSED
bool success
= brw_codegen_wm_prog(brw
, fp
, &key
,
617 &brw
->vue_map_geom_out
);
622 brw_wm_populate_default_key(const struct gen_device_info
*devinfo
,
623 struct brw_wm_prog_key
*key
,
624 struct gl_program
*prog
)
626 memset(key
, 0, sizeof(*key
));
628 uint64_t outputs_written
= prog
->info
.outputs_written
;
630 if (devinfo
->gen
< 6) {
631 if (prog
->info
.fs
.uses_discard
)
632 key
->iz_lookup
|= BRW_WM_IZ_PS_KILL_ALPHATEST_BIT
;
634 if (outputs_written
& BITFIELD64_BIT(FRAG_RESULT_DEPTH
))
635 key
->iz_lookup
|= BRW_WM_IZ_PS_COMPUTES_DEPTH_BIT
;
637 /* Just assume depth testing. */
638 key
->iz_lookup
|= BRW_WM_IZ_DEPTH_TEST_ENABLE_BIT
;
639 key
->iz_lookup
|= BRW_WM_IZ_DEPTH_WRITE_ENABLE_BIT
;
642 if (devinfo
->gen
< 6 || util_bitcount64(prog
->info
.inputs_read
&
643 BRW_FS_VARYING_INPUT_MASK
) > 16) {
644 key
->input_slots_valid
= prog
->info
.inputs_read
| VARYING_BIT_POS
;
647 brw_setup_tex_for_precompile(devinfo
, &key
->tex
, prog
);
649 key
->nr_color_regions
= util_bitcount64(outputs_written
&
650 ~(BITFIELD64_BIT(FRAG_RESULT_DEPTH
) |
651 BITFIELD64_BIT(FRAG_RESULT_STENCIL
) |
652 BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK
)));
654 key
->program_string_id
= brw_program(prog
)->id
;
656 /* Whether reads from the framebuffer should behave coherently. */
657 key
->coherent_fb_fetch
= devinfo
->gen
>= 9;
661 brw_fs_precompile(struct gl_context
*ctx
, struct gl_program
*prog
)
663 struct brw_context
*brw
= brw_context(ctx
);
664 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
665 struct brw_wm_prog_key key
;
667 struct brw_program
*bfp
= brw_program(prog
);
669 brw_wm_populate_default_key(&brw
->screen
->devinfo
, &key
, prog
);
671 /* check brw_wm_populate_default_key coherent_fb_fetch setting */
672 assert(key
.coherent_fb_fetch
==
673 ctx
->Extensions
.EXT_shader_framebuffer_fetch
);
675 uint32_t old_prog_offset
= brw
->wm
.base
.prog_offset
;
676 struct brw_stage_prog_data
*old_prog_data
= brw
->wm
.base
.prog_data
;
678 struct brw_vue_map vue_map
;
679 if (devinfo
->gen
< 6) {
680 brw_compute_vue_map(&brw
->screen
->devinfo
, &vue_map
,
681 prog
->info
.inputs_read
| VARYING_BIT_POS
,
685 bool success
= brw_codegen_wm_prog(brw
, bfp
, &key
, &vue_map
);
687 brw
->wm
.base
.prog_offset
= old_prog_offset
;
688 brw
->wm
.base
.prog_data
= old_prog_data
;