/*
 * Copyright (C) Intel Corp. 2006. All Rights Reserved.
 * Intel funded Tungsten Graphics to
 * develop this 3D driver.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
26 #include "brw_context.h"
28 #include "brw_state.h"
29 #include "main/enums.h"
30 #include "main/formats.h"
31 #include "main/fbobject.h"
32 #include "main/samplerobj.h"
33 #include "main/framebuffer.h"
34 #include "program/prog_parameter.h"
35 #include "program/program.h"
36 #include "intel_mipmap_tree.h"
37 #include "intel_image.h"
38 #include "compiler/brw_nir.h"
39 #include "brw_program.h"
41 #include "util/ralloc.h"
44 assign_fs_binding_table_offsets(const struct gen_device_info
*devinfo
,
45 const struct gl_program
*prog
,
46 const struct brw_wm_prog_key
*key
,
47 struct brw_wm_prog_data
*prog_data
)
49 uint32_t next_binding_table_offset
= 0;
51 /* If there are no color regions, we still perform an FB write to a null
52 * renderbuffer, which we place at surface index 0.
54 prog_data
->binding_table
.render_target_start
= next_binding_table_offset
;
55 next_binding_table_offset
+= MAX2(key
->nr_color_regions
, 1);
57 next_binding_table_offset
=
58 brw_assign_common_binding_table_offsets(devinfo
, prog
, &prog_data
->base
,
59 next_binding_table_offset
);
61 if (prog
->nir
->info
->outputs_read
&& !key
->coherent_fb_fetch
) {
62 prog_data
->binding_table
.render_target_read_start
=
63 next_binding_table_offset
;
64 next_binding_table_offset
+= key
->nr_color_regions
;
69 brw_wm_debug_recompile(struct brw_context
*brw
, struct gl_program
*prog
,
70 const struct brw_wm_prog_key
*key
)
72 perf_debug("Recompiling fragment shader for program %d\n", prog
->Id
);
75 const struct brw_wm_prog_key
*old_key
=
76 brw_find_previous_compile(&brw
->cache
, BRW_CACHE_FS_PROG
,
77 key
->program_string_id
);
80 perf_debug(" Didn't find previous compile in the shader cache for debug\n");
84 found
|= key_debug(brw
, "alphatest, computed depth, depth test, or "
86 old_key
->iz_lookup
, key
->iz_lookup
);
87 found
|= key_debug(brw
, "depth statistics",
88 old_key
->stats_wm
, key
->stats_wm
);
89 found
|= key_debug(brw
, "flat shading",
90 old_key
->flat_shade
, key
->flat_shade
);
91 found
|= key_debug(brw
, "per-sample interpolation",
92 old_key
->persample_interp
, key
->persample_interp
);
93 found
|= key_debug(brw
, "number of color buffers",
94 old_key
->nr_color_regions
, key
->nr_color_regions
);
95 found
|= key_debug(brw
, "MRT alpha test or alpha-to-coverage",
96 old_key
->replicate_alpha
, key
->replicate_alpha
);
97 found
|= key_debug(brw
, "fragment color clamping",
98 old_key
->clamp_fragment_color
, key
->clamp_fragment_color
);
99 found
|= key_debug(brw
, "multisampled FBO",
100 old_key
->multisample_fbo
, key
->multisample_fbo
);
101 found
|= key_debug(brw
, "line smoothing",
102 old_key
->line_aa
, key
->line_aa
);
103 found
|= key_debug(brw
, "input slots valid",
104 old_key
->input_slots_valid
, key
->input_slots_valid
);
105 found
|= key_debug(brw
, "mrt alpha test function",
106 old_key
->alpha_test_func
, key
->alpha_test_func
);
107 found
|= key_debug(brw
, "mrt alpha test reference value",
108 old_key
->alpha_test_ref
, key
->alpha_test_ref
);
110 found
|= brw_debug_recompile_sampler_key(brw
, &old_key
->tex
, &key
->tex
);
113 perf_debug(" Something else\n");
118 * All Mesa program -> GPU code generation goes through this function.
119 * Depending on the instructions used (i.e. flow control instructions)
120 * we'll use one of two code generators.
123 brw_codegen_wm_prog(struct brw_context
*brw
,
124 struct brw_program
*fp
,
125 struct brw_wm_prog_key
*key
,
126 struct brw_vue_map
*vue_map
)
128 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
129 struct gl_context
*ctx
= &brw
->ctx
;
130 void *mem_ctx
= ralloc_context(NULL
);
131 struct brw_wm_prog_data prog_data
;
132 const GLuint
*program
;
134 bool start_busy
= false;
135 double start_time
= 0;
137 memset(&prog_data
, 0, sizeof(prog_data
));
139 /* Use ALT floating point mode for ARB programs so that 0^0 == 1. */
140 if (fp
->program
.is_arb_asm
)
141 prog_data
.base
.use_alt_mode
= true;
143 assign_fs_binding_table_offsets(devinfo
, &fp
->program
, key
, &prog_data
);
145 /* Allocate the references to the uniforms that will end up in the
146 * prog_data associated with the compiled program, and which will be freed
147 * by the state cache.
149 int param_count
= fp
->program
.nir
->num_uniforms
/ 4;
150 prog_data
.base
.nr_image_params
= fp
->program
.info
.num_images
;
151 /* The backend also sometimes adds params for texture size. */
152 param_count
+= 2 * ctx
->Const
.Program
[MESA_SHADER_FRAGMENT
].MaxTextureImageUnits
;
153 prog_data
.base
.param
=
154 rzalloc_array(NULL
, const gl_constant_value
*, param_count
);
155 prog_data
.base
.pull_param
=
156 rzalloc_array(NULL
, const gl_constant_value
*, param_count
);
157 prog_data
.base
.image_param
=
158 rzalloc_array(NULL
, struct brw_image_param
,
159 prog_data
.base
.nr_image_params
);
160 prog_data
.base
.nr_params
= param_count
;
162 if (!fp
->program
.is_arb_asm
) {
163 brw_nir_setup_glsl_uniforms(fp
->program
.nir
, &fp
->program
,
164 &prog_data
.base
, true);
166 brw_nir_setup_arb_uniforms(fp
->program
.nir
, &fp
->program
,
169 if (unlikely(INTEL_DEBUG
& DEBUG_WM
))
170 brw_dump_arb_asm("fragment", &fp
->program
);
173 if (unlikely(brw
->perf_debug
)) {
174 start_busy
= (brw
->batch
.last_bo
&&
175 brw_bo_busy(brw
->batch
.last_bo
));
176 start_time
= get_time();
179 int st_index8
= -1, st_index16
= -1;
180 if (INTEL_DEBUG
& DEBUG_SHADER_TIME
) {
181 st_index8
= brw_get_shader_time_index(brw
, &fp
->program
, ST_FS8
,
182 !fp
->program
.is_arb_asm
);
183 st_index16
= brw_get_shader_time_index(brw
, &fp
->program
, ST_FS16
,
184 !fp
->program
.is_arb_asm
);
187 char *error_str
= NULL
;
188 program
= brw_compile_fs(brw
->screen
->compiler
, brw
, mem_ctx
,
189 key
, &prog_data
, fp
->program
.nir
,
190 &fp
->program
, st_index8
, st_index16
,
191 true, brw
->use_rep_send
, vue_map
,
192 &program_size
, &error_str
);
194 if (program
== NULL
) {
195 if (!fp
->program
.is_arb_asm
) {
196 fp
->program
.sh
.data
->LinkStatus
= linking_failure
;
197 ralloc_strcat(&fp
->program
.sh
.data
->InfoLog
, error_str
);
200 _mesa_problem(NULL
, "Failed to compile fragment shader: %s\n", error_str
);
202 ralloc_free(mem_ctx
);
206 if (unlikely(brw
->perf_debug
)) {
207 if (fp
->compiled_once
)
208 brw_wm_debug_recompile(brw
, &fp
->program
, key
);
209 fp
->compiled_once
= true;
211 if (start_busy
&& !brw_bo_busy(brw
->batch
.last_bo
)) {
212 perf_debug("FS compile took %.03f ms and stalled the GPU\n",
213 (get_time() - start_time
) * 1000);
217 brw_alloc_stage_scratch(brw
, &brw
->wm
.base
,
218 prog_data
.base
.total_scratch
,
219 devinfo
->max_wm_threads
);
221 if (unlikely((INTEL_DEBUG
& DEBUG_WM
) && fp
->program
.is_arb_asm
))
222 fprintf(stderr
, "\n");
224 brw_upload_cache(&brw
->cache
, BRW_CACHE_FS_PROG
,
225 key
, sizeof(struct brw_wm_prog_key
),
226 program
, program_size
,
227 &prog_data
, sizeof(prog_data
),
228 &brw
->wm
.base
.prog_offset
, &brw
->wm
.base
.prog_data
);
230 ralloc_free(mem_ctx
);
236 brw_debug_recompile_sampler_key(struct brw_context
*brw
,
237 const struct brw_sampler_prog_key_data
*old_key
,
238 const struct brw_sampler_prog_key_data
*key
)
242 for (unsigned int i
= 0; i
< MAX_SAMPLERS
; i
++) {
243 found
|= key_debug(brw
, "EXT_texture_swizzle or DEPTH_TEXTURE_MODE",
244 old_key
->swizzles
[i
], key
->swizzles
[i
]);
246 found
|= key_debug(brw
, "GL_CLAMP enabled on any texture unit's 1st coordinate",
247 old_key
->gl_clamp_mask
[0], key
->gl_clamp_mask
[0]);
248 found
|= key_debug(brw
, "GL_CLAMP enabled on any texture unit's 2nd coordinate",
249 old_key
->gl_clamp_mask
[1], key
->gl_clamp_mask
[1]);
250 found
|= key_debug(brw
, "GL_CLAMP enabled on any texture unit's 3rd coordinate",
251 old_key
->gl_clamp_mask
[2], key
->gl_clamp_mask
[2]);
252 found
|= key_debug(brw
, "gather channel quirk on any texture unit",
253 old_key
->gather_channel_quirk_mask
, key
->gather_channel_quirk_mask
);
254 found
|= key_debug(brw
, "compressed multisample layout",
255 old_key
->compressed_multisample_layout_mask
,
256 key
->compressed_multisample_layout_mask
);
257 found
|= key_debug(brw
, "16x msaa",
261 found
|= key_debug(brw
, "y_uv image bound",
262 old_key
->y_uv_image_mask
,
263 key
->y_uv_image_mask
);
264 found
|= key_debug(brw
, "y_u_v image bound",
265 old_key
->y_u_v_image_mask
,
266 key
->y_u_v_image_mask
);
267 found
|= key_debug(brw
, "yx_xuxv image bound",
268 old_key
->yx_xuxv_image_mask
,
269 key
->yx_xuxv_image_mask
);
271 for (unsigned int i
= 0; i
< MAX_SAMPLERS
; i
++) {
272 found
|= key_debug(brw
, "textureGather workarounds",
273 old_key
->gen6_gather_wa
[i
], key
->gen6_gather_wa
[i
]);
280 gen6_gather_workaround(GLenum internalformat
)
282 switch (internalformat
) {
283 case GL_R8I
: return WA_SIGN
| WA_8BIT
;
284 case GL_R8UI
: return WA_8BIT
;
285 case GL_R16I
: return WA_SIGN
| WA_16BIT
;
286 case GL_R16UI
: return WA_16BIT
;
288 /* Note that even though GL_R32I and GL_R32UI have format overrides in
289 * the surface state, there is no shader w/a required.
296 brw_populate_sampler_prog_key_data(struct gl_context
*ctx
,
297 const struct gl_program
*prog
,
298 struct brw_sampler_prog_key_data
*key
)
300 struct brw_context
*brw
= brw_context(ctx
);
301 GLbitfield mask
= prog
->SamplersUsed
;
304 const int s
= u_bit_scan(&mask
);
306 key
->swizzles
[s
] = SWIZZLE_NOOP
;
308 int unit_id
= prog
->SamplerUnits
[s
];
309 const struct gl_texture_unit
*unit
= &ctx
->Texture
.Unit
[unit_id
];
311 if (unit
->_Current
&& unit
->_Current
->Target
!= GL_TEXTURE_BUFFER
) {
312 const struct gl_texture_object
*t
= unit
->_Current
;
313 const struct gl_texture_image
*img
= t
->Image
[0][t
->BaseLevel
];
314 struct gl_sampler_object
*sampler
= _mesa_get_samplerobj(ctx
, unit_id
);
316 const bool alpha_depth
= t
->DepthMode
== GL_ALPHA
&&
317 (img
->_BaseFormat
== GL_DEPTH_COMPONENT
||
318 img
->_BaseFormat
== GL_DEPTH_STENCIL
);
320 /* Haswell handles texture swizzling as surface format overrides
321 * (except for GL_ALPHA); all other platforms need MOVs in the shader.
323 if (alpha_depth
|| (brw
->gen
< 8 && !brw
->is_haswell
))
324 key
->swizzles
[s
] = brw_get_texture_swizzle(ctx
, t
);
327 sampler
->MinFilter
!= GL_NEAREST
&&
328 sampler
->MagFilter
!= GL_NEAREST
) {
329 if (sampler
->WrapS
== GL_CLAMP
)
330 key
->gl_clamp_mask
[0] |= 1 << s
;
331 if (sampler
->WrapT
== GL_CLAMP
)
332 key
->gl_clamp_mask
[1] |= 1 << s
;
333 if (sampler
->WrapR
== GL_CLAMP
)
334 key
->gl_clamp_mask
[2] |= 1 << s
;
337 /* gather4 for RG32* is broken in multiple ways on Gen7. */
338 if (brw
->gen
== 7 && prog
->nir
->info
->uses_texture_gather
) {
339 switch (img
->InternalFormat
) {
342 /* We have to override the format to R32G32_FLOAT_LD.
343 * This means that SCS_ALPHA and SCS_ONE will return 0x3f8
344 * (1.0) rather than integer 1. This needs shader hacks.
346 * On Ivybridge, we whack W (alpha) to ONE in our key's
347 * swizzle. On Haswell, we look at the original texture
348 * swizzle, and use XYZW with channels overridden to ONE,
349 * leaving normal texture swizzling to SCS.
351 unsigned src_swizzle
=
352 brw
->is_haswell
? t
->_Swizzle
: key
->swizzles
[s
];
353 for (int i
= 0; i
< 4; i
++) {
354 unsigned src_comp
= GET_SWZ(src_swizzle
, i
);
355 if (src_comp
== SWIZZLE_ONE
|| src_comp
== SWIZZLE_W
) {
356 key
->swizzles
[i
] &= ~(0x7 << (3 * i
));
357 key
->swizzles
[i
] |= SWIZZLE_ONE
<< (3 * i
);
363 /* The channel select for green doesn't work - we have to
364 * request blue. Haswell can use SCS for this, but Ivybridge
365 * needs a shader workaround.
367 if (!brw
->is_haswell
)
368 key
->gather_channel_quirk_mask
|= 1 << s
;
373 /* Gen6's gather4 is broken for UINT/SINT; we treat them as
374 * UNORM/FLOAT instead and fix it in the shader.
376 if (brw
->gen
== 6 && prog
->nir
->info
->uses_texture_gather
) {
377 key
->gen6_gather_wa
[s
] = gen6_gather_workaround(img
->InternalFormat
);
380 /* If this is a multisample sampler, and uses the CMS MSAA layout,
381 * then we need to emit slightly different code to first sample the
384 struct intel_texture_object
*intel_tex
=
385 intel_texture_object((struct gl_texture_object
*)t
);
387 /* From gen9 onwards some single sampled buffers can also be
388 * compressed. These don't need ld2dms sampling along with mcs fetch.
391 intel_tex
->mt
->msaa_layout
== INTEL_MSAA_LAYOUT_CMS
&&
392 intel_tex
->mt
->num_samples
> 1) {
393 key
->compressed_multisample_layout_mask
|= 1 << s
;
395 if (intel_tex
->mt
->num_samples
>= 16) {
396 assert(brw
->gen
>= 9);
397 key
->msaa_16
|= 1 << s
;
401 if (t
->Target
== GL_TEXTURE_EXTERNAL_OES
&& intel_tex
->planar_format
) {
402 switch (intel_tex
->planar_format
->components
) {
403 case __DRI_IMAGE_COMPONENTS_Y_UV
:
404 key
->y_uv_image_mask
|= 1 << s
;
406 case __DRI_IMAGE_COMPONENTS_Y_U_V
:
407 key
->y_u_v_image_mask
|= 1 << s
;
409 case __DRI_IMAGE_COMPONENTS_Y_XUXV
:
410 key
->yx_xuxv_image_mask
|= 1 << s
;
422 brw_wm_state_dirty(const struct brw_context
*brw
)
424 return brw_state_dirty(brw
,
436 BRW_NEW_FRAGMENT_PROGRAM
|
437 BRW_NEW_REDUCED_PRIMITIVE
|
439 BRW_NEW_VUE_MAP_GEOM_OUT
);
443 brw_wm_populate_key(struct brw_context
*brw
, struct brw_wm_prog_key
*key
)
445 struct gl_context
*ctx
= &brw
->ctx
;
446 /* BRW_NEW_FRAGMENT_PROGRAM */
447 const struct brw_program
*fp
= brw_program_const(brw
->fragment_program
);
448 const struct gl_program
*prog
= (struct gl_program
*) brw
->fragment_program
;
452 memset(key
, 0, sizeof(*key
));
454 /* Build the index for table lookup
458 if (prog
->info
.fs
.uses_discard
|| ctx
->Color
.AlphaEnabled
) {
459 lookup
|= BRW_WM_IZ_PS_KILL_ALPHATEST_BIT
;
462 if (prog
->info
.outputs_written
& BITFIELD64_BIT(FRAG_RESULT_DEPTH
)) {
463 lookup
|= BRW_WM_IZ_PS_COMPUTES_DEPTH_BIT
;
468 lookup
|= BRW_WM_IZ_DEPTH_TEST_ENABLE_BIT
;
470 if (brw_depth_writes_enabled(brw
))
471 lookup
|= BRW_WM_IZ_DEPTH_WRITE_ENABLE_BIT
;
473 /* _NEW_STENCIL | _NEW_BUFFERS */
474 if (ctx
->Stencil
._Enabled
) {
475 lookup
|= BRW_WM_IZ_STENCIL_TEST_ENABLE_BIT
;
477 if (ctx
->Stencil
.WriteMask
[0] ||
478 ctx
->Stencil
.WriteMask
[ctx
->Stencil
._BackFace
])
479 lookup
|= BRW_WM_IZ_STENCIL_WRITE_ENABLE_BIT
;
481 key
->iz_lookup
= lookup
;
484 line_aa
= BRW_WM_AA_NEVER
;
486 /* _NEW_LINE, _NEW_POLYGON, BRW_NEW_REDUCED_PRIMITIVE */
487 if (ctx
->Line
.SmoothFlag
) {
488 if (brw
->reduced_primitive
== GL_LINES
) {
489 line_aa
= BRW_WM_AA_ALWAYS
;
491 else if (brw
->reduced_primitive
== GL_TRIANGLES
) {
492 if (ctx
->Polygon
.FrontMode
== GL_LINE
) {
493 line_aa
= BRW_WM_AA_SOMETIMES
;
495 if (ctx
->Polygon
.BackMode
== GL_LINE
||
496 (ctx
->Polygon
.CullFlag
&&
497 ctx
->Polygon
.CullFaceMode
== GL_BACK
))
498 line_aa
= BRW_WM_AA_ALWAYS
;
500 else if (ctx
->Polygon
.BackMode
== GL_LINE
) {
501 line_aa
= BRW_WM_AA_SOMETIMES
;
503 if ((ctx
->Polygon
.CullFlag
&&
504 ctx
->Polygon
.CullFaceMode
== GL_FRONT
))
505 line_aa
= BRW_WM_AA_ALWAYS
;
510 key
->line_aa
= line_aa
;
513 key
->high_quality_derivatives
=
514 ctx
->Hint
.FragmentShaderDerivative
== GL_NICEST
;
517 key
->stats_wm
= brw
->stats_wm
;
520 key
->flat_shade
= (ctx
->Light
.ShadeModel
== GL_FLAT
);
522 /* _NEW_FRAG_CLAMP | _NEW_BUFFERS */
523 key
->clamp_fragment_color
= ctx
->Color
._ClampFragmentColor
;
526 brw_populate_sampler_prog_key_data(ctx
, prog
, &key
->tex
);
529 key
->nr_color_regions
= ctx
->DrawBuffer
->_NumColorDrawBuffers
;
532 key
->force_dual_color_blend
= brw
->dual_color_blend_by_location
&&
533 (ctx
->Color
.BlendEnabled
& 1) && ctx
->Color
.Blend
[0]._UsesDualSrc
;
535 /* _NEW_MULTISAMPLE, _NEW_COLOR, _NEW_BUFFERS */
536 key
->replicate_alpha
= ctx
->DrawBuffer
->_NumColorDrawBuffers
> 1 &&
537 (_mesa_is_alpha_test_enabled(ctx
) ||
538 _mesa_is_alpha_to_coverage_enabled(ctx
));
540 /* _NEW_BUFFERS _NEW_MULTISAMPLE */
541 /* Ignore sample qualifier while computing this flag. */
542 if (ctx
->Multisample
.Enabled
) {
543 key
->persample_interp
=
544 ctx
->Multisample
.SampleShading
&&
545 (ctx
->Multisample
.MinSampleShadingValue
*
546 _mesa_geometric_samples(ctx
->DrawBuffer
) > 1);
548 key
->multisample_fbo
= _mesa_geometric_samples(ctx
->DrawBuffer
) > 1;
551 /* BRW_NEW_VUE_MAP_GEOM_OUT */
552 if (brw
->gen
< 6 || _mesa_bitcount_64(prog
->info
.inputs_read
&
553 BRW_FS_VARYING_INPUT_MASK
) > 16) {
554 key
->input_slots_valid
= brw
->vue_map_geom_out
.slots_valid
;
557 /* _NEW_COLOR | _NEW_BUFFERS */
558 /* Pre-gen6, the hardware alpha test always used each render
559 * target's alpha to do alpha test, as opposed to render target 0's alpha
560 * like GL requires. Fix that by building the alpha test into the
561 * shader, and we'll skip enabling the fixed function alpha test.
563 if (brw
->gen
< 6 && ctx
->DrawBuffer
->_NumColorDrawBuffers
> 1 &&
564 ctx
->Color
.AlphaEnabled
) {
565 key
->alpha_test_func
= ctx
->Color
.AlphaFunc
;
566 key
->alpha_test_ref
= ctx
->Color
.AlphaRef
;
569 /* The unique fragment program ID */
570 key
->program_string_id
= fp
->id
;
572 /* Whether reads from the framebuffer should behave coherently. */
573 key
->coherent_fb_fetch
= ctx
->Extensions
.MESA_shader_framebuffer_fetch
;
577 brw_upload_wm_prog(struct brw_context
*brw
)
579 struct brw_wm_prog_key key
;
580 struct brw_program
*fp
= (struct brw_program
*) brw
->fragment_program
;
582 if (!brw_wm_state_dirty(brw
))
585 brw_wm_populate_key(brw
, &key
);
587 if (!brw_search_cache(&brw
->cache
, BRW_CACHE_FS_PROG
,
589 &brw
->wm
.base
.prog_offset
,
590 &brw
->wm
.base
.prog_data
)) {
591 bool success
= brw_codegen_wm_prog(brw
, fp
, &key
,
592 &brw
->vue_map_geom_out
);
599 brw_fs_precompile(struct gl_context
*ctx
, struct gl_program
*prog
)
601 struct brw_context
*brw
= brw_context(ctx
);
602 struct brw_wm_prog_key key
;
604 struct brw_program
*bfp
= brw_program(prog
);
606 memset(&key
, 0, sizeof(key
));
608 uint64_t outputs_written
= prog
->info
.outputs_written
;
611 if (prog
->info
.fs
.uses_discard
)
612 key
.iz_lookup
|= BRW_WM_IZ_PS_KILL_ALPHATEST_BIT
;
614 if (outputs_written
& BITFIELD64_BIT(FRAG_RESULT_DEPTH
))
615 key
.iz_lookup
|= BRW_WM_IZ_PS_COMPUTES_DEPTH_BIT
;
617 /* Just assume depth testing. */
618 key
.iz_lookup
|= BRW_WM_IZ_DEPTH_TEST_ENABLE_BIT
;
619 key
.iz_lookup
|= BRW_WM_IZ_DEPTH_WRITE_ENABLE_BIT
;
622 if (brw
->gen
< 6 || _mesa_bitcount_64(prog
->info
.inputs_read
&
623 BRW_FS_VARYING_INPUT_MASK
) > 16) {
624 key
.input_slots_valid
= prog
->info
.inputs_read
| VARYING_BIT_POS
;
627 brw_setup_tex_for_precompile(brw
, &key
.tex
, prog
);
629 key
.nr_color_regions
= _mesa_bitcount_64(outputs_written
&
630 ~(BITFIELD64_BIT(FRAG_RESULT_DEPTH
) |
631 BITFIELD64_BIT(FRAG_RESULT_STENCIL
) |
632 BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK
)));
634 key
.program_string_id
= bfp
->id
;
636 /* Whether reads from the framebuffer should behave coherently. */
637 key
.coherent_fb_fetch
= ctx
->Extensions
.MESA_shader_framebuffer_fetch
;
639 uint32_t old_prog_offset
= brw
->wm
.base
.prog_offset
;
640 struct brw_stage_prog_data
*old_prog_data
= brw
->wm
.base
.prog_data
;
642 struct brw_vue_map vue_map
;
644 brw_compute_vue_map(&brw
->screen
->devinfo
, &vue_map
,
645 prog
->info
.inputs_read
| VARYING_BIT_POS
,
649 bool success
= brw_codegen_wm_prog(brw
, bfp
, &key
, &vue_map
);
651 brw
->wm
.base
.prog_offset
= old_prog_offset
;
652 brw
->wm
.base
.prog_data
= old_prog_data
;