/* bd2b24a3bfab0d0c9a8f07a00d9328d8db58971d
 * [mesa.git] / src / mesa / drivers / dri / i965 / brw_wm.c
 */
1 /*
2 * Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 * Intel funded Tungsten Graphics to
4 * develop this 3D driver.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 */
26 #include "brw_context.h"
27 #include "brw_wm.h"
28 #include "brw_state.h"
29 #include "brw_shader.h"
30 #include "main/enums.h"
31 #include "main/formats.h"
32 #include "main/fbobject.h"
33 #include "main/samplerobj.h"
34 #include "main/framebuffer.h"
35 #include "program/prog_parameter.h"
36 #include "program/program.h"
37 #include "intel_mipmap_tree.h"
38 #include "intel_image.h"
39 #include "brw_nir.h"
40 #include "brw_program.h"
41
42 #include "util/ralloc.h"
43
44 static void
45 assign_fs_binding_table_offsets(const struct gen_device_info *devinfo,
46 const struct gl_program *prog,
47 const struct brw_wm_prog_key *key,
48 struct brw_wm_prog_data *prog_data)
49 {
50 uint32_t next_binding_table_offset = 0;
51
52 /* If there are no color regions, we still perform an FB write to a null
53 * renderbuffer, which we place at surface index 0.
54 */
55 prog_data->binding_table.render_target_start = next_binding_table_offset;
56 next_binding_table_offset += MAX2(key->nr_color_regions, 1);
57
58 next_binding_table_offset =
59 brw_assign_common_binding_table_offsets(devinfo, prog, &prog_data->base,
60 next_binding_table_offset);
61
62 if (prog->nir->info->outputs_read && !key->coherent_fb_fetch) {
63 prog_data->binding_table.render_target_read_start =
64 next_binding_table_offset;
65 next_binding_table_offset += key->nr_color_regions;
66 }
67 }
68
/**
 * Explain (via perf_debug) why a fragment shader had to be recompiled, by
 * diffing the new program key against the one cached from a previous
 * compile of the same program.
 */
static void
brw_wm_debug_recompile(struct brw_context *brw, struct gl_program *prog,
                       const struct brw_wm_prog_key *key)
{
   struct brw_cache_item *c = NULL;
   const struct brw_wm_prog_key *old_key = NULL;
   bool found = false;

   perf_debug("Recompiling fragment shader for program %d\n", prog->Id);

   /* Walk every bucket of the program cache looking for an FS variant of
    * this same program (matched by program_string_id).  The double break
    * exits both loops once c points at a match.
    */
   for (unsigned int i = 0; i < brw->cache.size; i++) {
      for (c = brw->cache.items[i]; c; c = c->next) {
         if (c->cache_id == BRW_CACHE_FS_PROG) {
            old_key = c->key;

            if (old_key->program_string_id == key->program_string_id)
               break;
         }
      }
      if (c)
         break;
   }

   if (!c) {
      perf_debug(" Didn't find previous compile in the shader cache for debug\n");
      return;
   }

   /* Report every key field that differs; any hit is a reason for the
    * recompile.  key_debug prints the change and returns whether the two
    * values differed.
    */
   found |= key_debug(brw, "alphatest, computed depth, depth test, or "
                      "depth write",
                      old_key->iz_lookup, key->iz_lookup);
   found |= key_debug(brw, "depth statistics",
                      old_key->stats_wm, key->stats_wm);
   found |= key_debug(brw, "flat shading",
                      old_key->flat_shade, key->flat_shade);
   found |= key_debug(brw, "per-sample interpolation",
                      old_key->persample_interp, key->persample_interp);
   found |= key_debug(brw, "number of color buffers",
                      old_key->nr_color_regions, key->nr_color_regions);
   found |= key_debug(brw, "MRT alpha test or alpha-to-coverage",
                      old_key->replicate_alpha, key->replicate_alpha);
   found |= key_debug(brw, "fragment color clamping",
                      old_key->clamp_fragment_color, key->clamp_fragment_color);
   found |= key_debug(brw, "multisampled FBO",
                      old_key->multisample_fbo, key->multisample_fbo);
   found |= key_debug(brw, "line smoothing",
                      old_key->line_aa, key->line_aa);
   found |= key_debug(brw, "input slots valid",
                      old_key->input_slots_valid, key->input_slots_valid);
   found |= key_debug(brw, "mrt alpha test function",
                      old_key->alpha_test_func, key->alpha_test_func);
   found |= key_debug(brw, "mrt alpha test reference value",
                      old_key->alpha_test_ref, key->alpha_test_ref);

   found |= brw_debug_recompile_sampler_key(brw, &old_key->tex, &key->tex);

   if (!found) {
      perf_debug("  Something else\n");
   }
}
129
/**
 * All Mesa program -> GPU code generation goes through this function.
 * Depending on the instructions used (i.e. flow control instructions)
 * we'll use one of two code generators.
 *
 * On success the compiled program is uploaded to the state cache and made
 * current via brw->wm.base.prog_offset / prog_data; returns false (and
 * reports the error) on compile failure.
 */
static bool
brw_codegen_wm_prog(struct brw_context *brw,
                    struct brw_program *fp,
                    struct brw_wm_prog_key *key,
                    struct brw_vue_map *vue_map)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   /* Scratch context for the compile; freed on every exit path. */
   void *mem_ctx = ralloc_context(NULL);
   struct brw_wm_prog_data prog_data;
   const GLuint *program;
   GLuint program_size;
   bool start_busy = false;
   double start_time = 0;

   memset(&prog_data, 0, sizeof(prog_data));

   /* Use ALT floating point mode for ARB programs so that 0^0 == 1. */
   if (fp->program.is_arb_asm)
      prog_data.base.use_alt_mode = true;

   assign_fs_binding_table_offsets(devinfo, &fp->program, key, &prog_data);

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.  (Hence the NULL ralloc parent, not mem_ctx.)
    */
   int param_count = fp->program.nir->num_uniforms / 4;
   prog_data.base.nr_image_params = fp->program.info.num_images;
   /* The backend also sometimes adds params for texture size. */
   param_count += 2 * ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits;
   prog_data.base.param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data.base.pull_param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data.base.image_param =
      rzalloc_array(NULL, struct brw_image_param,
                    prog_data.base.nr_image_params);
   prog_data.base.nr_params = param_count;

   if (!fp->program.is_arb_asm) {
      brw_nir_setup_glsl_uniforms(fp->program.nir, &fp->program,
                                  &prog_data.base, true);
   } else {
      brw_nir_setup_arb_uniforms(fp->program.nir, &fp->program,
                                 &prog_data.base);

      if (unlikely(INTEL_DEBUG & DEBUG_WM))
         brw_dump_arb_asm("fragment", &fp->program);
   }

   /* Note whether the GPU was already busy, so we can tell below whether
    * this compile stalled it (perf_debug builds only).
    */
   if (unlikely(brw->perf_debug)) {
      start_busy = (brw->batch.last_bo &&
                    drm_intel_bo_busy(brw->batch.last_bo));
      start_time = get_time();
   }

   /* Shader-time slots for the SIMD8 and SIMD16 variants, if profiling. */
   int st_index8 = -1, st_index16 = -1;
   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      st_index8 = brw_get_shader_time_index(brw, &fp->program, ST_FS8,
                                            !fp->program.is_arb_asm);
      st_index16 = brw_get_shader_time_index(brw, &fp->program, ST_FS16,
                                             !fp->program.is_arb_asm);
   }

   char *error_str = NULL;
   program = brw_compile_fs(brw->screen->compiler, brw, mem_ctx,
                            key, &prog_data, fp->program.nir,
                            &fp->program, st_index8, st_index16,
                            true, brw->use_rep_send, vue_map,
                            &program_size, &error_str);

   if (program == NULL) {
      /* Report the failure through the GLSL link log; ARB assembly programs
       * have no link status to update.
       */
      if (!fp->program.is_arb_asm) {
         fp->program.sh.data->LinkStatus = false;
         ralloc_strcat(&fp->program.sh.data->InfoLog, error_str);
      }

      _mesa_problem(NULL, "Failed to compile fragment shader: %s\n", error_str);

      ralloc_free(mem_ctx);
      return false;
   }

   if (unlikely(brw->perf_debug)) {
      /* A second compile of the same program is a recompile — explain why. */
      if (fp->compiled_once)
         brw_wm_debug_recompile(brw, &fp->program, key);
      fp->compiled_once = true;

      if (start_busy && !drm_intel_bo_busy(brw->batch.last_bo)) {
         perf_debug("FS compile took %.03f ms and stalled the GPU\n",
                    (get_time() - start_time) * 1000);
      }
   }

   brw_alloc_stage_scratch(brw, &brw->wm.base,
                           prog_data.base.total_scratch,
                           devinfo->max_wm_threads);

   if (unlikely((INTEL_DEBUG & DEBUG_WM) && fp->program.is_arb_asm))
      fprintf(stderr, "\n");

   /* Store the assembly + prog_data in the state cache; this also makes
    * them current through brw->wm.base.prog_offset / prog_data.
    */
   brw_upload_cache(&brw->cache, BRW_CACHE_FS_PROG,
                    key, sizeof(struct brw_wm_prog_key),
                    program, program_size,
                    &prog_data, sizeof(prog_data),
                    &brw->wm.base.prog_offset, &brw->wm.base.prog_data);

   ralloc_free(mem_ctx);

   return true;
}
247
/**
 * Diff two sampler program keys, printing (via key_debug) each field that
 * changed.  Returns true if any field differed.  Used from the per-stage
 * *_debug_recompile helpers to explain shader recompiles.
 */
bool
brw_debug_recompile_sampler_key(struct brw_context *brw,
                                const struct brw_sampler_prog_key_data *old_key,
                                const struct brw_sampler_prog_key_data *key)
{
   bool found = false;

   for (unsigned int i = 0; i < MAX_SAMPLERS; i++) {
      found |= key_debug(brw, "EXT_texture_swizzle or DEPTH_TEXTURE_MODE",
                         old_key->swizzles[i], key->swizzles[i]);
   }
   /* One GL_CLAMP mask per texture coordinate (S, T, R). */
   found |= key_debug(brw, "GL_CLAMP enabled on any texture unit's 1st coordinate",
                      old_key->gl_clamp_mask[0], key->gl_clamp_mask[0]);
   found |= key_debug(brw, "GL_CLAMP enabled on any texture unit's 2nd coordinate",
                      old_key->gl_clamp_mask[1], key->gl_clamp_mask[1]);
   found |= key_debug(brw, "GL_CLAMP enabled on any texture unit's 3rd coordinate",
                      old_key->gl_clamp_mask[2], key->gl_clamp_mask[2]);
   found |= key_debug(brw, "gather channel quirk on any texture unit",
                      old_key->gather_channel_quirk_mask, key->gather_channel_quirk_mask);
   found |= key_debug(brw, "compressed multisample layout",
                      old_key->compressed_multisample_layout_mask,
                      key->compressed_multisample_layout_mask);
   found |= key_debug(brw, "16x msaa",
                      old_key->msaa_16,
                      key->msaa_16);

   /* Planar (YUV) external image bindings. */
   found |= key_debug(brw, "y_uv image bound",
                      old_key->y_uv_image_mask,
                      key->y_uv_image_mask);
   found |= key_debug(brw, "y_u_v image bound",
                      old_key->y_u_v_image_mask,
                      key->y_u_v_image_mask);
   found |= key_debug(brw, "yx_xuxv image bound",
                      old_key->yx_xuxv_image_mask,
                      key->yx_xuxv_image_mask);

   for (unsigned int i = 0; i < MAX_SAMPLERS; i++) {
      found |= key_debug(brw, "textureGather workarounds",
                         old_key->gen6_gather_wa[i], key->gen6_gather_wa[i]);
   }

   return found;
}
291
292 static uint8_t
293 gen6_gather_workaround(GLenum internalformat)
294 {
295 switch (internalformat) {
296 case GL_R8I: return WA_SIGN | WA_8BIT;
297 case GL_R8UI: return WA_8BIT;
298 case GL_R16I: return WA_SIGN | WA_16BIT;
299 case GL_R16UI: return WA_16BIT;
300 default:
301 /* Note that even though GL_R32I and GL_R32UI have format overrides in
302 * the surface state, there is no shader w/a required.
303 */
304 return 0;
305 }
306 }
307
308 void
309 brw_populate_sampler_prog_key_data(struct gl_context *ctx,
310 const struct gl_program *prog,
311 struct brw_sampler_prog_key_data *key)
312 {
313 struct brw_context *brw = brw_context(ctx);
314 GLbitfield mask = prog->SamplersUsed;
315
316 while (mask) {
317 const int s = u_bit_scan(&mask);
318
319 key->swizzles[s] = SWIZZLE_NOOP;
320
321 int unit_id = prog->SamplerUnits[s];
322 const struct gl_texture_unit *unit = &ctx->Texture.Unit[unit_id];
323
324 if (unit->_Current && unit->_Current->Target != GL_TEXTURE_BUFFER) {
325 const struct gl_texture_object *t = unit->_Current;
326 const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
327 struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit_id);
328
329 const bool alpha_depth = t->DepthMode == GL_ALPHA &&
330 (img->_BaseFormat == GL_DEPTH_COMPONENT ||
331 img->_BaseFormat == GL_DEPTH_STENCIL);
332
333 /* Haswell handles texture swizzling as surface format overrides
334 * (except for GL_ALPHA); all other platforms need MOVs in the shader.
335 */
336 if (alpha_depth || (brw->gen < 8 && !brw->is_haswell))
337 key->swizzles[s] = brw_get_texture_swizzle(ctx, t);
338
339 if (brw->gen < 8 &&
340 sampler->MinFilter != GL_NEAREST &&
341 sampler->MagFilter != GL_NEAREST) {
342 if (sampler->WrapS == GL_CLAMP)
343 key->gl_clamp_mask[0] |= 1 << s;
344 if (sampler->WrapT == GL_CLAMP)
345 key->gl_clamp_mask[1] |= 1 << s;
346 if (sampler->WrapR == GL_CLAMP)
347 key->gl_clamp_mask[2] |= 1 << s;
348 }
349
350 /* gather4 for RG32* is broken in multiple ways on Gen7. */
351 if (brw->gen == 7 && prog->nir->info->uses_texture_gather) {
352 switch (img->InternalFormat) {
353 case GL_RG32I:
354 case GL_RG32UI: {
355 /* We have to override the format to R32G32_FLOAT_LD.
356 * This means that SCS_ALPHA and SCS_ONE will return 0x3f8
357 * (1.0) rather than integer 1. This needs shader hacks.
358 *
359 * On Ivybridge, we whack W (alpha) to ONE in our key's
360 * swizzle. On Haswell, we look at the original texture
361 * swizzle, and use XYZW with channels overridden to ONE,
362 * leaving normal texture swizzling to SCS.
363 */
364 unsigned src_swizzle =
365 brw->is_haswell ? t->_Swizzle : key->swizzles[s];
366 for (int i = 0; i < 4; i++) {
367 unsigned src_comp = GET_SWZ(src_swizzle, i);
368 if (src_comp == SWIZZLE_ONE || src_comp == SWIZZLE_W) {
369 key->swizzles[i] &= ~(0x7 << (3 * i));
370 key->swizzles[i] |= SWIZZLE_ONE << (3 * i);
371 }
372 }
373 /* fallthrough */
374 }
375 case GL_RG32F:
376 /* The channel select for green doesn't work - we have to
377 * request blue. Haswell can use SCS for this, but Ivybridge
378 * needs a shader workaround.
379 */
380 if (!brw->is_haswell)
381 key->gather_channel_quirk_mask |= 1 << s;
382 break;
383 }
384 }
385
386 /* Gen6's gather4 is broken for UINT/SINT; we treat them as
387 * UNORM/FLOAT instead and fix it in the shader.
388 */
389 if (brw->gen == 6 && prog->nir->info->uses_texture_gather) {
390 key->gen6_gather_wa[s] = gen6_gather_workaround(img->InternalFormat);
391 }
392
393 /* If this is a multisample sampler, and uses the CMS MSAA layout,
394 * then we need to emit slightly different code to first sample the
395 * MCS surface.
396 */
397 struct intel_texture_object *intel_tex =
398 intel_texture_object((struct gl_texture_object *)t);
399
400 /* From gen9 onwards some single sampled buffers can also be
401 * compressed. These don't need ld2dms sampling along with mcs fetch.
402 */
403 if (brw->gen >= 7 &&
404 intel_tex->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS &&
405 intel_tex->mt->num_samples > 1) {
406 key->compressed_multisample_layout_mask |= 1 << s;
407
408 if (intel_tex->mt->num_samples >= 16) {
409 assert(brw->gen >= 9);
410 key->msaa_16 |= 1 << s;
411 }
412 }
413
414 if (t->Target == GL_TEXTURE_EXTERNAL_OES && intel_tex->planar_format) {
415 switch (intel_tex->planar_format->components) {
416 case __DRI_IMAGE_COMPONENTS_Y_UV:
417 key->y_uv_image_mask |= 1 << s;
418 break;
419 case __DRI_IMAGE_COMPONENTS_Y_U_V:
420 key->y_u_v_image_mask |= 1 << s;
421 break;
422 case __DRI_IMAGE_COMPONENTS_Y_XUXV:
423 key->yx_xuxv_image_mask |= 1 << s;
424 break;
425 default:
426 break;
427 }
428 }
429
430 }
431 }
432 }
433
434 static bool
435 brw_wm_state_dirty(const struct brw_context *brw)
436 {
437 return brw_state_dirty(brw,
438 _NEW_BUFFERS |
439 _NEW_COLOR |
440 _NEW_DEPTH |
441 _NEW_FRAG_CLAMP |
442 _NEW_HINT |
443 _NEW_LIGHT |
444 _NEW_LINE |
445 _NEW_MULTISAMPLE |
446 _NEW_POLYGON |
447 _NEW_STENCIL |
448 _NEW_TEXTURE,
449 BRW_NEW_FRAGMENT_PROGRAM |
450 BRW_NEW_REDUCED_PRIMITIVE |
451 BRW_NEW_STATS_WM |
452 BRW_NEW_VUE_MAP_GEOM_OUT);
453 }
454
/**
 * Build the brw_wm_prog_key capturing all non-program GL/brw state that
 * affects fragment shader compilation.  Each read below is annotated with
 * the dirty flag (_NEW_* / BRW_NEW_*) that signals a change to it.
 */
void
brw_wm_populate_key(struct brw_context *brw, struct brw_wm_prog_key *key)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct brw_program *fp = brw_program_const(brw->fragment_program);
   const struct gl_program *prog = (struct gl_program *) brw->fragment_program;
   GLuint lookup = 0;
   GLuint line_aa;

   memset(key, 0, sizeof(*key));

   /* Build the index for table lookup
    * (pre-Gen6 "interpolation/Z" setup table, see iz_lookup consumers).
    */
   if (brw->gen < 6) {
      /* _NEW_COLOR */
      if (prog->info.fs.uses_discard || ctx->Color.AlphaEnabled) {
         lookup |= IZ_PS_KILL_ALPHATEST_BIT;
      }

      if (prog->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
         lookup |= IZ_PS_COMPUTES_DEPTH_BIT;
      }

      /* _NEW_DEPTH */
      if (ctx->Depth.Test)
         lookup |= IZ_DEPTH_TEST_ENABLE_BIT;

      if (brw_depth_writes_enabled(brw))
         lookup |= IZ_DEPTH_WRITE_ENABLE_BIT;

      /* _NEW_STENCIL | _NEW_BUFFERS */
      if (ctx->Stencil._Enabled) {
         lookup |= IZ_STENCIL_TEST_ENABLE_BIT;

         if (ctx->Stencil.WriteMask[0] ||
             ctx->Stencil.WriteMask[ctx->Stencil._BackFace])
            lookup |= IZ_STENCIL_WRITE_ENABLE_BIT;
      }
      key->iz_lookup = lookup;
   }

   line_aa = AA_NEVER;

   /* _NEW_LINE, _NEW_POLYGON, BRW_NEW_REDUCED_PRIMITIVE
    *
    * AA_SOMETIMES means only some primitives (e.g. front faces drawn as
    * lines) get antialiased, so the shader must handle both cases.
    */
   if (ctx->Line.SmoothFlag) {
      if (brw->reduced_primitive == GL_LINES) {
         line_aa = AA_ALWAYS;
      }
      else if (brw->reduced_primitive == GL_TRIANGLES) {
         if (ctx->Polygon.FrontMode == GL_LINE) {
            line_aa = AA_SOMETIMES;

            if (ctx->Polygon.BackMode == GL_LINE ||
                (ctx->Polygon.CullFlag &&
                 ctx->Polygon.CullFaceMode == GL_BACK))
               line_aa = AA_ALWAYS;
         }
         else if (ctx->Polygon.BackMode == GL_LINE) {
            line_aa = AA_SOMETIMES;

            if ((ctx->Polygon.CullFlag &&
                 ctx->Polygon.CullFaceMode == GL_FRONT))
               line_aa = AA_ALWAYS;
         }
      }
   }

   key->line_aa = line_aa;

   /* _NEW_HINT */
   key->high_quality_derivatives =
      ctx->Hint.FragmentShaderDerivative == GL_NICEST;

   if (brw->gen < 6)
      key->stats_wm = brw->stats_wm;

   /* _NEW_LIGHT */
   key->flat_shade = (ctx->Light.ShadeModel == GL_FLAT);

   /* _NEW_FRAG_CLAMP | _NEW_BUFFERS */
   key->clamp_fragment_color = ctx->Color._ClampFragmentColor;

   /* _NEW_TEXTURE */
   brw_populate_sampler_prog_key_data(ctx, prog, &key->tex);

   /* _NEW_BUFFERS */
   key->nr_color_regions = ctx->DrawBuffer->_NumColorDrawBuffers;

   /* _NEW_COLOR */
   key->force_dual_color_blend = brw->dual_color_blend_by_location &&
      (ctx->Color.BlendEnabled & 1) && ctx->Color.Blend[0]._UsesDualSrc;

   /* _NEW_MULTISAMPLE, _NEW_COLOR, _NEW_BUFFERS */
   key->replicate_alpha = ctx->DrawBuffer->_NumColorDrawBuffers > 1 &&
      (_mesa_is_alpha_test_enabled(ctx) ||
       _mesa_is_alpha_to_coverage_enabled(ctx));

   /* _NEW_BUFFERS _NEW_MULTISAMPLE */
   /* Ignore sample qualifier while computing this flag. */
   if (ctx->Multisample.Enabled) {
      key->persample_interp =
         ctx->Multisample.SampleShading &&
         (ctx->Multisample.MinSampleShadingValue *
          _mesa_geometric_samples(ctx->DrawBuffer) > 1);

      key->multisample_fbo = _mesa_geometric_samples(ctx->DrawBuffer) > 1;
   }

   /* BRW_NEW_VUE_MAP_GEOM_OUT */
   if (brw->gen < 6 || _mesa_bitcount_64(prog->info.inputs_read &
                                         BRW_FS_VARYING_INPUT_MASK) > 16) {
      key->input_slots_valid = brw->vue_map_geom_out.slots_valid;
   }

   /* _NEW_COLOR | _NEW_BUFFERS */
   /* Pre-gen6, the hardware alpha test always used each render
    * target's alpha to do alpha test, as opposed to render target 0's alpha
    * like GL requires. Fix that by building the alpha test into the
    * shader, and we'll skip enabling the fixed function alpha test.
    */
   if (brw->gen < 6 && ctx->DrawBuffer->_NumColorDrawBuffers > 1 &&
       ctx->Color.AlphaEnabled) {
      key->alpha_test_func = ctx->Color.AlphaFunc;
      key->alpha_test_ref = ctx->Color.AlphaRef;
   }

   /* The unique fragment program ID */
   key->program_string_id = fp->id;

   /* Whether reads from the framebuffer should behave coherently. */
   key->coherent_fb_fetch = ctx->Extensions.MESA_shader_framebuffer_fetch;
}
588
589 void
590 brw_upload_wm_prog(struct brw_context *brw)
591 {
592 struct brw_wm_prog_key key;
593 struct brw_program *fp = (struct brw_program *) brw->fragment_program;
594
595 if (!brw_wm_state_dirty(brw))
596 return;
597
598 brw_wm_populate_key(brw, &key);
599
600 if (!brw_search_cache(&brw->cache, BRW_CACHE_FS_PROG,
601 &key, sizeof(key),
602 &brw->wm.base.prog_offset,
603 &brw->wm.base.prog_data)) {
604 bool success = brw_codegen_wm_prog(brw, fp, &key,
605 &brw->vue_map_geom_out);
606 (void) success;
607 assert(success);
608 }
609 }
610
/**
 * Precompile the fragment program with a guessed state key so the first
 * draw is less likely to stall on compilation.  Saves and restores the
 * currently-bound WM program state around the compile.
 */
bool
brw_fs_precompile(struct gl_context *ctx, struct gl_program *prog)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_wm_prog_key key;

   struct brw_program *bfp = brw_program(prog);

   memset(&key, 0, sizeof(key));

   uint64_t outputs_written = prog->info.outputs_written;

   if (brw->gen < 6) {
      if (prog->info.fs.uses_discard)
         key.iz_lookup |= IZ_PS_KILL_ALPHATEST_BIT;

      if (outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
         key.iz_lookup |= IZ_PS_COMPUTES_DEPTH_BIT;

      /* Just assume depth testing. */
      key.iz_lookup |= IZ_DEPTH_TEST_ENABLE_BIT;
      key.iz_lookup |= IZ_DEPTH_WRITE_ENABLE_BIT;
   }

   /* Guess that all declared inputs plus position are live; see
    * brw_wm_populate_key() for the draw-time computation.
    */
   if (brw->gen < 6 || _mesa_bitcount_64(prog->info.inputs_read &
                                         BRW_FS_VARYING_INPUT_MASK) > 16) {
      key.input_slots_valid = prog->info.inputs_read | VARYING_BIT_POS;
   }

   brw_setup_tex_for_precompile(brw, &key.tex, prog);

   /* Count only color outputs; depth/stencil/sample-mask results don't
    * occupy render-target slots.
    */
   key.nr_color_regions = _mesa_bitcount_64(outputs_written &
                                            ~(BITFIELD64_BIT(FRAG_RESULT_DEPTH) |
                                              BITFIELD64_BIT(FRAG_RESULT_STENCIL) |
                                              BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK)));

   key.program_string_id = bfp->id;

   /* Whether reads from the framebuffer should behave coherently. */
   key.coherent_fb_fetch = ctx->Extensions.MESA_shader_framebuffer_fetch;

   /* Save the currently-bound program state so the precompile doesn't
    * disturb it; restored below.
    */
   uint32_t old_prog_offset = brw->wm.base.prog_offset;
   struct brw_stage_prog_data *old_prog_data = brw->wm.base.prog_data;

   struct brw_vue_map vue_map;
   if (brw->gen < 6) {
      /* NOTE(review): vue_map is left uninitialized for gen >= 6;
       * presumably the compiler only consumes it on older gens — confirm
       * against brw_compile_fs.
       */
      brw_compute_vue_map(&brw->screen->devinfo, &vue_map,
                          prog->info.inputs_read | VARYING_BIT_POS,
                          false);
   }

   bool success = brw_codegen_wm_prog(brw, bfp, &key, &vue_map);

   brw->wm.base.prog_offset = old_prog_offset;
   brw->wm.base.prog_data = old_prog_data;

   return success;
}