/*
 * Copyright (C) Intel Corp. 2006. All Rights Reserved.
 * Intel funded Tungsten Graphics to
 * develop this 3D driver.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "brw_context.h"
#include "brw_wm.h"
#include "brw_state.h"
#include "main/enums.h"
#include "main/formats.h"
#include "main/fbobject.h"
#include "main/samplerobj.h"
#include "main/framebuffer.h"
#include "program/prog_parameter.h"
#include "program/program.h"
#include "intel_mipmap_tree.h"
#include "intel_image.h"
#include "intel_fbo.h"
#include "compiler/brw_nir.h"
#include "brw_program.h"

#include "util/ralloc.h"
#include "util/u_math.h"

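/* Lay out the fragment shader's binding (surface) table.  Render targets
 * occupy the first nr_color_regions slots (at least one, since we always
 * write to a real or null render target), followed by the common entries
 * (textures, UBOs, etc.), followed by the render-target-read surfaces used
 * for non-coherent framebuffer fetch.  For example, with two color regions
 * and non-coherent FB fetch the layout would be roughly:
 *
 *    surfaces 0..1   render targets
 *    surfaces 2..N   common entries
 *    surfaces N+1..  render-target reads
 */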
static void
assign_fs_binding_table_offsets(const struct gen_device_info *devinfo,
                                const struct gl_program *prog,
                                const struct brw_wm_prog_key *key,
                                struct brw_wm_prog_data *prog_data)
{
   /* Render targets implicitly start at surface index 0. Even if there are
    * no color regions, we still perform an FB write to a null render target,
    * which will be surface 0.
    */
   uint32_t next_binding_table_offset = MAX2(key->nr_color_regions, 1);

   next_binding_table_offset =
      brw_assign_common_binding_table_offsets(devinfo, prog, &prog_data->base,
                                              next_binding_table_offset);

   if (prog->nir->info.outputs_read && !key->coherent_fb_fetch) {
      prog_data->binding_table.render_target_read_start =
         next_binding_table_offset;
      next_binding_table_offset += key->nr_color_regions;
   }

   /* Update the binding table size */
   prog_data->base.binding_table.size_bytes = next_binding_table_offset * 4;
}

static bool
brw_codegen_wm_prog(struct brw_context *brw,
                    struct brw_program *fp,
                    struct brw_wm_prog_key *key,
                    struct brw_vue_map *vue_map)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   void *mem_ctx = ralloc_context(NULL);
   struct brw_wm_prog_data prog_data;
   const GLuint *program;
   bool start_busy = false;
   double start_time = 0;

   nir_shader *nir = nir_shader_clone(mem_ctx, fp->program.nir);

   memset(&prog_data, 0, sizeof(prog_data));

   /* Use ALT floating point mode for ARB programs so that 0^0 == 1. */
   if (fp->program.is_arb_asm)
      prog_data.base.use_alt_mode = true;

   assign_fs_binding_table_offsets(devinfo, &fp->program, key, &prog_data);

   if (!fp->program.is_arb_asm) {
      brw_nir_setup_glsl_uniforms(mem_ctx, nir, &fp->program,
                                  &prog_data.base, true);
      brw_nir_analyze_ubo_ranges(brw->screen->compiler, nir,
                                 NULL, prog_data.base.ubo_ranges);
   } else {
      brw_nir_setup_arb_uniforms(mem_ctx, nir, &fp->program, &prog_data.base);

      if (unlikely(INTEL_DEBUG & DEBUG_WM))
         brw_dump_arb_asm("fragment", &fp->program);
   }

   if (unlikely(brw->perf_debug)) {
      start_busy = (brw->batch.last_bo &&
                    brw_bo_busy(brw->batch.last_bo));
      start_time = get_time();
   }

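   /* INTEL_DEBUG=shader_time instruments each dispatch width separately,
    * so allocate one shader-time index per SIMD mode.
    */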
   int st_index8 = -1, st_index16 = -1, st_index32 = -1;
   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      st_index8 = brw_get_shader_time_index(brw, &fp->program, ST_FS8,
                                            !fp->program.is_arb_asm);
      st_index16 = brw_get_shader_time_index(brw, &fp->program, ST_FS16,
                                             !fp->program.is_arb_asm);
      st_index32 = brw_get_shader_time_index(brw, &fp->program, ST_FS32,
                                             !fp->program.is_arb_asm);
   }

   char *error_str = NULL;
   program = brw_compile_fs(brw->screen->compiler, brw, mem_ctx,
                            key, &prog_data, nir,
                            &fp->program, st_index8, st_index16, st_index32,
                            true, false, vue_map,
                            &error_str);

   if (program == NULL) {
      if (!fp->program.is_arb_asm) {
         fp->program.sh.data->LinkStatus = LINKING_FAILURE;
         ralloc_strcat(&fp->program.sh.data->InfoLog, error_str);
      }

      _mesa_problem(NULL, "Failed to compile fragment shader: %s\n", error_str);

      ralloc_free(mem_ctx);
      return false;
   }

   if (unlikely(brw->perf_debug)) {
      if (fp->compiled_once) {
         brw_debug_recompile(brw, MESA_SHADER_FRAGMENT, fp->program.Id,
                             &key->base);
      }
      fp->compiled_once = true;

      if (start_busy && !brw_bo_busy(brw->batch.last_bo)) {
         perf_debug("FS compile took %.03f ms and stalled the GPU\n",
                    (get_time() - start_time) * 1000);
      }
   }

   brw_alloc_stage_scratch(brw, &brw->wm.base, prog_data.base.total_scratch);

   if (unlikely((INTEL_DEBUG & DEBUG_WM) && fp->program.is_arb_asm))
      fprintf(stderr, "\n");

   /* The param and pull_param arrays will be freed by the shader cache. */
   ralloc_steal(NULL, prog_data.base.param);
   ralloc_steal(NULL, prog_data.base.pull_param);
   brw_upload_cache(&brw->cache, BRW_CACHE_FS_PROG,
                    key, sizeof(struct brw_wm_prog_key),
                    program, prog_data.base.program_size,
                    &prog_data, sizeof(prog_data),
                    &brw->wm.base.prog_offset, &brw->wm.base.prog_data);

   ralloc_free(mem_ctx);

   return true;
}

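/* Return the shader workaround flags needed for gather4 of a given integer
 * format on Gen6.  gather4 is broken for SINT/UINT surfaces there, so the
 * surface is set up as UNORM instead (see the caller below) and these flags
 * tell the shader how to convert the result back: rescale by 2^bits - 1
 * (WA_8BIT / WA_16BIT) and sign-extend when WA_SIGN is set.  GL_R8I, for
 * example, needs WA_SIGN | WA_8BIT, while GL_R16UI only needs the rescale.
 */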
static uint8_t
gen6_gather_workaround(GLenum internalformat)
{
   switch (internalformat) {
   case GL_R8I: return WA_SIGN | WA_8BIT;
   case GL_R8UI: return WA_8BIT;
   case GL_R16I: return WA_SIGN | WA_16BIT;
   case GL_R16UI: return WA_16BIT;
   default:
      /* Note that even though GL_R32I and GL_R32UI have format overrides in
       * the surface state, there is no shader w/a required.
       */
      return 0;
   }
}

static void
brw_populate_sampler_prog_key_data(struct gl_context *ctx,
                                   const struct gl_program *prog,
                                   struct brw_sampler_prog_key_data *key)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   GLbitfield mask = prog->SamplersUsed;

   while (mask) {
      const int s = u_bit_scan(&mask);

      key->swizzles[s] = SWIZZLE_NOOP;
      key->scale_factors[s] = 0.0f;

      int unit_id = prog->SamplerUnits[s];
      const struct gl_texture_unit *unit = &ctx->Texture.Unit[unit_id];

      if (unit->_Current && unit->_Current->Target != GL_TEXTURE_BUFFER) {
         const struct gl_texture_object *t = unit->_Current;
         const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
         struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit_id);

         const bool alpha_depth = t->DepthMode == GL_ALPHA &&
            (img->_BaseFormat == GL_DEPTH_COMPONENT ||
             img->_BaseFormat == GL_DEPTH_STENCIL);

         /* Haswell handles texture swizzling as surface format overrides
          * (except for GL_ALPHA); all other platforms need MOVs in the shader.
          */
         if (alpha_depth || (devinfo->gen < 8 && !devinfo->is_haswell))
            key->swizzles[s] = brw_get_texture_swizzle(ctx, t);

         if (devinfo->gen < 8 &&
             sampler->MinFilter != GL_NEAREST &&
             sampler->MagFilter != GL_NEAREST) {
            if (sampler->WrapS == GL_CLAMP)
               key->gl_clamp_mask[0] |= 1 << s;
            if (sampler->WrapT == GL_CLAMP)
               key->gl_clamp_mask[1] |= 1 << s;
            if (sampler->WrapR == GL_CLAMP)
               key->gl_clamp_mask[2] |= 1 << s;
         }

         /* gather4 for RG32* is broken in multiple ways on Gen7. */
         if (devinfo->gen == 7 && prog->info.uses_texture_gather) {
            switch (img->InternalFormat) {
            case GL_RG32I:
            case GL_RG32UI: {
               /* We have to override the format to R32G32_FLOAT_LD.
                * This means that SCS_ALPHA and SCS_ONE will return 0x3f8
                * (1.0) rather than integer 1.  This needs shader hacks.
                *
                * On Ivybridge, we whack W (alpha) to ONE in our key's
                * swizzle.  On Haswell, we look at the original texture
                * swizzle, and use XYZW with channels overridden to ONE,
                * leaving normal texture swizzling to SCS.
                */
               unsigned src_swizzle =
                  devinfo->is_haswell ? t->_Swizzle : key->swizzles[s];
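               /* key->swizzles[] entries pack four 3-bit component selectors
                * (SWIZZLE_X..SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE), one per
                * destination channel; GET_SWZ(swz, i) extracts the selector
                * for channel i.
                */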
               for (int i = 0; i < 4; i++) {
                  unsigned src_comp = GET_SWZ(src_swizzle, i);
                  if (src_comp == SWIZZLE_ONE || src_comp == SWIZZLE_W) {
                     key->swizzles[s] &= ~(0x7 << (3 * i));
                     key->swizzles[s] |= SWIZZLE_ONE << (3 * i);
                  }
               }
               /* fallthrough */
            }
            case GL_RG32F:
               /* The channel select for green doesn't work - we have to
                * request blue.  Haswell can use SCS for this, but Ivybridge
                * needs a shader workaround.
                */
               if (!devinfo->is_haswell)
                  key->gather_channel_quirk_mask |= 1 << s;
               break;
            }
         }

         /* Gen6's gather4 is broken for UINT/SINT; we treat them as
          * UNORM/FLOAT instead and fix it in the shader.
          */
         if (devinfo->gen == 6 && prog->info.uses_texture_gather) {
            key->gen6_gather_wa[s] = gen6_gather_workaround(img->InternalFormat);
         }

         /* If this is a multisample sampler, and uses the CMS MSAA layout,
          * then we need to emit slightly different code to first sample the
          * MCS surface.
          */
         struct intel_texture_object *intel_tex =
            intel_texture_object((struct gl_texture_object *)t);

         /* From gen9 onwards some single sampled buffers can also be
          * compressed.  These don't need ld2dms sampling along with mcs fetch.
          */
         if (intel_tex->mt->aux_usage == ISL_AUX_USAGE_MCS) {
            assert(devinfo->gen >= 7);
            assert(intel_tex->mt->surf.samples > 1);
            assert(intel_tex->mt->aux_buf);
            assert(intel_tex->mt->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
            key->compressed_multisample_layout_mask |= 1 << s;

            if (intel_tex->mt->surf.samples >= 16) {
               assert(devinfo->gen >= 9);
               key->msaa_16 |= 1 << s;
            }
         }

         if (t->Target == GL_TEXTURE_EXTERNAL_OES && intel_tex->planar_format) {

            /* Setup possible scaling factor. */
            key->scale_factors[s] = intel_tex->planar_format->scaling_factor;

            switch (intel_tex->planar_format->components) {
            case __DRI_IMAGE_COMPONENTS_Y_UV:
               key->y_uv_image_mask |= 1 << s;
               break;
            case __DRI_IMAGE_COMPONENTS_Y_U_V:
               key->y_u_v_image_mask |= 1 << s;
               break;
            case __DRI_IMAGE_COMPONENTS_Y_XUXV:
               key->yx_xuxv_image_mask |= 1 << s;
               break;
            case __DRI_IMAGE_COMPONENTS_Y_UXVX:
               key->xy_uxvx_image_mask |= 1 << s;
               break;
            case __DRI_IMAGE_COMPONENTS_AYUV:
               key->ayuv_image_mask |= 1 << s;
               break;
            case __DRI_IMAGE_COMPONENTS_XYUV:
               key->xyuv_image_mask |= 1 << s;
               break;
            default:
               break;
            }
         }

      }
   }
}

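/* Fill out the stage-independent parts of the program key from the current
 * GL state.
 */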
void
brw_populate_base_prog_key(struct gl_context *ctx,
                           const struct brw_program *prog,
                           struct brw_base_prog_key *key)
{
   key->program_string_id = prog->id;
   key->subgroup_size_type = BRW_SUBGROUP_SIZE_UNIFORM;
   brw_populate_sampler_prog_key_data(ctx, &prog->program, &key->tex);
}

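/* Like brw_populate_base_prog_key(), but for precompiles at link time, when
 * no draw-time GL state is available; texture state is filled with default
 * precompile assumptions instead.
 */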
void
brw_populate_default_base_prog_key(const struct gen_device_info *devinfo,
                                   const struct brw_program *prog,
                                   struct brw_base_prog_key *key)
{
   key->program_string_id = prog->id;
   key->subgroup_size_type = BRW_SUBGROUP_SIZE_UNIFORM;
   brw_setup_tex_for_precompile(devinfo, &key->tex, &prog->program);
}

static bool
brw_wm_state_dirty(const struct brw_context *brw)
{
   return brw_state_dirty(brw,
                          _NEW_BUFFERS |
                          _NEW_COLOR |
                          _NEW_DEPTH |
                          _NEW_FRAG_CLAMP |
                          _NEW_HINT |
                          _NEW_LIGHT |
                          _NEW_LINE |
                          _NEW_MULTISAMPLE |
                          _NEW_POLYGON |
                          _NEW_STENCIL |
                          _NEW_TEXTURE,
                          BRW_NEW_FRAGMENT_PROGRAM |
                          BRW_NEW_REDUCED_PRIMITIVE |
                          BRW_NEW_STATS_WM |
                          BRW_NEW_VUE_MAP_GEOM_OUT);
}

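/* Build the fragment program key from current GL state.  The comments on
 * each field name the _NEW_* / BRW_NEW_* state flags that feed it; when any
 * of that state changes, brw_wm_state_dirty() triggers a key rebuild and
 * possibly a recompile.
 */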
void
brw_wm_populate_key(struct brw_context *brw, struct brw_wm_prog_key *key)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct gl_program *prog = brw->programs[MESA_SHADER_FRAGMENT];
   const struct brw_program *fp = brw_program_const(prog);
   GLuint lookup = 0;
   GLuint line_aa;

   memset(key, 0, sizeof(*key));

   /* Build the index for the pre-gen6 IZ-state table lookup. */
   if (devinfo->gen < 6) {
      struct intel_renderbuffer *depth_irb =
         intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);

      /* _NEW_COLOR */
      if (prog->info.fs.uses_discard || ctx->Color.AlphaEnabled) {
         lookup |= BRW_WM_IZ_PS_KILL_ALPHATEST_BIT;
      }

      if (prog->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
         lookup |= BRW_WM_IZ_PS_COMPUTES_DEPTH_BIT;
      }

      /* _NEW_DEPTH */
      if (depth_irb && ctx->Depth.Test) {
         lookup |= BRW_WM_IZ_DEPTH_TEST_ENABLE_BIT;

         if (brw_depth_writes_enabled(brw))
            lookup |= BRW_WM_IZ_DEPTH_WRITE_ENABLE_BIT;
      }

      /* _NEW_STENCIL | _NEW_BUFFERS */
      if (brw->stencil_enabled) {
         lookup |= BRW_WM_IZ_STENCIL_TEST_ENABLE_BIT;

         if (ctx->Stencil.WriteMask[0] ||
             ctx->Stencil.WriteMask[ctx->Stencil._BackFace])
            lookup |= BRW_WM_IZ_STENCIL_WRITE_ENABLE_BIT;
      }
      key->iz_lookup = lookup;
   }

   line_aa = BRW_WM_AA_NEVER;

   /* _NEW_LINE, _NEW_POLYGON, BRW_NEW_REDUCED_PRIMITIVE */
   if (ctx->Line.SmoothFlag) {
      if (brw->reduced_primitive == GL_LINES) {
         line_aa = BRW_WM_AA_ALWAYS;
      }
      else if (brw->reduced_primitive == GL_TRIANGLES) {
         if (ctx->Polygon.FrontMode == GL_LINE) {
            line_aa = BRW_WM_AA_SOMETIMES;

            if (ctx->Polygon.BackMode == GL_LINE ||
                (ctx->Polygon.CullFlag &&
                 ctx->Polygon.CullFaceMode == GL_BACK))
               line_aa = BRW_WM_AA_ALWAYS;
         }
         else if (ctx->Polygon.BackMode == GL_LINE) {
            line_aa = BRW_WM_AA_SOMETIMES;

            if ((ctx->Polygon.CullFlag &&
                 ctx->Polygon.CullFaceMode == GL_FRONT))
               line_aa = BRW_WM_AA_ALWAYS;
         }
      }
   }

   key->line_aa = line_aa;

   /* _NEW_HINT */
   key->high_quality_derivatives =
      prog->info.uses_fddx_fddy &&
      ctx->Hint.FragmentShaderDerivative == GL_NICEST;

   if (devinfo->gen < 6)
      key->stats_wm = brw->stats_wm;

   /* _NEW_LIGHT */
   key->flat_shade =
      (prog->info.inputs_read & (VARYING_BIT_COL0 | VARYING_BIT_COL1)) &&
      (ctx->Light.ShadeModel == GL_FLAT);

   /* _NEW_FRAG_CLAMP | _NEW_BUFFERS */
   key->clamp_fragment_color = ctx->Color._ClampFragmentColor;

   /* _NEW_TEXTURE */
   brw_populate_base_prog_key(ctx, fp, &key->base);

   /* _NEW_BUFFERS */
   key->nr_color_regions = ctx->DrawBuffer->_NumColorDrawBuffers;

   /* _NEW_COLOR */
   key->force_dual_color_blend = brw->dual_color_blend_by_location &&
      (ctx->Color.BlendEnabled & 1) && ctx->Color.Blend[0]._UsesDualSrc;

   /* _NEW_MULTISAMPLE, _NEW_BUFFERS */
   key->alpha_to_coverage = _mesa_is_alpha_to_coverage_enabled(ctx);

   /* _NEW_COLOR, _NEW_BUFFERS */
   key->alpha_test_replicate_alpha =
      ctx->DrawBuffer->_NumColorDrawBuffers > 1 &&
      _mesa_is_alpha_test_enabled(ctx);

   /* _NEW_BUFFERS _NEW_MULTISAMPLE */
   /* Ignore sample qualifier while computing this flag. */
   if (ctx->Multisample.Enabled) {
      key->persample_interp =
         ctx->Multisample.SampleShading &&
         (ctx->Multisample.MinSampleShadingValue *
          _mesa_geometric_samples(ctx->DrawBuffer) > 1);

      key->multisample_fbo = _mesa_geometric_samples(ctx->DrawBuffer) > 1;
   }

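   /* The SF/SBE stage can remap at most 16 FS input slots, so with more
    * than 16 inputs (or on pre-gen6, which has no remapping at all) the
    * exact VUE layout has to be baked into the program key.
    */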
   /* BRW_NEW_VUE_MAP_GEOM_OUT */
   if (devinfo->gen < 6 || util_bitcount64(prog->info.inputs_read &
                                           BRW_FS_VARYING_INPUT_MASK) > 16) {
      key->input_slots_valid = brw->vue_map_geom_out.slots_valid;
   }

   /* _NEW_COLOR | _NEW_BUFFERS */
   /* Pre-gen6, the hardware alpha test always used each render
    * target's alpha to do alpha test, as opposed to render target 0's alpha
    * like GL requires.  Fix that by building the alpha test into the
    * shader, and we'll skip enabling the fixed function alpha test.
    */
   if (devinfo->gen < 6 && ctx->DrawBuffer->_NumColorDrawBuffers > 1 &&
       ctx->Color.AlphaEnabled) {
      key->alpha_test_func = ctx->Color.AlphaFunc;
      key->alpha_test_ref = ctx->Color.AlphaRef;
   }

   /* Whether reads from the framebuffer should behave coherently. */
   key->coherent_fb_fetch = ctx->Extensions.EXT_shader_framebuffer_fetch;
}

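/* Upload the fragment program needed for current state, compiling a new one
 * if necessary.  The in-memory program cache is searched first, then the
 * disk shader cache; only on a miss in both do we codegen.
 */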
void
brw_upload_wm_prog(struct brw_context *brw)
{
   struct brw_wm_prog_key key;
   struct brw_program *fp =
      (struct brw_program *) brw->programs[MESA_SHADER_FRAGMENT];

   if (!brw_wm_state_dirty(brw))
      return;

   brw_wm_populate_key(brw, &key);

   if (brw_search_cache(&brw->cache, BRW_CACHE_FS_PROG, &key, sizeof(key),
                        &brw->wm.base.prog_offset, &brw->wm.base.prog_data,
                        true))
      return;

   if (brw_disk_cache_upload_program(brw, MESA_SHADER_FRAGMENT))
      return;

   fp = (struct brw_program *) brw->programs[MESA_SHADER_FRAGMENT];
   fp->id = key.base.program_string_id;

   MAYBE_UNUSED bool success = brw_codegen_wm_prog(brw, fp, &key,
                                                   &brw->vue_map_geom_out);
   assert(success);
}

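/* Build a key representing default GL state, used to precompile the program
 * at link time before any draw-time state is known.
 */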
void
brw_wm_populate_default_key(const struct brw_compiler *compiler,
                            struct brw_wm_prog_key *key,
                            struct gl_program *prog)
{
   const struct gen_device_info *devinfo = compiler->devinfo;

   memset(key, 0, sizeof(*key));

   brw_populate_default_base_prog_key(devinfo, brw_program(prog),
                                      &key->base);

   uint64_t outputs_written = prog->info.outputs_written;

   if (devinfo->gen < 6) {
      if (prog->info.fs.uses_discard)
         key->iz_lookup |= BRW_WM_IZ_PS_KILL_ALPHATEST_BIT;

      if (outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
         key->iz_lookup |= BRW_WM_IZ_PS_COMPUTES_DEPTH_BIT;

      /* Just assume depth testing. */
      key->iz_lookup |= BRW_WM_IZ_DEPTH_TEST_ENABLE_BIT;
      key->iz_lookup |= BRW_WM_IZ_DEPTH_WRITE_ENABLE_BIT;
   }

   if (devinfo->gen < 6 || util_bitcount64(prog->info.inputs_read &
                                           BRW_FS_VARYING_INPUT_MASK) > 16) {
      key->input_slots_valid = prog->info.inputs_read | VARYING_BIT_POS;
   }

   key->nr_color_regions = util_bitcount64(outputs_written &
                                           ~(BITFIELD64_BIT(FRAG_RESULT_DEPTH) |
                                             BITFIELD64_BIT(FRAG_RESULT_STENCIL) |
                                             BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK)));

   /* Whether reads from the framebuffer should behave coherently. */
   key->coherent_fb_fetch = devinfo->gen >= 9;
}

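/* Precompile the fragment program with the default key.  The result lands
 * in the program cache; the current prog_offset/prog_data are saved and
 * restored so the precompile doesn't disturb the bound WM program.
 */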
bool
brw_fs_precompile(struct gl_context *ctx, struct gl_program *prog)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct brw_wm_prog_key key;

   struct brw_program *bfp = brw_program(prog);

   brw_wm_populate_default_key(brw->screen->compiler, &key, prog);

   /* check brw_wm_populate_default_key coherent_fb_fetch setting */
   assert(key.coherent_fb_fetch ==
          ctx->Extensions.EXT_shader_framebuffer_fetch);

   uint32_t old_prog_offset = brw->wm.base.prog_offset;
   struct brw_stage_prog_data *old_prog_data = brw->wm.base.prog_data;

   struct brw_vue_map vue_map;
   if (devinfo->gen < 6) {
      brw_compute_vue_map(&brw->screen->devinfo, &vue_map,
                          prog->info.inputs_read | VARYING_BIT_POS,
                          false);
   }

   bool success = brw_codegen_wm_prog(brw, bfp, &key, &vue_map);

   brw->wm.base.prog_offset = old_prog_offset;
   brw->wm.base.prog_data = old_prog_data;

   return success;
}