i965: Add support for sampling from XYUV images
[mesa.git] src/mesa/drivers/dri/i965/brw_wm.c
/*
 * Copyright (C) Intel Corp. 2006. All Rights Reserved.
 * Intel funded Tungsten Graphics to
 * develop this 3D driver.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "brw_context.h"
#include "brw_wm.h"
#include "brw_state.h"
#include "main/enums.h"
#include "main/formats.h"
#include "main/fbobject.h"
#include "main/samplerobj.h"
#include "main/framebuffer.h"
#include "program/prog_parameter.h"
#include "program/program.h"
#include "intel_mipmap_tree.h"
#include "intel_image.h"
#include "intel_fbo.h"
#include "compiler/brw_nir.h"
#include "brw_program.h"

#include "util/ralloc.h"
#include "util/u_math.h"

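/* Assign binding table offsets for the FS: render targets come first
 * (starting at surface 0), followed by the common per-stage surfaces
 * (textures, UBOs, images, etc.), and finally any surfaces needed for
 * non-coherent framebuffer fetch.
 */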
static void
assign_fs_binding_table_offsets(const struct gen_device_info *devinfo,
                                const struct gl_program *prog,
                                const struct brw_wm_prog_key *key,
                                struct brw_wm_prog_data *prog_data)
{
   /* Render targets implicitly start at surface index 0. Even if there are
    * no color regions, we still perform an FB write to a null render target,
    * which will be surface 0.
    */
   uint32_t next_binding_table_offset = MAX2(key->nr_color_regions, 1);

   next_binding_table_offset =
      brw_assign_common_binding_table_offsets(devinfo, prog, &prog_data->base,
                                              next_binding_table_offset);

   if (prog->nir->info.outputs_read && !key->coherent_fb_fetch) {
      prog_data->binding_table.render_target_read_start =
         next_binding_table_offset;
      next_binding_table_offset += key->nr_color_regions;
   }

   /* Update the binding table size */
   prog_data->base.binding_table.size_bytes = next_binding_table_offset * 4;
}

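/* Report (via INTEL_DEBUG=perf) which fields of the FS program key changed
 * relative to the previous compile of the same program and therefore caused
 * this recompile.
 */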
static void
brw_wm_debug_recompile(struct brw_context *brw, struct gl_program *prog,
                       const struct brw_wm_prog_key *key)
{
   perf_debug("Recompiling fragment shader for program %d\n", prog->Id);

   bool found = false;
   const struct brw_wm_prog_key *old_key =
      brw_find_previous_compile(&brw->cache, BRW_CACHE_FS_PROG,
                                key->program_string_id);

   if (!old_key) {
      perf_debug(" Didn't find previous compile in the shader cache for debug\n");
      return;
   }

   found |= key_debug(brw, "alphatest, computed depth, depth test, or "
                      "depth write",
                      old_key->iz_lookup, key->iz_lookup);
   found |= key_debug(brw, "depth statistics",
                      old_key->stats_wm, key->stats_wm);
   found |= key_debug(brw, "flat shading",
                      old_key->flat_shade, key->flat_shade);
   found |= key_debug(brw, "number of color buffers",
                      old_key->nr_color_regions, key->nr_color_regions);
   found |= key_debug(brw, "MRT alpha test or alpha-to-coverage",
                      old_key->replicate_alpha, key->replicate_alpha);
   found |= key_debug(brw, "fragment color clamping",
                      old_key->clamp_fragment_color, key->clamp_fragment_color);
   found |= key_debug(brw, "per-sample interpolation",
                      old_key->persample_interp, key->persample_interp);
   found |= key_debug(brw, "multisampled FBO",
                      old_key->multisample_fbo, key->multisample_fbo);
   found |= key_debug(brw, "frag coord adds sample pos",
                      old_key->frag_coord_adds_sample_pos,
                      key->frag_coord_adds_sample_pos);
   found |= key_debug(brw, "line smoothing",
                      old_key->line_aa, key->line_aa);
   found |= key_debug(brw, "high quality derivatives",
                      old_key->high_quality_derivatives,
                      key->high_quality_derivatives);
   found |= key_debug(brw, "force dual color blending",
                      old_key->force_dual_color_blend,
                      key->force_dual_color_blend);
   found |= key_debug(brw, "coherent fb fetch",
                      old_key->coherent_fb_fetch, key->coherent_fb_fetch);

   found |= key_debug(brw, "input slots valid",
                      old_key->input_slots_valid, key->input_slots_valid);
   found |= key_debug(brw, "mrt alpha test function",
                      old_key->alpha_test_func, key->alpha_test_func);
   found |= key_debug(brw, "mrt alpha test reference value",
                      old_key->alpha_test_ref, key->alpha_test_ref);

   found |= brw_debug_recompile_sampler_key(brw, &old_key->tex, &key->tex);

   if (!found) {
      perf_debug(" Something else\n");
   }
}

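/* Compile the fragment program for the given key and upload the result to
 * the in-memory program cache (BRW_CACHE_FS_PROG).  Returns false if
 * compilation fails.
 */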
static bool
brw_codegen_wm_prog(struct brw_context *brw,
                    struct brw_program *fp,
                    struct brw_wm_prog_key *key,
                    struct brw_vue_map *vue_map)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   void *mem_ctx = ralloc_context(NULL);
   struct brw_wm_prog_data prog_data;
   const GLuint *program;
   bool start_busy = false;
   double start_time = 0;

   nir_shader *nir = nir_shader_clone(mem_ctx, fp->program.nir);

   memset(&prog_data, 0, sizeof(prog_data));

   /* Use ALT floating point mode for ARB programs so that 0^0 == 1. */
   if (fp->program.is_arb_asm)
      prog_data.base.use_alt_mode = true;

   assign_fs_binding_table_offsets(devinfo, &fp->program, key, &prog_data);

   if (!fp->program.is_arb_asm) {
      brw_nir_setup_glsl_uniforms(mem_ctx, nir, &fp->program,
                                  &prog_data.base, true);
      brw_nir_analyze_ubo_ranges(brw->screen->compiler, nir,
                                 NULL, prog_data.base.ubo_ranges);
   } else {
      brw_nir_setup_arb_uniforms(mem_ctx, nir, &fp->program, &prog_data.base);

      if (unlikely(INTEL_DEBUG & DEBUG_WM))
         brw_dump_arb_asm("fragment", &fp->program);
   }

   if (unlikely(brw->perf_debug)) {
      start_busy = (brw->batch.last_bo &&
                    brw_bo_busy(brw->batch.last_bo));
      start_time = get_time();
   }

   int st_index8 = -1, st_index16 = -1, st_index32 = -1;
   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      st_index8 = brw_get_shader_time_index(brw, &fp->program, ST_FS8,
                                            !fp->program.is_arb_asm);
      st_index16 = brw_get_shader_time_index(brw, &fp->program, ST_FS16,
                                             !fp->program.is_arb_asm);
      st_index32 = brw_get_shader_time_index(brw, &fp->program, ST_FS32,
                                             !fp->program.is_arb_asm);
   }

   char *error_str = NULL;
   program = brw_compile_fs(brw->screen->compiler, brw, mem_ctx,
                            key, &prog_data, nir,
                            &fp->program, st_index8, st_index16, st_index32,
                            true, false, vue_map,
                            &error_str);

   if (program == NULL) {
      if (!fp->program.is_arb_asm) {
         fp->program.sh.data->LinkStatus = LINKING_FAILURE;
         ralloc_strcat(&fp->program.sh.data->InfoLog, error_str);
      }

      _mesa_problem(NULL, "Failed to compile fragment shader: %s\n", error_str);

      ralloc_free(mem_ctx);
      return false;
   }

   if (unlikely(brw->perf_debug)) {
      if (fp->compiled_once)
         brw_wm_debug_recompile(brw, &fp->program, key);
      fp->compiled_once = true;

      if (start_busy && !brw_bo_busy(brw->batch.last_bo)) {
         perf_debug("FS compile took %.03f ms and stalled the GPU\n",
                    (get_time() - start_time) * 1000);
      }
   }

   brw_alloc_stage_scratch(brw, &brw->wm.base, prog_data.base.total_scratch);

   if (unlikely((INTEL_DEBUG & DEBUG_WM) && fp->program.is_arb_asm))
      fprintf(stderr, "\n");

   /* The param and pull_param arrays will be freed by the shader cache. */
   ralloc_steal(NULL, prog_data.base.param);
   ralloc_steal(NULL, prog_data.base.pull_param);
   brw_upload_cache(&brw->cache, BRW_CACHE_FS_PROG,
                    key, sizeof(struct brw_wm_prog_key),
                    program, prog_data.base.program_size,
                    &prog_data, sizeof(prog_data),
                    &brw->wm.base.prog_offset, &brw->wm.base.prog_data);

   ralloc_free(mem_ctx);

   return true;
}

bool
brw_debug_recompile_sampler_key(struct brw_context *brw,
                                const struct brw_sampler_prog_key_data *old_key,
                                const struct brw_sampler_prog_key_data *key)
{
   bool found = false;

   for (unsigned int i = 0; i < MAX_SAMPLERS; i++) {
      found |= key_debug(brw, "EXT_texture_swizzle or DEPTH_TEXTURE_MODE",
                         old_key->swizzles[i], key->swizzles[i]);
   }
   found |= key_debug(brw, "GL_CLAMP enabled on any texture unit's 1st coordinate",
                      old_key->gl_clamp_mask[0], key->gl_clamp_mask[0]);
   found |= key_debug(brw, "GL_CLAMP enabled on any texture unit's 2nd coordinate",
                      old_key->gl_clamp_mask[1], key->gl_clamp_mask[1]);
   found |= key_debug(brw, "GL_CLAMP enabled on any texture unit's 3rd coordinate",
                      old_key->gl_clamp_mask[2], key->gl_clamp_mask[2]);
   found |= key_debug(brw, "gather channel quirk on any texture unit",
                      old_key->gather_channel_quirk_mask, key->gather_channel_quirk_mask);
   found |= key_debug(brw, "compressed multisample layout",
                      old_key->compressed_multisample_layout_mask,
                      key->compressed_multisample_layout_mask);
   found |= key_debug(brw, "16x msaa",
                      old_key->msaa_16,
                      key->msaa_16);

   found |= key_debug(brw, "y_uv image bound",
                      old_key->y_uv_image_mask,
                      key->y_uv_image_mask);
   found |= key_debug(brw, "y_u_v image bound",
                      old_key->y_u_v_image_mask,
                      key->y_u_v_image_mask);
   found |= key_debug(brw, "yx_xuxv image bound",
                      old_key->yx_xuxv_image_mask,
                      key->yx_xuxv_image_mask);
   found |= key_debug(brw, "xy_uxvx image bound",
                      old_key->xy_uxvx_image_mask,
                      key->xy_uxvx_image_mask);
   found |= key_debug(brw, "ayuv image bound",
                      old_key->ayuv_image_mask,
                      key->ayuv_image_mask);
   found |= key_debug(brw, "xyuv image bound",
                      old_key->xyuv_image_mask,
                      key->xyuv_image_mask);

   for (unsigned int i = 0; i < MAX_SAMPLERS; i++) {
      found |= key_debug(brw, "textureGather workarounds",
                         old_key->gen6_gather_wa[i], key->gen6_gather_wa[i]);
   }

   for (unsigned int i = 0; i < MAX_SAMPLERS; i++) {
      found |= key_debug_float(brw, "scale factor",
                               old_key->scale_factors[i],
                               key->scale_factors[i]);
   }

   return found;
}

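/* Map an integer texture internal format to the WA_* flags that tell the
 * compiler how to fix up Gen6 gather4 results in the shader (these formats
 * are sampled as UNORM rather than as integers, so the result needs to be
 * re-expanded to its original integer range).
 */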
static uint8_t
gen6_gather_workaround(GLenum internalformat)
{
   switch (internalformat) {
   case GL_R8I: return WA_SIGN | WA_8BIT;
   case GL_R8UI: return WA_8BIT;
   case GL_R16I: return WA_SIGN | WA_16BIT;
   case GL_R16UI: return WA_16BIT;
   default:
      /* Note that even though GL_R32I and GL_R32UI have format overrides in
       * the surface state, there is no shader w/a required.
       */
      return 0;
   }
}

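/* Gather the per-sampler state that affects FS code generation: texture
 * swizzles, GL_CLAMP coordinate wrap workarounds, gather4 workarounds, the
 * CMS multisample layout, and which samplers are bound to external YUV
 * (planar) images.
 */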
void
brw_populate_sampler_prog_key_data(struct gl_context *ctx,
                                   const struct gl_program *prog,
                                   struct brw_sampler_prog_key_data *key)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   GLbitfield mask = prog->SamplersUsed;

   while (mask) {
      const int s = u_bit_scan(&mask);

      key->swizzles[s] = SWIZZLE_NOOP;
      key->scale_factors[s] = 0.0f;

      int unit_id = prog->SamplerUnits[s];
      const struct gl_texture_unit *unit = &ctx->Texture.Unit[unit_id];

      if (unit->_Current && unit->_Current->Target != GL_TEXTURE_BUFFER) {
         const struct gl_texture_object *t = unit->_Current;
         const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
         struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit_id);

         const bool alpha_depth = t->DepthMode == GL_ALPHA &&
            (img->_BaseFormat == GL_DEPTH_COMPONENT ||
             img->_BaseFormat == GL_DEPTH_STENCIL);

         /* Haswell handles texture swizzling as surface format overrides
          * (except for GL_ALPHA); all other platforms need MOVs in the shader.
          */
         if (alpha_depth || (devinfo->gen < 8 && !devinfo->is_haswell))
            key->swizzles[s] = brw_get_texture_swizzle(ctx, t);

         if (devinfo->gen < 8 &&
             sampler->MinFilter != GL_NEAREST &&
             sampler->MagFilter != GL_NEAREST) {
            if (sampler->WrapS == GL_CLAMP)
               key->gl_clamp_mask[0] |= 1 << s;
            if (sampler->WrapT == GL_CLAMP)
               key->gl_clamp_mask[1] |= 1 << s;
            if (sampler->WrapR == GL_CLAMP)
               key->gl_clamp_mask[2] |= 1 << s;
         }

         /* gather4 for RG32* is broken in multiple ways on Gen7. */
         if (devinfo->gen == 7 && prog->info.uses_texture_gather) {
            switch (img->InternalFormat) {
            case GL_RG32I:
            case GL_RG32UI: {
               /* We have to override the format to R32G32_FLOAT_LD.
                * This means that SCS_ALPHA and SCS_ONE will return 0x3f800000
                * (1.0) rather than integer 1. This needs shader hacks.
                *
                * On Ivybridge, we whack W (alpha) to ONE in our key's
                * swizzle. On Haswell, we look at the original texture
                * swizzle, and use XYZW with channels overridden to ONE,
                * leaving normal texture swizzling to SCS.
                */
               unsigned src_swizzle =
                  devinfo->is_haswell ? t->_Swizzle : key->swizzles[s];
               for (int i = 0; i < 4; i++) {
                  unsigned src_comp = GET_SWZ(src_swizzle, i);
                  if (src_comp == SWIZZLE_ONE || src_comp == SWIZZLE_W) {
                     key->swizzles[s] &= ~(0x7 << (3 * i));
                     key->swizzles[s] |= SWIZZLE_ONE << (3 * i);
                  }
               }
               /* fallthrough */
            }
            case GL_RG32F:
               /* The channel select for green doesn't work - we have to
                * request blue. Haswell can use SCS for this, but Ivybridge
                * needs a shader workaround.
                */
               if (!devinfo->is_haswell)
                  key->gather_channel_quirk_mask |= 1 << s;
               break;
            }
         }

         /* Gen6's gather4 is broken for UINT/SINT; we treat them as
          * UNORM/FLOAT instead and fix it in the shader.
          */
         if (devinfo->gen == 6 && prog->info.uses_texture_gather) {
            key->gen6_gather_wa[s] = gen6_gather_workaround(img->InternalFormat);
         }

         /* If this is a multisample sampler, and uses the CMS MSAA layout,
          * then we need to emit slightly different code to first sample the
          * MCS surface.
          */
         struct intel_texture_object *intel_tex =
            intel_texture_object((struct gl_texture_object *)t);

         /* From gen9 onwards some single sampled buffers can also be
          * compressed. These don't need ld2dms sampling along with mcs fetch.
          */
         if (intel_tex->mt->aux_usage == ISL_AUX_USAGE_MCS) {
            assert(devinfo->gen >= 7);
            assert(intel_tex->mt->surf.samples > 1);
            assert(intel_tex->mt->aux_buf);
            assert(intel_tex->mt->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
            key->compressed_multisample_layout_mask |= 1 << s;

            if (intel_tex->mt->surf.samples >= 16) {
               assert(devinfo->gen >= 9);
               key->msaa_16 |= 1 << s;
            }
         }

         if (t->Target == GL_TEXTURE_EXTERNAL_OES && intel_tex->planar_format) {

            /* Setup possible scaling factor. */
            key->scale_factors[s] = intel_tex->planar_format->scaling_factor;

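            /* Record which external (planar YUV) image layout is bound to
             * this sampler; the compiler uses these masks to lower texture
             * lookups into per-plane fetches plus a YUV->RGB conversion
             * (see brw_nir_apply_sampler_key).
             */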
            switch (intel_tex->planar_format->components) {
            case __DRI_IMAGE_COMPONENTS_Y_UV:
               key->y_uv_image_mask |= 1 << s;
               break;
            case __DRI_IMAGE_COMPONENTS_Y_U_V:
               key->y_u_v_image_mask |= 1 << s;
               break;
            case __DRI_IMAGE_COMPONENTS_Y_XUXV:
               key->yx_xuxv_image_mask |= 1 << s;
               break;
            case __DRI_IMAGE_COMPONENTS_Y_UXVX:
               key->xy_uxvx_image_mask |= 1 << s;
               break;
            case __DRI_IMAGE_COMPONENTS_AYUV:
               key->ayuv_image_mask |= 1 << s;
               break;
            case __DRI_IMAGE_COMPONENTS_XYUV:
               key->xyuv_image_mask |= 1 << s;
               break;
            default:
               break;
            }
         }

      }
   }
}

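/* Return true if any of the GL or driver state that the FS program key
 * depends on has changed since the last draw.
 */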
static bool
brw_wm_state_dirty(const struct brw_context *brw)
{
   return brw_state_dirty(brw,
                          _NEW_BUFFERS |
                          _NEW_COLOR |
                          _NEW_DEPTH |
                          _NEW_FRAG_CLAMP |
                          _NEW_HINT |
                          _NEW_LIGHT |
                          _NEW_LINE |
                          _NEW_MULTISAMPLE |
                          _NEW_POLYGON |
                          _NEW_STENCIL |
                          _NEW_TEXTURE,
                          BRW_NEW_FRAGMENT_PROGRAM |
                          BRW_NEW_REDUCED_PRIMITIVE |
                          BRW_NEW_STATS_WM |
                          BRW_NEW_VUE_MAP_GEOM_OUT);
}

void
brw_wm_populate_key(struct brw_context *brw, struct brw_wm_prog_key *key)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct gl_program *prog = brw->programs[MESA_SHADER_FRAGMENT];
   const struct brw_program *fp = brw_program_const(prog);
   GLuint lookup = 0;
   GLuint line_aa;

   memset(key, 0, sizeof(*key));

   /* Build the index for table lookup
    */
   if (devinfo->gen < 6) {
      struct intel_renderbuffer *depth_irb =
         intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);

      /* _NEW_COLOR */
      if (prog->info.fs.uses_discard || ctx->Color.AlphaEnabled) {
         lookup |= BRW_WM_IZ_PS_KILL_ALPHATEST_BIT;
      }

      if (prog->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
         lookup |= BRW_WM_IZ_PS_COMPUTES_DEPTH_BIT;
      }

      /* _NEW_DEPTH */
      if (depth_irb && ctx->Depth.Test) {
         lookup |= BRW_WM_IZ_DEPTH_TEST_ENABLE_BIT;

         if (brw_depth_writes_enabled(brw))
            lookup |= BRW_WM_IZ_DEPTH_WRITE_ENABLE_BIT;
      }

      /* _NEW_STENCIL | _NEW_BUFFERS */
      if (brw->stencil_enabled) {
         lookup |= BRW_WM_IZ_STENCIL_TEST_ENABLE_BIT;

         if (ctx->Stencil.WriteMask[0] ||
             ctx->Stencil.WriteMask[ctx->Stencil._BackFace])
            lookup |= BRW_WM_IZ_STENCIL_WRITE_ENABLE_BIT;
      }
      key->iz_lookup = lookup;
   }

   line_aa = BRW_WM_AA_NEVER;

   /* _NEW_LINE, _NEW_POLYGON, BRW_NEW_REDUCED_PRIMITIVE */
   if (ctx->Line.SmoothFlag) {
      if (brw->reduced_primitive == GL_LINES) {
         line_aa = BRW_WM_AA_ALWAYS;
      }
      else if (brw->reduced_primitive == GL_TRIANGLES) {
         if (ctx->Polygon.FrontMode == GL_LINE) {
            line_aa = BRW_WM_AA_SOMETIMES;

            if (ctx->Polygon.BackMode == GL_LINE ||
                (ctx->Polygon.CullFlag &&
                 ctx->Polygon.CullFaceMode == GL_BACK))
               line_aa = BRW_WM_AA_ALWAYS;
         }
         else if (ctx->Polygon.BackMode == GL_LINE) {
            line_aa = BRW_WM_AA_SOMETIMES;

            if ((ctx->Polygon.CullFlag &&
                 ctx->Polygon.CullFaceMode == GL_FRONT))
               line_aa = BRW_WM_AA_ALWAYS;
         }
      }
   }

   key->line_aa = line_aa;

   /* _NEW_HINT */
   key->high_quality_derivatives =
      prog->info.uses_fddx_fddy &&
      ctx->Hint.FragmentShaderDerivative == GL_NICEST;

   if (devinfo->gen < 6)
      key->stats_wm = brw->stats_wm;

   /* _NEW_LIGHT */
   key->flat_shade =
      (prog->info.inputs_read & (VARYING_BIT_COL0 | VARYING_BIT_COL1)) &&
      (ctx->Light.ShadeModel == GL_FLAT);

   /* _NEW_FRAG_CLAMP | _NEW_BUFFERS */
   key->clamp_fragment_color = ctx->Color._ClampFragmentColor;

   /* _NEW_TEXTURE */
   brw_populate_sampler_prog_key_data(ctx, prog, &key->tex);

   /* _NEW_BUFFERS */
   key->nr_color_regions = ctx->DrawBuffer->_NumColorDrawBuffers;

   /* _NEW_COLOR */
   key->force_dual_color_blend = brw->dual_color_blend_by_location &&
      (ctx->Color.BlendEnabled & 1) && ctx->Color.Blend[0]._UsesDualSrc;

   /* _NEW_MULTISAMPLE, _NEW_COLOR, _NEW_BUFFERS */
   key->replicate_alpha = ctx->DrawBuffer->_NumColorDrawBuffers > 1 &&
      (_mesa_is_alpha_test_enabled(ctx) ||
       _mesa_is_alpha_to_coverage_enabled(ctx));

   /* _NEW_BUFFERS _NEW_MULTISAMPLE */
   /* Ignore sample qualifier while computing this flag. */
   if (ctx->Multisample.Enabled) {
      key->persample_interp =
         ctx->Multisample.SampleShading &&
         (ctx->Multisample.MinSampleShadingValue *
          _mesa_geometric_samples(ctx->DrawBuffer) > 1);

      key->multisample_fbo = _mesa_geometric_samples(ctx->DrawBuffer) > 1;
   }

   /* BRW_NEW_VUE_MAP_GEOM_OUT */
   if (devinfo->gen < 6 || util_bitcount64(prog->info.inputs_read &
                                           BRW_FS_VARYING_INPUT_MASK) > 16) {
      key->input_slots_valid = brw->vue_map_geom_out.slots_valid;
   }

   /* _NEW_COLOR | _NEW_BUFFERS */
   /* Pre-gen6, the hardware alpha test always used each render
    * target's alpha to do alpha test, as opposed to render target 0's alpha
    * like GL requires. Fix that by building the alpha test into the
    * shader, and we'll skip enabling the fixed function alpha test.
    */
   if (devinfo->gen < 6 && ctx->DrawBuffer->_NumColorDrawBuffers > 1 &&
       ctx->Color.AlphaEnabled) {
      key->alpha_test_func = ctx->Color.AlphaFunc;
      key->alpha_test_ref = ctx->Color.AlphaRef;
   }

   /* The unique fragment program ID */
   key->program_string_id = fp->id;

   /* Whether reads from the framebuffer should behave coherently. */
   key->coherent_fb_fetch = ctx->Extensions.EXT_shader_framebuffer_fetch;
}

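/* Upload the fragment program for the current state: build the key, look it
 * up in the in-memory program cache, then in the on-disk shader cache, and
 * only compile from NIR if both miss.
 */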
void
brw_upload_wm_prog(struct brw_context *brw)
{
   struct brw_wm_prog_key key;
   struct brw_program *fp =
      (struct brw_program *) brw->programs[MESA_SHADER_FRAGMENT];

   if (!brw_wm_state_dirty(brw))
      return;

   brw_wm_populate_key(brw, &key);

   if (brw_search_cache(&brw->cache, BRW_CACHE_FS_PROG, &key, sizeof(key),
                        &brw->wm.base.prog_offset, &brw->wm.base.prog_data,
                        true))
      return;

   if (brw_disk_cache_upload_program(brw, MESA_SHADER_FRAGMENT))
      return;

   fp = (struct brw_program *) brw->programs[MESA_SHADER_FRAGMENT];
   fp->id = key.program_string_id;

   MAYBE_UNUSED bool success = brw_codegen_wm_prog(brw, fp, &key,
                                                   &brw->vue_map_geom_out);
   assert(success);
}

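/* Build a default program key for precompiling the fragment program at link
 * time, before any draw-time state is known.
 */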
void
brw_wm_populate_default_key(const struct gen_device_info *devinfo,
                            struct brw_wm_prog_key *key,
                            struct gl_program *prog)
{
   memset(key, 0, sizeof(*key));

   uint64_t outputs_written = prog->info.outputs_written;

   if (devinfo->gen < 6) {
      if (prog->info.fs.uses_discard)
         key->iz_lookup |= BRW_WM_IZ_PS_KILL_ALPHATEST_BIT;

      if (outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
         key->iz_lookup |= BRW_WM_IZ_PS_COMPUTES_DEPTH_BIT;

      /* Just assume depth testing. */
      key->iz_lookup |= BRW_WM_IZ_DEPTH_TEST_ENABLE_BIT;
      key->iz_lookup |= BRW_WM_IZ_DEPTH_WRITE_ENABLE_BIT;
   }

   if (devinfo->gen < 6 || util_bitcount64(prog->info.inputs_read &
                                           BRW_FS_VARYING_INPUT_MASK) > 16) {
      key->input_slots_valid = prog->info.inputs_read | VARYING_BIT_POS;
   }

   brw_setup_tex_for_precompile(devinfo, &key->tex, prog);

   key->nr_color_regions = util_bitcount64(outputs_written &
                                           ~(BITFIELD64_BIT(FRAG_RESULT_DEPTH) |
                                             BITFIELD64_BIT(FRAG_RESULT_STENCIL) |
                                             BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK)));

   key->program_string_id = brw_program(prog)->id;

   /* Whether reads from the framebuffer should behave coherently. */
   key->coherent_fb_fetch = devinfo->gen >= 9;
}

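/* Precompile the fragment program with the default key to warm the program
 * cache at link time; the previous prog_offset/prog_data are restored so the
 * current draw state is left untouched.
 */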
bool
brw_fs_precompile(struct gl_context *ctx, struct gl_program *prog)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct brw_wm_prog_key key;

   struct brw_program *bfp = brw_program(prog);

   brw_wm_populate_default_key(&brw->screen->devinfo, &key, prog);

   /* check brw_wm_populate_default_key coherent_fb_fetch setting */
   assert(key.coherent_fb_fetch ==
          ctx->Extensions.EXT_shader_framebuffer_fetch);

   uint32_t old_prog_offset = brw->wm.base.prog_offset;
   struct brw_stage_prog_data *old_prog_data = brw->wm.base.prog_data;

   struct brw_vue_map vue_map;
   if (devinfo->gen < 6) {
      brw_compute_vue_map(&brw->screen->devinfo, &vue_map,
                          prog->info.inputs_read | VARYING_BIT_POS,
                          false);
   }

   bool success = brw_codegen_wm_prog(brw, bfp, &key, &vue_map);

   brw->wm.base.prog_offset = old_prog_offset;
   brw->wm.base.prog_data = old_prog_data;

   return success;
}