mesa: treat Color._AdvancedBlendMode as enum
src/mesa/drivers/dri/i965/brw_wm_surface_state.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */

#include "compiler/nir/nir.h"
#include "main/context.h"
#include "main/blend.h"
#include "main/mtypes.h"
#include "main/samplerobj.h"
#include "main/shaderimage.h"
#include "main/teximage.h"
#include "program/prog_parameter.h"
#include "program/prog_instruction.h"
#include "main/framebuffer.h"
#include "main/shaderapi.h"

#include "isl/isl.h"

#include "intel_mipmap_tree.h"
#include "intel_batchbuffer.h"
#include "intel_tex.h"
#include "intel_fbo.h"
#include "intel_buffer_objects.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "brw_wm.h"

static const uint32_t wb_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_WB,
   [9] = SKL_MOCS_WB,
   [10] = CNL_MOCS_WB,
   [11] = ICL_MOCS_WB,
};

static const uint32_t pte_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_PTE,
   [9] = SKL_MOCS_PTE,
   [10] = CNL_MOCS_PTE,
   [11] = ICL_MOCS_PTE,
};

uint32_t
brw_get_bo_mocs(const struct gen_device_info *devinfo, struct brw_bo *bo)
{
   return (bo && bo->external ? pte_mocs : wb_mocs)[devinfo->gen];
}
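
/* A minimal usage sketch of the helper above (the bo and device here are
 * hypothetical, not values from this file): on a gen9 (Skylake) device,
 *
 *    brw_get_bo_mocs(devinfo, external_bo)  -> SKL_MOCS_PTE
 *    brw_get_bo_mocs(devinfo, internal_bo)  -> SKL_MOCS_WB
 *
 * i.e. buffers shared outside the driver (scanout etc.) take the kernel's
 * PTE caching attributes, while driver-internal buffers are write-back
 * cached.
 */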

static void
get_isl_surf(struct brw_context *brw, struct intel_mipmap_tree *mt,
             GLenum target, struct isl_view *view,
             uint32_t *tile_x, uint32_t *tile_y,
             uint32_t *offset, struct isl_surf *surf)
{
   *surf = mt->surf;

   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const enum isl_dim_layout dim_layout =
      get_isl_dim_layout(devinfo, mt->surf.tiling, target);

   surf->dim = get_isl_surf_dim(target);

   if (surf->dim_layout == dim_layout)
      return;

   /* The layout of the specified texture target is not compatible with the
    * actual layout of the miptree structure in memory -- you're entering
    * dangerous territory.  This can only possibly work if you only intended
    * to access a single level and slice of the texture, and the hardware
    * supports the tile offset feature, which allows non-tile-aligned base
    * offsets, since we'll have to point the hardware at the first texel of
    * the level instead of relying on the usual base level/layer controls.
    */
   assert(devinfo->has_surface_tile_offset);
   assert(view->levels == 1 && view->array_len == 1);
   assert(*tile_x == 0 && *tile_y == 0);

   *offset += intel_miptree_get_tile_offsets(mt, view->base_level,
                                             view->base_array_layer,
                                             tile_x, tile_y);

   /* Minify the logical dimensions of the texture. */
   const unsigned l = view->base_level - mt->first_level;
   surf->logical_level0_px.width = minify(surf->logical_level0_px.width, l);
   surf->logical_level0_px.height = surf->dim <= ISL_SURF_DIM_1D ? 1 :
      minify(surf->logical_level0_px.height, l);
   surf->logical_level0_px.depth = surf->dim <= ISL_SURF_DIM_2D ? 1 :
      minify(surf->logical_level0_px.depth, l);

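   /* A worked example of the minification above (hypothetical numbers):
    * for a 256x128 2D miptree with first_level == 0 and view->base_level
    * == 2, l == 2 and the overridden level-0 size becomes
    *
    *    width  = minify(256, 2) = 64
    *    height = minify(128, 2) = 32
    *
    * i.e. exactly the dimensions of the one level we will address directly.
    */
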
   /* Only the base level and layer can be addressed with the overridden
    * layout.
    */
   surf->logical_level0_px.array_len = 1;
   surf->levels = 1;
   surf->dim_layout = dim_layout;

   /* The requested slice of the texture is now at the base level and
    * layer.
    */
   view->base_level = 0;
   view->base_array_layer = 0;
}

static void
brw_emit_surface_state(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       GLenum target, struct isl_view view,
                       enum isl_aux_usage aux_usage,
                       uint32_t *surf_offset, int surf_index,
                       unsigned reloc_flags)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t tile_x = mt->level[0].level_x;
   uint32_t tile_y = mt->level[0].level_y;
   uint32_t offset = mt->offset;

   struct isl_surf surf;

   get_isl_surf(brw, mt, target, &view, &tile_x, &tile_y, &offset, &surf);

   union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };

   struct brw_bo *aux_bo = NULL;
   struct isl_surf *aux_surf = NULL;
   uint64_t aux_offset = 0;
   struct brw_bo *clear_bo = NULL;
   uint64_t clear_offset = 0;

   if (aux_usage != ISL_AUX_USAGE_NONE) {
      aux_surf = &mt->aux_buf->surf;
      aux_bo = mt->aux_buf->bo;
      aux_offset = mt->aux_buf->offset;

      /* We only really need a clear color if we also have an auxiliary
       * surface.  Without one, it does nothing.
       */
      clear_color =
         intel_miptree_get_clear_color(devinfo, mt, view.format,
                                       view.usage & ISL_SURF_USAGE_TEXTURE_BIT,
                                       &clear_bo, &clear_offset);
   }

   void *state = brw_state_batch(brw,
                                 brw->isl_dev.ss.size,
                                 brw->isl_dev.ss.align,
                                 surf_offset);

   isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
                       .address = brw_state_reloc(&brw->batch,
                                                  *surf_offset + brw->isl_dev.ss.addr_offset,
                                                  mt->bo, offset, reloc_flags),
                       .aux_surf = aux_surf, .aux_usage = aux_usage,
                       .aux_address = aux_offset,
                       .mocs = brw_get_bo_mocs(devinfo, mt->bo),
                       .clear_color = clear_color,
                       .use_clear_address = clear_bo != NULL,
                       .clear_address = clear_offset,
                       .x_offset_sa = tile_x, .y_offset_sa = tile_y);
   if (aux_surf) {
      /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
       * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
       * contain other control information.  Since buffer addresses are always
       * on 4k boundaries (and thus have their lower 12 bits zero), we can use
       * an ordinary reloc to do the necessary address translation.
       *
       * FIXME: move to the point of assignment.
       */
      assert((aux_offset & 0xfff) == 0);

      if (devinfo->gen >= 8) {
         uint64_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
         *aux_addr = brw_state_reloc(&brw->batch,
                                     *surf_offset +
                                     brw->isl_dev.ss.aux_addr_offset,
                                     aux_bo, *aux_addr,
                                     reloc_flags);
      } else {
         uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
         *aux_addr = brw_state_reloc(&brw->batch,
                                     *surf_offset +
                                     brw->isl_dev.ss.aux_addr_offset,
                                     aux_bo, *aux_addr,
                                     reloc_flags);
      }
   }

   if (clear_bo != NULL) {
      /* Make sure the offset is aligned to a cacheline. */
      assert((clear_offset & 0x3f) == 0);
      uint64_t *clear_address =
         state + brw->isl_dev.ss.clear_color_state_offset;
      *clear_address = brw_state_reloc(&brw->batch,
                                       *surf_offset +
                                       brw->isl_dev.ss.clear_color_state_offset,
                                       clear_bo, *clear_address, reloc_flags);
   }
}

static uint32_t
gen6_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 unsigned unit,
                                 uint32_t surf_index)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;

   assert(brw_render_target_supported(brw, rb));

   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }
   enum isl_format isl_format = brw->mesa_to_isl_render_format[rb_format];

   struct isl_view view = {
      .format = isl_format,
      .base_level = irb->mt_level - irb->mt->first_level,
      .levels = 1,
      .base_array_layer = irb->mt_layer,
      .array_len = MAX2(irb->layer_count, 1),
      .swizzle = ISL_SWIZZLE_IDENTITY,
      .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
   };

   uint32_t offset;
   brw_emit_surface_state(brw, mt, mt->target, view,
                          brw->draw_aux_usage[unit],
                          &offset, surf_index,
                          RELOC_WRITE);
   return offset;
}

GLuint
translate_tex_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_1D:
   case GL_TEXTURE_1D_ARRAY_EXT:
      return BRW_SURFACE_1D;

   case GL_TEXTURE_RECTANGLE_NV:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_2D:
   case GL_TEXTURE_2D_ARRAY_EXT:
   case GL_TEXTURE_EXTERNAL_OES:
   case GL_TEXTURE_2D_MULTISAMPLE:
   case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_3D:
      return BRW_SURFACE_3D;

   case GL_TEXTURE_CUBE_MAP:
   case GL_TEXTURE_CUBE_MAP_ARRAY:
      return BRW_SURFACE_CUBE;

   default:
      unreachable("not reached");
   }
}

uint32_t
brw_get_surface_tiling_bits(enum isl_tiling tiling)
{
   switch (tiling) {
   case ISL_TILING_X:
      return BRW_SURFACE_TILED;
   case ISL_TILING_Y0:
      return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
   default:
      return 0;
   }
}


uint32_t
brw_get_surface_num_multisamples(unsigned num_samples)
{
   if (num_samples > 1)
      return BRW_SURFACE_MULTISAMPLECOUNT_4;
   else
      return BRW_SURFACE_MULTISAMPLECOUNT_1;
}

/**
 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
 * swizzling.
 */
int
brw_get_texture_swizzle(const struct gl_context *ctx,
                        const struct gl_texture_object *t)
{
   const struct gl_texture_image *img = t->Image[0][t->BaseLevel];

   int swizzles[SWIZZLE_NIL + 1] = {
      SWIZZLE_X,
      SWIZZLE_Y,
      SWIZZLE_Z,
      SWIZZLE_W,
      SWIZZLE_ZERO,
      SWIZZLE_ONE,
      SWIZZLE_NIL
   };

   if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
       img->_BaseFormat == GL_DEPTH_STENCIL) {
      GLenum depth_mode = t->DepthMode;

      /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
       * with depth component data specified with a sized internal format.
       * Otherwise, it's left at the old default, GL_LUMINANCE.
       */
      if (_mesa_is_gles3(ctx) &&
          img->InternalFormat != GL_DEPTH_COMPONENT &&
          img->InternalFormat != GL_DEPTH_STENCIL) {
         depth_mode = GL_RED;
      }

      switch (depth_mode) {
      case GL_ALPHA:
         swizzles[0] = SWIZZLE_ZERO;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_LUMINANCE:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
         break;
      case GL_INTENSITY:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_RED:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_ONE;
         break;
      }
   }

   GLenum datatype = _mesa_get_format_datatype(img->TexFormat);

   /* If the texture's format is alpha-only, force R, G, and B to
    * 0.0.  Similarly, if the texture's format has no alpha channel,
    * force the alpha value read to 1.0.  This allows for the
    * implementation to use an RGBA texture for any of these formats
    * without leaking any unexpected values.
    */
   switch (img->_BaseFormat) {
   case GL_ALPHA:
      swizzles[0] = SWIZZLE_ZERO;
      swizzles[1] = SWIZZLE_ZERO;
      swizzles[2] = SWIZZLE_ZERO;
      break;
   case GL_LUMINANCE:
      if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
      }
      break;
   case GL_LUMINANCE_ALPHA:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_W;
      }
      break;
   case GL_INTENSITY:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
      }
      break;
   case GL_RED:
      if (img->TexFormat == MESA_FORMAT_R_SRGB8) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_ONE;
         break;
      }
      /* fallthrough */
   case GL_RG:
   case GL_RGB:
      if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0 ||
          img->TexFormat == MESA_FORMAT_RGB_DXT1 ||
          img->TexFormat == MESA_FORMAT_SRGB_DXT1)
         swizzles[3] = SWIZZLE_ONE;
      break;
   }

   return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
                        swizzles[GET_SWZ(t->_Swizzle, 1)],
                        swizzles[GET_SWZ(t->_Swizzle, 2)],
                        swizzles[GET_SWZ(t->_Swizzle, 3)]);
}

/**
 * Convert a swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED).  The mappings are
 *
 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
 *         0          1          2          3             4            5
 *         4          5          6          7             0            1
 *   SCS_RED, SCS_GREEN,  SCS_BLUE, SCS_ALPHA,     SCS_ZERO,     SCS_ONE
 *
 * which is simply adding 4 then modding by 8 (or anding with 7).
 *
 * We then may need to apply workarounds for textureGather hardware bugs.
 */
static unsigned
swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
{
   unsigned scs = (swizzle + 4) & 7;

   return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
}
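
/* A quick spot-check of the arithmetic above (values come straight from
 * the mapping table, nothing new): SWIZZLE_X is 0, so (0 + 4) & 7 == 4 ==
 * HSW_SCS_RED, and SWIZZLE_ZERO is 4, so (4 + 4) & 7 == 0 == HSW_SCS_ZERO.
 */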

static void brw_update_texture_surface(struct gl_context *ctx,
                                       unsigned unit,
                                       uint32_t *surf_offset,
                                       bool for_gather,
                                       bool for_txf,
                                       uint32_t plane)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;

   if (obj->Target == GL_TEXTURE_BUFFER) {
      brw_update_buffer_texture_surface(ctx, unit, surf_offset);

   } else {
      struct intel_texture_object *intel_obj = intel_texture_object(obj);
      struct intel_mipmap_tree *mt = intel_obj->mt;

      if (plane > 0) {
         if (mt->plane[plane - 1] == NULL)
            return;
         mt = mt->plane[plane - 1];
      }

      struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
      /* If this is a view with restricted NumLayers, then our effective depth
       * is not just the miptree depth.
       */
      unsigned view_num_layers;
      if (obj->Immutable && obj->Target != GL_TEXTURE_3D) {
         view_num_layers = obj->NumLayers;
      } else {
         view_num_layers = mt->surf.dim == ISL_SURF_DIM_3D ?
                              mt->surf.logical_level0_px.depth :
                              mt->surf.logical_level0_px.array_len;
      }

      /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
       * texturing functions that return a float, as our code generation always
       * selects the .x channel (which would always be 0).
       */
      struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
      const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
         (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
          firstImage->_BaseFormat == GL_DEPTH_STENCIL);
      const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
                                brw_get_texture_swizzle(&brw->ctx, obj));

      mesa_format mesa_fmt;
      if (firstImage->_BaseFormat == GL_DEPTH_STENCIL ||
          firstImage->_BaseFormat == GL_DEPTH_COMPONENT) {
         /* The format from intel_obj may be a combined depth stencil format
          * when we just want depth.  Pull it from the miptree instead.  This
          * is safe because texture views aren't allowed on depth/stencil.
          */
         mesa_fmt = mt->format;
      } else if (intel_miptree_has_etc_shadow(brw, mt)) {
         mesa_fmt = mt->shadow_mt->format;
      } else if (plane > 0) {
         mesa_fmt = mt->format;
      } else {
         mesa_fmt = intel_obj->_Format;
      }
      enum isl_format format = translate_tex_format(brw, mesa_fmt,
                                                    for_txf ? GL_DECODE_EXT :
                                                    sampler->sRGBDecode);

      /* Implement gen6 and gen7 gather work-around */
      bool need_green_to_blue = false;
      if (for_gather) {
         if (devinfo->gen == 7 && (format == ISL_FORMAT_R32G32_FLOAT ||
                                   format == ISL_FORMAT_R32G32_SINT ||
                                   format == ISL_FORMAT_R32G32_UINT)) {
            format = ISL_FORMAT_R32G32_FLOAT_LD;
            need_green_to_blue = devinfo->is_haswell;
         } else if (devinfo->gen == 6) {
            /* Sandybridge's gather4 message is broken for integer formats.
             * To work around this, we pretend the surface is UNORM for
             * 8 or 16-bit formats, and emit shader instructions to recover
             * the real INT/UINT value.  For 32-bit formats, we pretend
             * the surface is FLOAT, and simply reinterpret the resulting
             * bits.
             */
            switch (format) {
            case ISL_FORMAT_R8_SINT:
            case ISL_FORMAT_R8_UINT:
               format = ISL_FORMAT_R8_UNORM;
               break;

            case ISL_FORMAT_R16_SINT:
            case ISL_FORMAT_R16_UINT:
               format = ISL_FORMAT_R16_UNORM;
               break;

            case ISL_FORMAT_R32_SINT:
            case ISL_FORMAT_R32_UINT:
               format = ISL_FORMAT_R32_FLOAT;
               break;

            default:
               break;
            }
         }
      }

      if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
         if (devinfo->gen <= 7) {
            assert(mt->shadow_mt && !mt->stencil_mt->shadow_needs_update);
            mt = mt->shadow_mt;
         } else {
            mt = mt->stencil_mt;
         }
         format = ISL_FORMAT_R8_UINT;
      } else if (devinfo->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
         assert(mt->shadow_mt && !mt->shadow_needs_update);
         mt = mt->shadow_mt;
         format = ISL_FORMAT_R8_UINT;
      } else if (intel_miptree_needs_fake_etc(brw, mt)) {
         assert(mt->shadow_mt && !mt->shadow_needs_update);
         mt = mt->shadow_mt;
      }

      const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];

      struct isl_view view = {
         .format = format,
         .base_level = obj->MinLevel + obj->BaseLevel,
         .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
         .base_array_layer = obj->MinLayer,
         .array_len = view_num_layers,
         .swizzle = {
            .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
            .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
            .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
            .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
         },
         .usage = ISL_SURF_USAGE_TEXTURE_BIT,
      };

      /* On Ivy Bridge and earlier, we handle texture swizzle with shader
       * code.  The actual surface swizzle should be identity.
       */
      if (devinfo->gen <= 7 && !devinfo->is_haswell)
         view.swizzle = ISL_SWIZZLE_IDENTITY;

      if (obj->Target == GL_TEXTURE_CUBE_MAP ||
          obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
         view.usage |= ISL_SURF_USAGE_CUBE_BIT;

      enum isl_aux_usage aux_usage =
         intel_miptree_texture_aux_usage(brw, mt, format,
                                         brw->gen9_astc5x5_wa_tex_mask);

      brw_emit_surface_state(brw, mt, mt->target, view, aux_usage,
                             surf_offset, surf_index,
                             0);
   }
}

void
brw_emit_buffer_surface_state(struct brw_context *brw,
                              uint32_t *out_offset,
                              struct brw_bo *bo,
                              unsigned buffer_offset,
                              unsigned surface_format,
                              unsigned buffer_size,
                              unsigned pitch,
                              unsigned reloc_flags)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *dw = brw_state_batch(brw,
                                  brw->isl_dev.ss.size,
                                  brw->isl_dev.ss.align,
                                  out_offset);

   isl_buffer_fill_state(&brw->isl_dev, dw,
                         .address = !bo ? buffer_offset :
                                    brw_state_reloc(&brw->batch,
                                                    *out_offset + brw->isl_dev.ss.addr_offset,
                                                    bo, buffer_offset,
                                                    reloc_flags),
                         .size_B = buffer_size,
                         .format = surface_format,
                         .swizzle = ISL_SWIZZLE_IDENTITY,
                         .stride_B = pitch,
                         .mocs = brw_get_bo_mocs(devinfo, bo));
}

static unsigned
buffer_texture_range_size(struct brw_context *brw,
                          struct gl_texture_object *obj)
{
   assert(obj->Target == GL_TEXTURE_BUFFER);
   const unsigned texel_size = _mesa_get_format_bytes(obj->_BufferObjectFormat);
   const unsigned buffer_size = (!obj->BufferObject ? 0 :
                                 obj->BufferObject->Size);
   const unsigned buffer_offset = MIN2(buffer_size, obj->BufferOffset);

   /* The ARB_texture_buffer_object specification says:
    *
    *    "The number of texels in the buffer texture's texel array is given by
    *
    *        floor(<buffer_size> / (<components> * sizeof(<base_type>))),
    *
    *     where <buffer_size> is the size of the buffer object, in basic
    *     machine units and <components> and <base_type> are the element count
    *     and base data type for elements, as specified in Table X.1.  The
    *     number of texels in the texel array is then clamped to the
    *     implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
    *
    * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
    * so that when ISL divides by stride to obtain the number of texels, that
    * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
    */
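   /* For instance (hypothetical numbers): a 1000-byte range of RGBA32F
    * texels (texel_size == 16) yields floor(1000 / 16) == 62 texels once
    * ISL divides by the stride, and the MIN3 below additionally caps the
    * byte range at MaxTextureBufferSize * 16 so that texel count can never
    * exceed MAX_TEXTURE_BUFFER_SIZE.
    */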
   return MIN3((unsigned)obj->BufferSize,
               buffer_size - buffer_offset,
               brw->ctx.Const.MaxTextureBufferSize * texel_size);
}

void
brw_update_buffer_texture_surface(struct gl_context *ctx,
                                  unsigned unit,
                                  uint32_t *surf_offset)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_buffer_object *intel_obj =
      intel_buffer_object(tObj->BufferObject);
   const unsigned size = buffer_texture_range_size(brw, tObj);
   struct brw_bo *bo = NULL;
   mesa_format format = tObj->_BufferObjectFormat;
   const enum isl_format isl_format = brw_isl_format_for_mesa_format(format);
   int texel_size = _mesa_get_format_bytes(format);

   if (intel_obj)
      bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size,
                                  false);

   if (isl_format == ISL_FORMAT_UNSUPPORTED) {
      _mesa_problem(NULL, "bad format %s for texture buffer\n",
                    _mesa_get_format_name(format));
   }

   brw_emit_buffer_surface_state(brw, surf_offset, bo,
                                 tObj->BufferOffset,
                                 isl_format,
                                 size,
                                 texel_size,
                                 0);
}

/**
 * Set up a binding table entry for use by stream output logic (transform
 * feedback).
 *
 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
 */
void
brw_update_sol_surface(struct brw_context *brw,
                       struct gl_buffer_object *buffer_obj,
                       uint32_t *out_offset, unsigned num_vector_components,
                       unsigned stride_dwords, unsigned offset_dwords)
{
   struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
   uint32_t offset_bytes = 4 * offset_dwords;
   struct brw_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
                                              offset_bytes,
                                              buffer_obj->Size - offset_bytes,
                                              true);
   uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
   uint32_t pitch_minus_1 = 4 * stride_dwords - 1;
   size_t size_dwords = buffer_obj->Size / 4;
   uint32_t buffer_size_minus_1, width, height, depth, surface_format;

   /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
    * too big to map using a single binding table entry?
    */
   assert((size_dwords - offset_dwords) / stride_dwords
          <= BRW_MAX_NUM_BUFFER_ENTRIES);

   if (size_dwords > offset_dwords + num_vector_components) {
      /* There is room for at least 1 transform feedback output in the buffer.
       * Compute the number of additional transform feedback outputs the
       * buffer has room for.
       */
      buffer_size_minus_1 =
         (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
   } else {
      /* There isn't even room for a single transform feedback output in the
       * buffer.  We can't configure the binding table entry to prevent output
       * entirely; we'll have to rely on the geometry shader to detect
       * overflow.  But to minimize the damage in case of a bug, set up the
       * binding table entry to just allow a single output.
       */
      buffer_size_minus_1 = 0;
   }
   width = buffer_size_minus_1 & 0x7f;
   height = (buffer_size_minus_1 & 0xfff80) >> 7;
   depth = (buffer_size_minus_1 & 0x7f00000) >> 20;

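   /* A worked example of the field packing above (hypothetical count): for
    * buffer_size_minus_1 == 1000 (0x3e8),
    *
    *    width  = 0x3e8 & 0x7f              = 104  (bits  6:0)
    *    height = (0x3e8 & 0xfff80) >> 7    =   7  (bits 19:7)
    *    depth  = (0x3e8 & 0x7f00000) >> 20 =   0  (bits 26:20)
    *
    * i.e. the up-to-27-bit entry count is split across the surface's width,
    * height, and depth fields.
    */
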
   switch (num_vector_components) {
   case 1:
      surface_format = ISL_FORMAT_R32_FLOAT;
      break;
   case 2:
      surface_format = ISL_FORMAT_R32G32_FLOAT;
      break;
   case 3:
      surface_format = ISL_FORMAT_R32G32B32_FLOAT;
      break;
   case 4:
      surface_format = ISL_FORMAT_R32G32B32A32_FLOAT;
      break;
   default:
      unreachable("Invalid vector size for transform feedback output");
   }

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
             BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
             surface_format << BRW_SURFACE_FORMAT_SHIFT |
             BRW_SURFACE_RC_READ_WRITE;
   surf[1] = brw_state_reloc(&brw->batch,
                             *out_offset + 4, bo, offset_bytes, RELOC_WRITE);
   surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
              height << BRW_SURFACE_HEIGHT_SHIFT);
   surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = 0;
   surf[5] = 0;
}

/* Creates a new WM constant buffer reflecting the current fragment program's
 * constants, if needed by the fragment program.
 *
 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
 * state atom.
 */
static void
brw_upload_wm_pull_constants(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->wm.base;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct brw_program *fp =
      (struct brw_program *) brw->programs[MESA_SHADER_FRAGMENT];

   /* BRW_NEW_FS_PROG_DATA */
   struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;

   _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
   /* _NEW_PROGRAM_CONSTANTS */
   brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
                             stage_state, prog_data);
}

const struct brw_tracked_state brw_wm_pull_constants = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_pull_constants,
};

/**
 * Creates a null renderbuffer surface.
 *
 * This is used when the shader doesn't write to any color output.  An FB
 * write to target 0 will still be emitted, because that's how the thread is
 * terminated (and computed depth is returned), so we need to have the
 * hardware discard the target 0 color output.
 */
static void
emit_null_surface_state(struct brw_context *brw,
                        const struct gl_framebuffer *fb,
                        uint32_t *out_offset)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *surf = brw_state_batch(brw,
                                    brw->isl_dev.ss.size,
                                    brw->isl_dev.ss.align,
                                    out_offset);

   /* Use the fb dimensions or 1x1x1 */
   const unsigned width = fb ? _mesa_geometric_width(fb) : 1;
   const unsigned height = fb ? _mesa_geometric_height(fb) : 1;
   const unsigned samples = fb ? _mesa_geometric_samples(fb) : 1;

   if (devinfo->gen != 6 || samples <= 1) {
      isl_null_fill_state(&brw->isl_dev, surf,
                          isl_extent3d(width, height, 1));
      return;
   }

   /* On Gen6, null render targets seem to cause GPU hangs when multisampling.
    * So work around this problem by rendering into a dummy color buffer.
    *
    * To decrease the amount of memory needed by the workaround buffer, we
    * set its pitch to 128 bytes (the width of a Y tile).  This means that
    * the amount of memory needed for the workaround buffer is
    * (width_in_tiles + height_in_tiles - 1) tiles.
    *
    * Note that since the workaround buffer will be interpreted by the
    * hardware as an interleaved multisampled buffer, we need to compute
    * width_in_tiles and height_in_tiles by dividing the width and height
    * by 16 rather than the normal Y-tile size of 32.
    */
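   /* For example (hypothetical framebuffer): at 1920x1080,
    *
    *    width_in_tiles  = ALIGN(1920, 16) / 16 = 120
    *    height_in_tiles = ALIGN(1080, 16) / 16 =  68
    *    size_needed     = (120 + 68 - 1) * 4096 = 765952 bytes (~748 KiB)
    *
    * versus the roughly 8 MB a full 1920x1080x4-byte dummy buffer would
    * take.
    */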
   unsigned width_in_tiles = ALIGN(width, 16) / 16;
   unsigned height_in_tiles = ALIGN(height, 16) / 16;
   unsigned pitch_minus_1 = 127;
   unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
   brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
                      size_needed);

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              ISL_FORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
   surf[1] = brw_state_reloc(&brw->batch, *out_offset + 4,
                             brw->wm.multisampled_null_render_target_bo,
                             0, RELOC_WRITE);

   surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   /* From the Sandy Bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
    * Notes):
    *
    *   If Surface Type is SURFTYPE_NULL, this field must be TRUE
    */
   surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = BRW_SURFACE_MULTISAMPLECOUNT_4;
   surf[5] = 0;
}

/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffers support.
 */
static uint32_t
gen4_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 unsigned unit,
                                 uint32_t surf_index)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;
   uint32_t *surf;
   uint32_t tile_x, tile_y;
   enum isl_format format;
   uint32_t offset;
   /* _NEW_BUFFERS */
   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   /* BRW_NEW_FS_PROG_DATA */

   if (rb->TexImage && !devinfo->has_surface_tile_offset) {
      intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);

      if (tile_x != 0 || tile_y != 0) {
         /* Original gen4 hardware couldn't draw to a non-tile-aligned
          * destination in a miptree unless you actually set up your
          * renderbuffer as a miptree and used the fragile
          * lod/array_index/etc. controls to select the image.  So, instead,
          * we just make a new single-level miptree and render into that.
          */
         intel_renderbuffer_move_to_temp(brw, irb, false);
         assert(irb->align_wa_mt);
         mt = irb->align_wa_mt;
      }
   }

   surf = brw_state_batch(brw, 6 * 4, 32, &offset);

   format = brw->mesa_to_isl_render_format[rb_format];
   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              format << BRW_SURFACE_FORMAT_SHIFT);

   /* reloc */
   assert(mt->offset % mt->cpp == 0);
   surf[1] = brw_state_reloc(&brw->batch, offset + 4, mt->bo,
                             mt->offset +
                             intel_renderbuffer_get_tile_offsets(irb,
                                                                 &tile_x,
                                                                 &tile_y),
                             RELOC_WRITE);

   surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   surf[3] = (brw_get_surface_tiling_bits(mt->surf.tiling) |
              (mt->surf.row_pitch_B - 1) << BRW_SURFACE_PITCH_SHIFT);

   surf[4] = brw_get_surface_num_multisamples(mt->surf.samples);

   assert(devinfo->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
   /* Note that the low bits of these fields are missing, so
    * there's the possibility of getting in trouble.
    */
   assert(tile_x % 4 == 0);
   assert(tile_y % 2 == 0);
   surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
              (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
              (mt->surf.image_alignment_el.height == 4 ?
               BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));

   if (devinfo->gen < 6) {
      /* _NEW_COLOR */
      if (!ctx->Color.ColorLogicOpEnabled &&
          ctx->Color._AdvancedBlendMode == BLEND_NONE &&
          (ctx->Color.BlendEnabled & (1 << unit)))
         surf[0] |= BRW_SURFACE_BLEND_ENABLED;

      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 0))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 1))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 2))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;

      /* Disable writes to the alpha component when the renderbuffer is
       * XRGB.
       */
      if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
          !GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 3)) {
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
      }
   }

   return offset;
}

static void
update_renderbuffer_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS | _NEW_COLOR */
   const struct gl_framebuffer *fb = ctx->DrawBuffer;

   /* Render targets always start at binding table index 0. */
   const unsigned rt_start = 0;

   uint32_t *surf_offsets = brw->wm.base.surf_offset;

   /* Update surfaces for drawing buffers */
   if (fb->_NumColorDrawBuffers >= 1) {
      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];

         if (intel_renderbuffer(rb)) {
            surf_offsets[rt_start + i] = devinfo->gen >= 6 ?
               gen6_update_renderbuffer_surface(brw, rb, i, rt_start + i) :
               gen4_update_renderbuffer_surface(brw, rb, i, rt_start + i);
         } else {
            emit_null_surface_state(brw, fb, &surf_offsets[rt_start + i]);
         }
      }
   } else {
      emit_null_surface_state(brw, fb, &surf_offsets[rt_start]);
   }

   /* The PIPE_CONTROL command description says:
    *
    *   "Whenever a Binding Table Index (BTI) used by a Render Target Message
    *    points to a different RENDER_SURFACE_STATE, SW must issue a Render
    *    Target Cache Flush by enabling this bit.  When render target flush
    *    is set due to new association of BTI, PS Scoreboard Stall bit must
    *    be set in this packet."
    */
   if (devinfo->gen >= 11) {
      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                  PIPE_CONTROL_STALL_AT_SCOREBOARD);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR,
      .brw = BRW_NEW_BATCH,
   },
   .emit = update_renderbuffer_surfaces,
};

const struct brw_tracked_state gen6_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE,
   },
   .emit = update_renderbuffer_surfaces,
};

static void
update_renderbuffer_read_surfaces(struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;

   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);

   if (wm_prog_data->has_render_target_reads &&
       !ctx->Extensions.EXT_shader_framebuffer_fetch) {
      /* _NEW_BUFFERS */
      const struct gl_framebuffer *fb = ctx->DrawBuffer;

      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
         const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
         const unsigned surf_index =
            wm_prog_data->binding_table.render_target_read_start + i;
         uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];

         if (irb) {
            const enum isl_format format = brw->mesa_to_isl_render_format[
               _mesa_get_render_format(ctx, intel_rb_format(irb))];
            assert(isl_format_supports_sampling(&brw->screen->devinfo,
                                                format));

            /* Override the target of the texture if the render buffer is a
             * single slice of a 3D texture (since the minimum array element
             * field of the surface state structure is ignored by the sampler
             * unit for 3D textures on some hardware), or if the render buffer
             * is a 1D array (since shaders always provide the array index
             * coordinate at the Z component to avoid state-dependent
             * recompiles when changing the texture target of the
             * framebuffer).
             */
            const GLenum target =
               (irb->mt->target == GL_TEXTURE_3D &&
                irb->layer_count == 1) ? GL_TEXTURE_2D :
               irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
               irb->mt->target;

            const struct isl_view view = {
               .format = format,
               .base_level = irb->mt_level - irb->mt->first_level,
               .levels = 1,
               .base_array_layer = irb->mt_layer,
               .array_len = irb->layer_count,
               .swizzle = ISL_SWIZZLE_IDENTITY,
               .usage = ISL_SURF_USAGE_TEXTURE_BIT,
            };

            enum isl_aux_usage aux_usage =
               intel_miptree_texture_aux_usage(brw, irb->mt, format,
                                               brw->gen9_astc5x5_wa_tex_mask);
            if (brw->draw_aux_usage[i] == ISL_AUX_USAGE_NONE)
               aux_usage = ISL_AUX_USAGE_NONE;

            brw_emit_surface_state(brw, irb->mt, target, view, aux_usage,
                                   surf_offset, surf_index,
                                   0);

         } else {
            emit_null_surface_state(brw, fb, surf_offset);
         }
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = update_renderbuffer_read_surfaces,
};

static bool
is_depth_texture(struct intel_texture_object *iobj)
{
   GLenum base_format = _mesa_get_format_base_format(iobj->_Format);
   return base_format == GL_DEPTH_COMPONENT ||
          (base_format == GL_DEPTH_STENCIL && !iobj->base.StencilSampling);
}

static void
update_stage_texture_surfaces(struct brw_context *brw,
                              const struct gl_program *prog,
                              struct brw_stage_state *stage_state,
                              bool for_gather, uint32_t plane)
{
   if (!prog)
      return;

   struct gl_context *ctx = &brw->ctx;

   uint32_t *surf_offset = stage_state->surf_offset;

   /* BRW_NEW_*_PROG_DATA */
   if (for_gather)
      surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
   else
      surf_offset += stage_state->prog_data->binding_table.plane_start[plane];

   unsigned num_samplers = util_last_bit(prog->info.textures_used);
   for (unsigned s = 0; s < num_samplers; s++) {
      surf_offset[s] = 0;

      if (prog->info.textures_used & (1 << s)) {
         const unsigned unit = prog->SamplerUnits[s];
         const bool used_by_txf = prog->info.textures_used_by_txf & (1 << s);
         struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
         struct intel_texture_object *iobj = intel_texture_object(obj);

         /* _NEW_TEXTURE */
         if (!obj)
            continue;

         if ((prog->ShadowSamplers & (1 << s)) && !is_depth_texture(iobj)) {
            /* A programming note for the sample_c message says:
             *
             *    "The Surface Format of the associated surface must be
             *     indicated as supporting shadow mapping as indicated in the
             *     surface format table."
             *
             * Accessing non-depth textures via a sampler*Shadow type is
             * undefined.  GLSL 4.50 page 162 says:
             *
             *    "If a shadow texture call is made to a sampler that does not
             *     represent a depth texture, then results are undefined."
             *
             * We give them a null surface (zeros) for undefined.  We've seen
             * GPU hangs with color buffers and sample_c, so we try and avoid
             * those with this hack.
             */
            emit_null_surface_state(brw, NULL, surf_offset + s);
         } else {
            brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather,
                                       used_by_txf, plane);
         }
      }
   }
}


/**
 * Construct SURFACE_STATE objects for enabled textures.
 */
static void
brw_update_texture_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* BRW_NEW_VERTEX_PROGRAM */
   struct gl_program *vs = brw->programs[MESA_SHADER_VERTEX];

   /* BRW_NEW_TESS_PROGRAMS */
   struct gl_program *tcs = brw->programs[MESA_SHADER_TESS_CTRL];
   struct gl_program *tes = brw->programs[MESA_SHADER_TESS_EVAL];

   /* BRW_NEW_GEOMETRY_PROGRAM */
   struct gl_program *gs = brw->programs[MESA_SHADER_GEOMETRY];

   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct gl_program *fs = brw->programs[MESA_SHADER_FRAGMENT];

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
   update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
   update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
   update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
   update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);

   /* Emit an alternate set of surface states for gather.  This allows the
    * surface format to be overridden for only the gather4 messages.
    */
   if (devinfo->gen < 8) {
      if (vs && vs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
      if (tcs && tcs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
      if (tes && tes->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
      if (gs && gs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
      if (fs && fs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
   }

   if (fs) {
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_GEOMETRY_PROGRAM |
             BRW_NEW_GS_PROG_DATA |
             BRW_NEW_TESS_PROGRAMS |
             BRW_NEW_TCS_PROG_DATA |
             BRW_NEW_TES_PROG_DATA |
             BRW_NEW_TEXTURE_BUFFER |
             BRW_NEW_VERTEX_PROGRAM |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = brw_update_texture_surfaces,
};

static void
brw_update_cs_texture_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* BRW_NEW_COMPUTE_PROGRAM */
   struct gl_program *cs = brw->programs[MESA_SHADER_COMPUTE];

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);

   /* Emit an alternate set of surface states for gather.  This allows the
    * surface format to be overridden for only the gather4 messages.
    */
   if (devinfo->gen < 8) {
      if (cs && cs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_cs_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_COMPUTE_PROGRAM |
             BRW_NEW_AUX_STATE,
   },
   .emit = brw_update_cs_texture_surfaces,
};

static void
upload_buffer_surface(struct brw_context *brw,
                      struct gl_buffer_binding *binding,
                      uint32_t *out_offset,
                      enum isl_format format,
                      unsigned reloc_flags)
{
   if (!binding->BufferObject) {
      emit_null_surface_state(brw, NULL, out_offset);
   } else {
      ptrdiff_t size = binding->BufferObject->Size - binding->Offset;
      if (!binding->AutomaticSize)
         size = MIN2(size, binding->Size);

      if (size == 0) {
         emit_null_surface_state(brw, NULL, out_offset);
         return;
      }

      struct intel_buffer_object *iobj =
         intel_buffer_object(binding->BufferObject);
      struct brw_bo *bo =
         intel_bufferobj_buffer(brw, iobj, binding->Offset, size,
                                (reloc_flags & RELOC_WRITE) != 0);

      brw_emit_buffer_surface_state(brw, out_offset, bo, binding->Offset,
                                    format, size, 1, reloc_flags);
   }
}

void
brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;

   if (!prog || (prog->info.num_ubos == 0 &&
                 prog->info.num_ssbos == 0 &&
                 prog->info.num_abos == 0))
      return;

   if (prog->info.num_ubos) {
      assert(prog_data->binding_table.ubo_start < BRW_MAX_SURFACES);
      uint32_t *ubo_surf_offsets =
         &stage_state->surf_offset[prog_data->binding_table.ubo_start];

      for (int i = 0; i < prog->info.num_ubos; i++) {
         struct gl_buffer_binding *binding =
            &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];
         upload_buffer_surface(brw, binding, &ubo_surf_offsets[i],
                               ISL_FORMAT_R32G32B32A32_FLOAT, 0);
      }
   }

   if (prog->info.num_ssbos || prog->info.num_abos) {
      assert(prog_data->binding_table.ssbo_start < BRW_MAX_SURFACES);
      uint32_t *ssbo_surf_offsets =
         &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
      uint32_t *abo_surf_offsets = ssbo_surf_offsets + prog->info.num_ssbos;

      for (int i = 0; i < prog->info.num_abos; i++) {
         struct gl_buffer_binding *binding =
            &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
         upload_buffer_surface(brw, binding, &abo_surf_offsets[i],
                               ISL_FORMAT_RAW, RELOC_WRITE);
      }

      for (int i = 0; i < prog->info.num_ssbos; i++) {
         struct gl_buffer_binding *binding =
            &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];

         upload_buffer_surface(brw, binding, &ssbo_surf_offsets[i],
                               ISL_FORMAT_RAW, RELOC_WRITE);
      }
   }

   stage_state->push_constants_dirty = true;
   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

static void
brw_upload_wm_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog = ctx->FragmentProgram._Current;

   /* BRW_NEW_FS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
}

const struct brw_tracked_state brw_wm_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_wm_ubo_surfaces,
};

static void
brw_upload_cs_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   /* BRW_NEW_CS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog, &brw->cs.base, brw->cs.base.prog_data);
}

const struct brw_tracked_state brw_cs_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_cs_ubo_surfaces,
};

static void
brw_upload_cs_image_surfaces(struct brw_context *brw)
{
   /* _NEW_PROGRAM */
   const struct gl_program *cp = brw->programs[MESA_SHADER_COMPUTE];

   if (cp) {
      /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, cp, &brw->cs.base,
                                brw->cs.base.prog_data);
   }
}

const struct brw_tracked_state brw_cs_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_AUX_STATE |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_cs_image_surfaces,
};

static uint32_t
get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   enum isl_format hw_format = brw_isl_format_for_mesa_format(format);
   if (access == GL_WRITE_ONLY || access == GL_NONE) {
      return hw_format;
   } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
      /* Typed surface reads support a very limited subset of the shader
       * image formats.  Translate it into the closest format the
       * hardware supports.
       */
      return isl_lower_storage_image_format(devinfo, hw_format);
   } else {
      /* The hardware doesn't actually support a typed format that we can use
       * so we have to fall back to untyped read/write messages.
       */
      return ISL_FORMAT_RAW;
   }
}
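
/* A sketch of the three cases above (the exact lowering is per-generation
 * and comes from isl, not this file, so treat these as illustrative):
 *
 *    write-only image         -> hw_format unchanged (writes always work)
 *    readable, typed-read OK  -> the isl-lowered format, e.g. an RGBA8
 *                                image may come back as a wider
 *                                single-channel format on older hardware
 *    readable, no typed read  -> ISL_FORMAT_RAW, i.e. untyped messages with
 *                                manual format conversion in the shader
 */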

static void
update_default_image_param(struct brw_context *brw,
                           struct gl_image_unit *u,
                           struct brw_image_param *param)
{
   memset(param, 0, sizeof(*param));
   /* Set the swizzling shifts to all-ones to effectively disable swizzling --
    * see emit_address_calculation() in brw_fs_surface_builder.cpp for a more
    * detailed explanation of these parameters.
    */
   param->swizzling[0] = 0xff;
   param->swizzling[1] = 0xff;
}

static void
update_buffer_image_param(struct brw_context *brw,
                          struct gl_image_unit *u,
                          struct brw_image_param *param)
{
   const unsigned size = buffer_texture_range_size(brw, u->TexObj);
   update_default_image_param(brw, u, param);

   param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
   param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
}
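
/* For example (hypothetical buffer): a 4096-byte range of RGBA32F texels
 * (16 bytes each) gives
 *
 *    param->size[0]   = 4096 / 16 = 256   (texel count)
 *    param->stride[0] = 16                (bytes per texel)
 *
 * which is what the shader-side address calculation consumes.
 */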

static void
update_image_surface(struct brw_context *brw,
                     struct gl_image_unit *u,
                     GLenum access,
                     uint32_t *surf_offset,
                     struct brw_image_param *param)
{
   if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
      struct gl_texture_object *obj = u->TexObj;
      const unsigned format = get_image_format(brw, u->_ActualFormat, access);
      const bool written = (access != GL_READ_ONLY && access != GL_NONE);

      if (obj->Target == GL_TEXTURE_BUFFER) {
         const unsigned texel_size = (format == ISL_FORMAT_RAW ? 1 :
                                      _mesa_get_format_bytes(u->_ActualFormat));
         const unsigned buffer_size = buffer_texture_range_size(brw, obj);
         struct brw_bo *const bo = !obj->BufferObject ? NULL :
            intel_bufferobj_buffer(brw, intel_buffer_object(obj->BufferObject),
                                   obj->BufferOffset, buffer_size, written);

         brw_emit_buffer_surface_state(
            brw, surf_offset, bo, obj->BufferOffset,
            format, buffer_size, texel_size,
            written ? RELOC_WRITE : 0);

         update_buffer_image_param(brw, u, param);

      } else {
         struct intel_texture_object *intel_obj = intel_texture_object(obj);
         struct intel_mipmap_tree *mt = intel_obj->mt;

         unsigned base_layer, num_layers;
         if (u->Layered) {
            if (obj->Target == GL_TEXTURE_3D) {
               base_layer = 0;
               num_layers = minify(mt->surf.logical_level0_px.depth, u->Level);
            } else {
               assert(obj->Immutable || obj->MinLayer == 0);
               base_layer = obj->MinLayer;
               num_layers = obj->Immutable ?
                               obj->NumLayers :
                               mt->surf.logical_level0_px.array_len;
            }
         } else {
            base_layer = obj->MinLayer + u->_Layer;
            num_layers = 1;
         }

         struct isl_view view = {
            .format = format,
            .base_level = obj->MinLevel + u->Level,
            .levels = 1,
            .base_array_layer = base_layer,
            .array_len = num_layers,
            .swizzle = ISL_SWIZZLE_IDENTITY,
            .usage = ISL_SURF_USAGE_STORAGE_BIT,
         };

         if (format == ISL_FORMAT_RAW) {
            brw_emit_buffer_surface_state(
               brw, surf_offset, mt->bo, mt->offset,
               format, mt->bo->size - mt->offset, 1 /* pitch */,
               written ? RELOC_WRITE : 0);

         } else {
            const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
            assert(!intel_miptree_has_color_unresolved(mt,
                                                       view.base_level, 1,
                                                       view.base_array_layer,
                                                       view.array_len));
            brw_emit_surface_state(brw, mt, mt->target, view,
                                   ISL_AUX_USAGE_NONE,
                                   surf_offset, surf_index,
                                   written ? RELOC_WRITE : 0);
         }

         isl_surf_fill_image_param(&brw->isl_dev, param, &mt->surf, &view);
      }

   } else {
      emit_null_surface_state(brw, NULL, surf_offset);
      update_default_image_param(brw, u, param);
   }
}

void
brw_upload_image_surfaces(struct brw_context *brw,
                          const struct gl_program *prog,
                          struct brw_stage_state *stage_state,
                          struct brw_stage_prog_data *prog_data)
{
   assert(prog);
   struct gl_context *ctx = &brw->ctx;

   if (prog->info.num_images) {
      for (unsigned i = 0; i < prog->info.num_images; i++) {
         struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
         const unsigned surf_idx = prog_data->binding_table.image_start + i;

         update_image_surface(brw, u, prog->sh.ImageAccess[i],
                              &stage_state->surf_offset[surf_idx],
                              &stage_state->image_param[i]);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
      /* This may have changed the image metadata that depends on the context
       * image unit state and is passed to the program as uniforms, so make
       * sure that push and pull constants are reuploaded.
       */
      brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
   }
}

static void
brw_upload_wm_image_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct gl_program *wm = brw->programs[MESA_SHADER_FRAGMENT];

   if (wm) {
      /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, wm, &brw->wm.base,
                                brw->wm.base.prog_data);
   }
}

const struct brw_tracked_state brw_wm_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_wm_image_surfaces,
};

static void
brw_upload_cs_work_groups_surface(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
   /* BRW_NEW_CS_PROG_DATA */
   const struct brw_cs_prog_data *cs_prog_data =
      brw_cs_prog_data(brw->cs.base.prog_data);

   if (prog && cs_prog_data->uses_num_work_groups) {
      const unsigned surf_idx =
         cs_prog_data->binding_table.work_groups_start;
      uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
      struct brw_bo *bo;
      uint32_t bo_offset;

      if (brw->compute.num_work_groups_bo == NULL) {
         bo = NULL;
         brw_upload_data(&brw->upload,
                         (void *)brw->compute.num_work_groups,
                         3 * sizeof(GLuint),
                         sizeof(GLuint),
                         &bo,
                         &bo_offset);
      } else {
         bo = brw->compute.num_work_groups_bo;
         bo_offset = brw->compute.num_work_groups_offset;
      }

      brw_emit_buffer_surface_state(brw, surf_offset,
                                    bo, bo_offset,
                                    ISL_FORMAT_RAW,
                                    3 * sizeof(GLuint), 1,
                                    RELOC_WRITE);

      /* The state buffer now holds a reference to our upload, drop ours. */
      if (bo != brw->compute.num_work_groups_bo)
         brw_bo_unreference(bo);

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_cs_work_groups_surface = {
   .dirty = {
      .brw = BRW_NEW_CS_PROG_DATA |
             BRW_NEW_CS_WORK_GROUPS
   },
   .emit = brw_upload_cs_work_groups_surface,
};