i965: Workaround the gen9 hw astc5x5 sampler bug
src/mesa/drivers/dri/i965/brw_wm_surface_state.c
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */

#include "compiler/nir/nir.h"
#include "main/context.h"
#include "main/blend.h"
#include "main/mtypes.h"
#include "main/samplerobj.h"
#include "main/shaderimage.h"
#include "main/teximage.h"
#include "program/prog_parameter.h"
#include "program/prog_instruction.h"
#include "main/framebuffer.h"
#include "main/shaderapi.h"

#include "isl/isl.h"

#include "intel_mipmap_tree.h"
#include "intel_batchbuffer.h"
#include "intel_tex.h"
#include "intel_fbo.h"
#include "intel_buffer_objects.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "brw_wm.h"

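/* Memory Object Control State (MOCS) lookup tables, indexed by hardware
 * generation.  wb_mocs picks a write-back cacheable setting for buffers
 * that only the GPU touches, while pte_mocs defers cacheability to the
 * page table entries, which is what we want for external (e.g. scanout)
 * buffers.
 */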
static const uint32_t wb_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_WB,
   [9] = SKL_MOCS_WB,
   [10] = CNL_MOCS_WB,
   [11] = ICL_MOCS_WB,
};

static const uint32_t pte_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_PTE,
   [9] = SKL_MOCS_PTE,
   [10] = CNL_MOCS_PTE,
   [11] = ICL_MOCS_PTE,
};

uint32_t
brw_get_bo_mocs(const struct gen_device_info *devinfo, struct brw_bo *bo)
{
   return (bo && bo->external ? pte_mocs : wb_mocs)[devinfo->gen];
}

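/* Copy the miptree's isl_surf and, if the dim layout implied by the GL
 * target doesn't match the miptree's layout, rewrite the surf, view, and
 * tile offsets so the hardware addresses the single level/layer of
 * interest directly.
 */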
static void
get_isl_surf(struct brw_context *brw, struct intel_mipmap_tree *mt,
             GLenum target, struct isl_view *view,
             uint32_t *tile_x, uint32_t *tile_y,
             uint32_t *offset, struct isl_surf *surf)
{
   *surf = mt->surf;

   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const enum isl_dim_layout dim_layout =
      get_isl_dim_layout(devinfo, mt->surf.tiling, target);

   surf->dim = get_isl_surf_dim(target);

   if (surf->dim_layout == dim_layout)
      return;

   /* The layout of the specified texture target is not compatible with the
    * actual layout of the miptree structure in memory -- you're entering
    * dangerous territory.  This can only possibly work if you only intended
    * to access a single level and slice of the texture, and the hardware
    * supports the tile offset feature in order to allow non-tile-aligned
    * base offsets, since we'll have to point the hardware to the first
    * texel of the level instead of relying on the usual base level/layer
    * controls.
    */
   assert(devinfo->has_surface_tile_offset);
   assert(view->levels == 1 && view->array_len == 1);
   assert(*tile_x == 0 && *tile_y == 0);

   *offset += intel_miptree_get_tile_offsets(mt, view->base_level,
                                             view->base_array_layer,
                                             tile_x, tile_y);

   /* Minify the logical dimensions of the texture. */
   const unsigned l = view->base_level - mt->first_level;
   surf->logical_level0_px.width = minify(surf->logical_level0_px.width, l);
   surf->logical_level0_px.height = surf->dim <= ISL_SURF_DIM_1D ? 1 :
      minify(surf->logical_level0_px.height, l);
   surf->logical_level0_px.depth = surf->dim <= ISL_SURF_DIM_2D ? 1 :
      minify(surf->logical_level0_px.depth, l);

   /* Only the base level and layer can be addressed with the overridden
    * layout.
    */
   surf->logical_level0_px.array_len = 1;
   surf->levels = 1;
   surf->dim_layout = dim_layout;

   /* The requested slice of the texture is now at the base level and
    * layer.
    */
   view->base_level = 0;
   view->base_array_layer = 0;
}

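/* Emit a SURFACE_STATE for a miptree slice, including the auxiliary
 * (HiZ/MCS/CCS) surface and clear color when present, and apply the
 * relocations for the addresses embedded in the surface state.
 */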
static void
brw_emit_surface_state(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       GLenum target, struct isl_view view,
                       enum isl_aux_usage aux_usage,
                       uint32_t *surf_offset, int surf_index,
                       unsigned reloc_flags)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t tile_x = mt->level[0].level_x;
   uint32_t tile_y = mt->level[0].level_y;
   uint32_t offset = mt->offset;

   struct isl_surf surf;

   get_isl_surf(brw, mt, target, &view, &tile_x, &tile_y, &offset, &surf);

   union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };

   struct brw_bo *aux_bo = NULL;
   struct isl_surf *aux_surf = NULL;
   uint64_t aux_offset = 0;
   struct brw_bo *clear_bo = NULL;
   uint32_t clear_offset = 0;

   if (aux_usage != ISL_AUX_USAGE_NONE) {
      aux_surf = &mt->aux_buf->surf;
      aux_bo = mt->aux_buf->bo;
      aux_offset = mt->aux_buf->offset;

      /* We only really need a clear color if we also have an auxiliary
       * surface.  Without one, it does nothing.
       */
      clear_color =
         intel_miptree_get_clear_color(devinfo, mt, view.format,
                                       view.usage & ISL_SURF_USAGE_TEXTURE_BIT,
                                       &clear_bo, &clear_offset);
   }

   void *state = brw_state_batch(brw,
                                 brw->isl_dev.ss.size,
                                 brw->isl_dev.ss.align,
                                 surf_offset);

   isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
                       .address = brw_state_reloc(&brw->batch,
                                                  *surf_offset + brw->isl_dev.ss.addr_offset,
                                                  mt->bo, offset, reloc_flags),
                       .aux_surf = aux_surf, .aux_usage = aux_usage,
                       .aux_address = aux_offset,
                       .mocs = brw_get_bo_mocs(devinfo, mt->bo),
                       .clear_color = clear_color,
                       .use_clear_address = clear_bo != NULL,
                       .clear_address = clear_offset,
                       .x_offset_sa = tile_x, .y_offset_sa = tile_y);
   if (aux_surf) {
      /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
       * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
       * contain other control information.  Since buffer addresses are always
       * on 4k boundaries (and thus have their lower 12 bits zero), we can use
       * an ordinary reloc to do the necessary address translation.
       *
       * FIXME: move to the point of assignment.
       */
      assert((aux_offset & 0xfff) == 0);

      if (devinfo->gen >= 8) {
         uint64_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
         *aux_addr = brw_state_reloc(&brw->batch,
                                     *surf_offset +
                                     brw->isl_dev.ss.aux_addr_offset,
                                     aux_bo, *aux_addr,
                                     reloc_flags);
      } else {
         uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
         *aux_addr = brw_state_reloc(&brw->batch,
                                     *surf_offset +
                                     brw->isl_dev.ss.aux_addr_offset,
                                     aux_bo, *aux_addr,
                                     reloc_flags);
      }
   }

   if (clear_bo != NULL) {
      /* Make sure the offset is aligned with a cacheline. */
      assert((clear_offset & 0x3f) == 0);
      uint64_t *clear_address =
         state + brw->isl_dev.ss.clear_color_state_offset;
      *clear_address = brw_state_reloc(&brw->batch,
                                       *surf_offset +
                                       brw->isl_dev.ss.clear_color_state_offset,
                                       clear_bo, *clear_address, reloc_flags);
   }
}

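/* Fill a SURFACE_STATE for a single color render target on gen6 and later
 * using ISL, returning its offset in the batch's state space.
 */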
static uint32_t
gen6_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 unsigned unit,
                                 uint32_t surf_index)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;

   assert(brw_render_target_supported(brw, rb));

   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }
   enum isl_format isl_format = brw->mesa_to_isl_render_format[rb_format];

   struct isl_view view = {
      .format = isl_format,
      .base_level = irb->mt_level - irb->mt->first_level,
      .levels = 1,
      .base_array_layer = irb->mt_layer,
      .array_len = MAX2(irb->layer_count, 1),
      .swizzle = ISL_SWIZZLE_IDENTITY,
      .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
   };

   uint32_t offset;
   brw_emit_surface_state(brw, mt, mt->target, view,
                          brw->draw_aux_usage[unit],
                          &offset, surf_index,
                          RELOC_WRITE);
   return offset;
}

GLuint
translate_tex_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_1D:
   case GL_TEXTURE_1D_ARRAY_EXT:
      return BRW_SURFACE_1D;

   case GL_TEXTURE_RECTANGLE_NV:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_2D:
   case GL_TEXTURE_2D_ARRAY_EXT:
   case GL_TEXTURE_EXTERNAL_OES:
   case GL_TEXTURE_2D_MULTISAMPLE:
   case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_3D:
      return BRW_SURFACE_3D;

   case GL_TEXTURE_CUBE_MAP:
   case GL_TEXTURE_CUBE_MAP_ARRAY:
      return BRW_SURFACE_CUBE;

   default:
      unreachable("not reached");
   }
}

uint32_t
brw_get_surface_tiling_bits(enum isl_tiling tiling)
{
   switch (tiling) {
   case ISL_TILING_X:
      return BRW_SURFACE_TILED;
   case ISL_TILING_Y0:
      return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
   default:
      return 0;
   }
}

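/* Gen4-6 surface state can only express 1x or 4x multisampling, and gen6
 * (the only one of those with MSAA support) tops out at 4 samples, so any
 * multisampled surface is encoded as 4x.
 */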
uint32_t
brw_get_surface_num_multisamples(unsigned num_samples)
{
   if (num_samples > 1)
      return BRW_SURFACE_MULTISAMPLECOUNT_4;
   else
      return BRW_SURFACE_MULTISAMPLECOUNT_1;
}

/**
 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
 * swizzling.
 */
int
brw_get_texture_swizzle(const struct gl_context *ctx,
                        const struct gl_texture_object *t)
{
   const struct gl_texture_image *img = t->Image[0][t->BaseLevel];

   int swizzles[SWIZZLE_NIL + 1] = {
      SWIZZLE_X,
      SWIZZLE_Y,
      SWIZZLE_Z,
      SWIZZLE_W,
      SWIZZLE_ZERO,
      SWIZZLE_ONE,
      SWIZZLE_NIL
   };

   if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
       img->_BaseFormat == GL_DEPTH_STENCIL) {
      GLenum depth_mode = t->DepthMode;

      /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
       * with depth component data specified with a sized internal format.
       * Otherwise, it's left at the old default, GL_LUMINANCE.
       */
      if (_mesa_is_gles3(ctx) &&
          img->InternalFormat != GL_DEPTH_COMPONENT &&
          img->InternalFormat != GL_DEPTH_STENCIL) {
         depth_mode = GL_RED;
      }

      switch (depth_mode) {
      case GL_ALPHA:
         swizzles[0] = SWIZZLE_ZERO;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_LUMINANCE:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
         break;
      case GL_INTENSITY:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_RED:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_ONE;
         break;
      }
   }

   GLenum datatype = _mesa_get_format_datatype(img->TexFormat);

   /* If the texture's format is alpha-only, force R, G, and B to
    * 0.0.  Similarly, if the texture's format has no alpha channel,
    * force the alpha value read to 1.0.  This allows for the
    * implementation to use an RGBA texture for any of these formats
    * without leaking any unexpected values.
    */
   switch (img->_BaseFormat) {
   case GL_ALPHA:
      swizzles[0] = SWIZZLE_ZERO;
      swizzles[1] = SWIZZLE_ZERO;
      swizzles[2] = SWIZZLE_ZERO;
      break;
   case GL_LUMINANCE:
      if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
      }
      break;
   case GL_LUMINANCE_ALPHA:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_W;
      }
      break;
   case GL_INTENSITY:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
      }
      break;
   case GL_RED:
   case GL_RG:
   case GL_RGB:
      if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0 ||
          img->TexFormat == MESA_FORMAT_RGB_DXT1 ||
          img->TexFormat == MESA_FORMAT_SRGB_DXT1)
         swizzles[3] = SWIZZLE_ONE;
      break;
   }

   return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
                        swizzles[GET_SWZ(t->_Swizzle, 1)],
                        swizzles[GET_SWZ(t->_Swizzle, 2)],
                        swizzles[GET_SWZ(t->_Swizzle, 3)]);
}

/**
 * Convert a swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED).  The mappings are
 *
 *   SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
 *           0          1          2          3             4            5
 *           4          5          6          7             0            1
 *     SCS_RED, SCS_GREEN,  SCS_BLUE, SCS_ALPHA,     SCS_ZERO,     SCS_ONE
 *
 * which is simply adding 4 then modding by 8 (or anding with 7).
 *
 * We then may need to apply workarounds for textureGather hardware bugs.
 */
static unsigned
swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
{
   unsigned scs = (swizzle + 4) & 7;

   return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
}

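/* Emit the SURFACE_STATE for a single bound GL texture, applying the
 * format overrides and workarounds (gather4 bugs, stencil sampling, sRGB
 * decode, and the gen9 ASTC5x5 sampler bug via gen9_astc5x5_wa_tex_mask)
 * before handing the view to brw_emit_surface_state().
 */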
static void
brw_update_texture_surface(struct gl_context *ctx,
                           unsigned unit,
                           uint32_t *surf_offset,
                           bool for_gather,
                           bool for_txf,
                           uint32_t plane)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;

   if (obj->Target == GL_TEXTURE_BUFFER) {
      brw_update_buffer_texture_surface(ctx, unit, surf_offset);

   } else {
      struct intel_texture_object *intel_obj = intel_texture_object(obj);
      struct intel_mipmap_tree *mt = intel_obj->mt;

      if (plane > 0) {
         if (mt->plane[plane - 1] == NULL)
            return;
         mt = mt->plane[plane - 1];
      }

      struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
      /* If this is a view with restricted NumLayers, then our effective depth
       * is not just the miptree depth.
       */
      unsigned view_num_layers;
      if (obj->Immutable && obj->Target != GL_TEXTURE_3D) {
         view_num_layers = obj->NumLayers;
      } else {
         view_num_layers = mt->surf.dim == ISL_SURF_DIM_3D ?
                              mt->surf.logical_level0_px.depth :
                              mt->surf.logical_level0_px.array_len;
      }

      /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
       * texturing functions that return a float, as our code generation always
       * selects the .x channel (which would always be 0).
       */
      struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
      const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
         (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
          firstImage->_BaseFormat == GL_DEPTH_STENCIL);
      const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
                                brw_get_texture_swizzle(&brw->ctx, obj));

      mesa_format mesa_fmt;
      if (firstImage->_BaseFormat == GL_DEPTH_STENCIL ||
          firstImage->_BaseFormat == GL_DEPTH_COMPONENT) {
         /* The format from intel_obj may be a combined depth stencil format
          * when we just want depth.  Pull it from the miptree instead.  This
          * is safe because texture views aren't allowed on depth/stencil.
          */
         mesa_fmt = mt->format;
      } else if (mt->etc_format != MESA_FORMAT_NONE) {
         mesa_fmt = mt->format;
      } else if (plane > 0) {
         mesa_fmt = mt->format;
      } else {
         mesa_fmt = intel_obj->_Format;
      }
      enum isl_format format = translate_tex_format(brw, mesa_fmt,
                                                    for_txf ? GL_DECODE_EXT :
                                                    sampler->sRGBDecode);

      /* Implement gen6 and gen7 gather work-around */
      bool need_green_to_blue = false;
      if (for_gather) {
         if (devinfo->gen == 7 && (format == ISL_FORMAT_R32G32_FLOAT ||
                                   format == ISL_FORMAT_R32G32_SINT ||
                                   format == ISL_FORMAT_R32G32_UINT)) {
            format = ISL_FORMAT_R32G32_FLOAT_LD;
            need_green_to_blue = devinfo->is_haswell;
         } else if (devinfo->gen == 6) {
            /* Sandybridge's gather4 message is broken for integer formats.
             * To work around this, we pretend the surface is UNORM for
             * 8 or 16-bit formats, and emit shader instructions to recover
             * the real INT/UINT value.  For 32-bit formats, we pretend
             * the surface is FLOAT, and simply reinterpret the resulting
             * bits.
             */
            switch (format) {
            case ISL_FORMAT_R8_SINT:
            case ISL_FORMAT_R8_UINT:
               format = ISL_FORMAT_R8_UNORM;
               break;

            case ISL_FORMAT_R16_SINT:
            case ISL_FORMAT_R16_UINT:
               format = ISL_FORMAT_R16_UNORM;
               break;

            case ISL_FORMAT_R32_SINT:
            case ISL_FORMAT_R32_UINT:
               format = ISL_FORMAT_R32_FLOAT;
               break;

            default:
               break;
            }
         }
      }

      if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
         if (devinfo->gen <= 7) {
            assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
            mt = mt->r8stencil_mt;
         } else {
            mt = mt->stencil_mt;
         }
         format = ISL_FORMAT_R8_UINT;
      } else if (devinfo->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
         assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
         mt = mt->r8stencil_mt;
         format = ISL_FORMAT_R8_UINT;
      }

      const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];

      struct isl_view view = {
         .format = format,
         .base_level = obj->MinLevel + obj->BaseLevel,
         .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
         .base_array_layer = obj->MinLayer,
         .array_len = view_num_layers,
         .swizzle = {
            .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
            .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
            .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
            .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
         },
         .usage = ISL_SURF_USAGE_TEXTURE_BIT,
      };

      /* On Ivy Bridge and earlier, we handle texture swizzle with shader
       * code.  The actual surface swizzle should be identity.
       */
      if (devinfo->gen <= 7 && !devinfo->is_haswell)
         view.swizzle = ISL_SWIZZLE_IDENTITY;

      if (obj->Target == GL_TEXTURE_CUBE_MAP ||
          obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
         view.usage |= ISL_SURF_USAGE_CUBE_BIT;

      enum isl_aux_usage aux_usage =
         intel_miptree_texture_aux_usage(brw, mt, format,
                                         brw->gen9_astc5x5_wa_tex_mask);

      brw_emit_surface_state(brw, mt, mt->target, view, aux_usage,
                             surf_offset, surf_index,
                             0);
   }
}

void
brw_emit_buffer_surface_state(struct brw_context *brw,
                              uint32_t *out_offset,
                              struct brw_bo *bo,
                              unsigned buffer_offset,
                              unsigned surface_format,
                              unsigned buffer_size,
                              unsigned pitch,
                              unsigned reloc_flags)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *dw = brw_state_batch(brw,
                                  brw->isl_dev.ss.size,
                                  brw->isl_dev.ss.align,
                                  out_offset);

   isl_buffer_fill_state(&brw->isl_dev, dw,
                         .address = !bo ? buffer_offset :
                                    brw_state_reloc(&brw->batch,
                                                    *out_offset + brw->isl_dev.ss.addr_offset,
                                                    bo, buffer_offset,
                                                    reloc_flags),
                         .size = buffer_size,
                         .format = surface_format,
                         .stride = pitch,
                         .mocs = brw_get_bo_mocs(devinfo, bo));
}

static unsigned
buffer_texture_range_size(struct brw_context *brw,
                          struct gl_texture_object *obj)
{
   assert(obj->Target == GL_TEXTURE_BUFFER);
   const unsigned texel_size = _mesa_get_format_bytes(obj->_BufferObjectFormat);
   const unsigned buffer_size = (!obj->BufferObject ? 0 :
                                 obj->BufferObject->Size);
   const unsigned buffer_offset = MIN2(buffer_size, obj->BufferOffset);

   /* The ARB_texture_buffer_object specification says:
    *
    *    "The number of texels in the buffer texture's texel array is given by
    *
    *       floor(<buffer_size> / (<components> * sizeof(<base_type>)),
    *
    *     where <buffer_size> is the size of the buffer object, in basic
    *     machine units and <components> and <base_type> are the element count
    *     and base data type for elements, as specified in Table X.1.  The
    *     number of texels in the texel array is then clamped to the
    *     implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
    *
    * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
    * so that when ISL divides by stride to obtain the number of texels, that
    * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
    */
   return MIN3((unsigned)obj->BufferSize,
               buffer_size - buffer_offset,
               brw->ctx.Const.MaxTextureBufferSize * texel_size);
}

void
brw_update_buffer_texture_surface(struct gl_context *ctx,
                                  unsigned unit,
                                  uint32_t *surf_offset)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_buffer_object *intel_obj =
      intel_buffer_object(tObj->BufferObject);
   const unsigned size = buffer_texture_range_size(brw, tObj);
   struct brw_bo *bo = NULL;
   mesa_format format = tObj->_BufferObjectFormat;
   const enum isl_format isl_format = brw_isl_format_for_mesa_format(format);
   int texel_size = _mesa_get_format_bytes(format);

   if (intel_obj)
      bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size,
                                  false);

   if (isl_format == ISL_FORMAT_UNSUPPORTED) {
      _mesa_problem(NULL, "bad format %s for texture buffer\n",
                    _mesa_get_format_name(format));
   }

   brw_emit_buffer_surface_state(brw, surf_offset, bo,
                                 tObj->BufferOffset,
                                 isl_format,
                                 size,
                                 texel_size,
                                 0);
}

/**
 * Set up a binding table entry for use by stream output logic (transform
 * feedback).
 *
 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
 */
void
brw_update_sol_surface(struct brw_context *brw,
                       struct gl_buffer_object *buffer_obj,
                       uint32_t *out_offset, unsigned num_vector_components,
                       unsigned stride_dwords, unsigned offset_dwords)
{
   struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
   uint32_t offset_bytes = 4 * offset_dwords;
   struct brw_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
                                              offset_bytes,
                                              buffer_obj->Size - offset_bytes,
                                              true);
   uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
   uint32_t pitch_minus_1 = 4 * stride_dwords - 1;
   size_t size_dwords = buffer_obj->Size / 4;
   uint32_t buffer_size_minus_1, width, height, depth, surface_format;

   /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
    * too big to map using a single binding table entry?
    */
   assert((size_dwords - offset_dwords) / stride_dwords
          <= BRW_MAX_NUM_BUFFER_ENTRIES);

   if (size_dwords > offset_dwords + num_vector_components) {
      /* There is room for at least 1 transform feedback output in the buffer.
       * Compute the number of additional transform feedback outputs the
       * buffer has room for.
       */
      buffer_size_minus_1 =
         (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
   } else {
      /* There isn't even room for a single transform feedback output in the
       * buffer.  We can't configure the binding table entry to prevent output
       * entirely; we'll have to rely on the geometry shader to detect
       * overflow.  But to minimize the damage in case of a bug, set up the
       * binding table entry to just allow a single output.
       */
      buffer_size_minus_1 = 0;
   }
   width = buffer_size_minus_1 & 0x7f;
   height = (buffer_size_minus_1 & 0xfff80) >> 7;
   depth = (buffer_size_minus_1 & 0x7f00000) >> 20;

   switch (num_vector_components) {
   case 1:
      surface_format = ISL_FORMAT_R32_FLOAT;
      break;
   case 2:
      surface_format = ISL_FORMAT_R32G32_FLOAT;
      break;
   case 3:
      surface_format = ISL_FORMAT_R32G32B32_FLOAT;
      break;
   case 4:
      surface_format = ISL_FORMAT_R32G32B32A32_FLOAT;
      break;
   default:
      unreachable("Invalid vector size for transform feedback output");
   }

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
             BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
             surface_format << BRW_SURFACE_FORMAT_SHIFT |
             BRW_SURFACE_RC_READ_WRITE;
   surf[1] = brw_state_reloc(&brw->batch,
                             *out_offset + 4, bo, offset_bytes, RELOC_WRITE);
   surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
              height << BRW_SURFACE_HEIGHT_SHIFT);
   surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = 0;
   surf[5] = 0;
}

/* Creates a new WM constant buffer reflecting the current fragment program's
 * constants, if needed by the fragment program.
 *
 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
 * state atom.
 */
static void
brw_upload_wm_pull_constants(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->wm.base;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct brw_program *fp =
      (struct brw_program *) brw->programs[MESA_SHADER_FRAGMENT];

   /* BRW_NEW_FS_PROG_DATA */
   struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;

   _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
   /* _NEW_PROGRAM_CONSTANTS */
   brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
                             stage_state, prog_data);
}

const struct brw_tracked_state brw_wm_pull_constants = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_pull_constants,
};

/**
 * Creates a null renderbuffer surface.
 *
 * This is used when the shader doesn't write to any color output.  An FB
 * write to target 0 will still be emitted, because that's how the thread is
 * terminated (and computed depth is returned), so we need to have the
 * hardware discard the target 0 color output.
 */
static void
emit_null_surface_state(struct brw_context *brw,
                        const struct gl_framebuffer *fb,
                        uint32_t *out_offset)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *surf = brw_state_batch(brw,
                                    brw->isl_dev.ss.size,
                                    brw->isl_dev.ss.align,
                                    out_offset);

   /* Use the fb dimensions or 1x1x1 */
   const unsigned width = fb ? _mesa_geometric_width(fb) : 1;
   const unsigned height = fb ? _mesa_geometric_height(fb) : 1;
   const unsigned samples = fb ? _mesa_geometric_samples(fb) : 1;

   if (devinfo->gen != 6 || samples <= 1) {
      isl_null_fill_state(&brw->isl_dev, surf,
                          isl_extent3d(width, height, 1));
      return;
   }

   /* On Gen6, null render targets seem to cause GPU hangs when multisampling.
    * So work around this problem by rendering into a dummy color buffer.
    *
    * To decrease the amount of memory needed by the workaround buffer, we
    * set its pitch to 128 bytes (the width of a Y tile).  This means that
    * the amount of memory needed for the workaround buffer is
    * (width_in_tiles + height_in_tiles - 1) tiles.
    *
    * Note that since the workaround buffer will be interpreted by the
    * hardware as an interleaved multisampled buffer, we need to compute
    * width_in_tiles and height_in_tiles by dividing the width and height
    * by 16 rather than the normal Y-tile size of 32.
    */
   unsigned width_in_tiles = ALIGN(width, 16) / 16;
   unsigned height_in_tiles = ALIGN(height, 16) / 16;
   unsigned pitch_minus_1 = 127;
   unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
   brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
                      size_needed);

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              ISL_FORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
   surf[1] = brw_state_reloc(&brw->batch, *out_offset + 4,
                             brw->wm.multisampled_null_render_target_bo,
                             0, RELOC_WRITE);

   surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   /* From the Sandy Bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
    * Notes):
    *
    *   If Surface Type is SURFTYPE_NULL, this field must be TRUE
    */
   surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = BRW_SURFACE_MULTISAMPLECOUNT_4;
   surf[5] = 0;
}

/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffers support.
 */
static uint32_t
gen4_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 unsigned unit,
                                 uint32_t surf_index)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;
   uint32_t *surf;
   uint32_t tile_x, tile_y;
   enum isl_format format;
   uint32_t offset;
   /* _NEW_BUFFERS */
   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   /* BRW_NEW_FS_PROG_DATA */

   if (rb->TexImage && !devinfo->has_surface_tile_offset) {
      intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);

      if (tile_x != 0 || tile_y != 0) {
         /* Original gen4 hardware couldn't draw to a non-tile-aligned
          * destination in a miptree unless you actually set up your
          * renderbuffer as a miptree and used the fragile
          * lod/array_index/etc. controls to select the image.  So, instead,
          * we just make a new single-level miptree and render into that.
          */
         intel_renderbuffer_move_to_temp(brw, irb, false);
         assert(irb->align_wa_mt);
         mt = irb->align_wa_mt;
      }
   }

   surf = brw_state_batch(brw, 6 * 4, 32, &offset);

   format = brw->mesa_to_isl_render_format[rb_format];
   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              format << BRW_SURFACE_FORMAT_SHIFT);

   /* reloc */
   assert(mt->offset % mt->cpp == 0);
   surf[1] = brw_state_reloc(&brw->batch, offset + 4, mt->bo,
                             mt->offset +
                             intel_renderbuffer_get_tile_offsets(irb,
                                                                 &tile_x,
                                                                 &tile_y),
                             RELOC_WRITE);

   surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   surf[3] = (brw_get_surface_tiling_bits(mt->surf.tiling) |
              (mt->surf.row_pitch - 1) << BRW_SURFACE_PITCH_SHIFT);

   surf[4] = brw_get_surface_num_multisamples(mt->surf.samples);

   assert(devinfo->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
   /* Note that the low bits of these fields are missing, so
    * there's the possibility of getting in trouble.
    */
   assert(tile_x % 4 == 0);
   assert(tile_y % 2 == 0);
   surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
              (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
              (mt->surf.image_alignment_el.height == 4 ?
               BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));

   if (devinfo->gen < 6) {
      /* _NEW_COLOR */
      if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
          (ctx->Color.BlendEnabled & (1 << unit)))
         surf[0] |= BRW_SURFACE_BLEND_ENABLED;

      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 0))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 1))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 2))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;

      /* Disable writes to the alpha component when the renderbuffer is
       * XRGB.
       */
      if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
          !GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 3)) {
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
      }
   }

   return offset;
}

static void
update_renderbuffer_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS | _NEW_COLOR */
   const struct gl_framebuffer *fb = ctx->DrawBuffer;

   /* Render targets always start at binding table index 0. */
   const unsigned rt_start = 0;

   uint32_t *surf_offsets = brw->wm.base.surf_offset;

   /* Update surfaces for drawing buffers */
   if (fb->_NumColorDrawBuffers >= 1) {
      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];

         if (intel_renderbuffer(rb)) {
            surf_offsets[rt_start + i] = devinfo->gen >= 6 ?
               gen6_update_renderbuffer_surface(brw, rb, i, rt_start + i) :
               gen4_update_renderbuffer_surface(brw, rb, i, rt_start + i);
         } else {
            emit_null_surface_state(brw, fb, &surf_offsets[rt_start + i]);
         }
      }
   } else {
      emit_null_surface_state(brw, fb, &surf_offsets[rt_start]);
   }

   /* The PIPE_CONTROL command description says:
    *
    *   "Whenever a Binding Table Index (BTI) used by a Render Target Message
    *    points to a different RENDER_SURFACE_STATE, SW must issue a Render
    *    Target Cache Flush by enabling this bit.  When render target flush
    *    is set due to new association of BTI, PS Scoreboard Stall bit must
    *    be set in this packet."
    */
   if (devinfo->gen >= 11) {
      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                  PIPE_CONTROL_STALL_AT_SCOREBOARD);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR,
      .brw = BRW_NEW_BATCH,
   },
   .emit = update_renderbuffer_surfaces,
};

const struct brw_tracked_state gen6_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE,
   },
   .emit = update_renderbuffer_surfaces,
};

static void
update_renderbuffer_read_surfaces(struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;

   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);

   if (wm_prog_data->has_render_target_reads &&
       !ctx->Extensions.EXT_shader_framebuffer_fetch) {
      /* _NEW_BUFFERS */
      const struct gl_framebuffer *fb = ctx->DrawBuffer;

      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
         const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
         const unsigned surf_index =
            wm_prog_data->binding_table.render_target_read_start + i;
         uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];

         if (irb) {
            const enum isl_format format = brw->mesa_to_isl_render_format[
               _mesa_get_render_format(ctx, intel_rb_format(irb))];
            assert(isl_format_supports_sampling(&brw->screen->devinfo,
                                                format));

            /* Override the target of the texture if the render buffer is a
             * single slice of a 3D texture (since the minimum array element
             * field of the surface state structure is ignored by the sampler
             * unit for 3D textures on some hardware), or if the render buffer
             * is a 1D array (since shaders always provide the array index
             * coordinate at the Z component to avoid state-dependent
             * recompiles when changing the texture target of the
             * framebuffer).
             */
            const GLenum target =
               (irb->mt->target == GL_TEXTURE_3D &&
                irb->layer_count == 1) ? GL_TEXTURE_2D :
               irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
               irb->mt->target;

            const struct isl_view view = {
               .format = format,
               .base_level = irb->mt_level - irb->mt->first_level,
               .levels = 1,
               .base_array_layer = irb->mt_layer,
               .array_len = irb->layer_count,
               .swizzle = ISL_SWIZZLE_IDENTITY,
               .usage = ISL_SURF_USAGE_TEXTURE_BIT,
            };

            enum isl_aux_usage aux_usage =
               intel_miptree_texture_aux_usage(brw, irb->mt, format,
                                               brw->gen9_astc5x5_wa_tex_mask);
            if (brw->draw_aux_usage[i] == ISL_AUX_USAGE_NONE)
               aux_usage = ISL_AUX_USAGE_NONE;

            brw_emit_surface_state(brw, irb->mt, target, view, aux_usage,
                                   surf_offset, surf_index,
                                   0);

         } else {
            emit_null_surface_state(brw, fb, surf_offset);
         }
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = update_renderbuffer_read_surfaces,
};

static bool
is_depth_texture(struct intel_texture_object *iobj)
{
   GLenum base_format = _mesa_get_format_base_format(iobj->_Format);
   return base_format == GL_DEPTH_COMPONENT ||
          (base_format == GL_DEPTH_STENCIL && !iobj->base.StencilSampling);
}

static void
update_stage_texture_surfaces(struct brw_context *brw,
                              const struct gl_program *prog,
                              struct brw_stage_state *stage_state,
                              bool for_gather, uint32_t plane)
{
   if (!prog)
      return;

   struct gl_context *ctx = &brw->ctx;

   uint32_t *surf_offset = stage_state->surf_offset;

   /* BRW_NEW_*_PROG_DATA */
   if (for_gather)
      surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
   else
      surf_offset += stage_state->prog_data->binding_table.plane_start[plane];

   unsigned num_samplers = util_last_bit(prog->SamplersUsed);
   for (unsigned s = 0; s < num_samplers; s++) {
      surf_offset[s] = 0;

      if (prog->SamplersUsed & (1 << s)) {
         const unsigned unit = prog->SamplerUnits[s];
         const bool used_by_txf = prog->info.textures_used_by_txf & (1 << s);
         struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
         struct intel_texture_object *iobj = intel_texture_object(obj);

         /* _NEW_TEXTURE */
         if (!obj)
            continue;

         if ((prog->ShadowSamplers & (1 << s)) && !is_depth_texture(iobj)) {
            /* A programming note for the sample_c message says:
             *
             *   "The Surface Format of the associated surface must be
             *    indicated as supporting shadow mapping as indicated in the
             *    surface format table."
             *
             * Accessing non-depth textures via a sampler*Shadow type is
             * undefined.  GLSL 4.50 page 162 says:
             *
             *   "If a shadow texture call is made to a sampler that does not
             *    represent a depth texture, then results are undefined."
             *
             * We give them a null surface (zeros) for undefined.  We've seen
             * GPU hangs with color buffers and sample_c, so we try and avoid
             * those with this hack.
             */
            emit_null_surface_state(brw, NULL, surf_offset + s);
         } else {
            brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather,
                                       used_by_txf, plane);
         }
      }
   }
}

/**
 * Construct SURFACE_STATE objects for enabled textures.
 */
static void
brw_update_texture_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* BRW_NEW_VERTEX_PROGRAM */
   struct gl_program *vs = brw->programs[MESA_SHADER_VERTEX];

   /* BRW_NEW_TESS_PROGRAMS */
   struct gl_program *tcs = brw->programs[MESA_SHADER_TESS_CTRL];
   struct gl_program *tes = brw->programs[MESA_SHADER_TESS_EVAL];

   /* BRW_NEW_GEOMETRY_PROGRAM */
   struct gl_program *gs = brw->programs[MESA_SHADER_GEOMETRY];

   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct gl_program *fs = brw->programs[MESA_SHADER_FRAGMENT];

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
   update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
   update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
   update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
   update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);

   /* Emit an alternate set of surface state for gather.  This allows the
    * surface format to be overridden for only the gather4 messages.
    */
   if (devinfo->gen < 8) {
      if (vs && vs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
      if (tcs && tcs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
      if (tes && tes->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
      if (gs && gs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
      if (fs && fs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
   }

   if (fs) {
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_GEOMETRY_PROGRAM |
             BRW_NEW_GS_PROG_DATA |
             BRW_NEW_TESS_PROGRAMS |
             BRW_NEW_TCS_PROG_DATA |
             BRW_NEW_TES_PROG_DATA |
             BRW_NEW_TEXTURE_BUFFER |
             BRW_NEW_VERTEX_PROGRAM |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = brw_update_texture_surfaces,
};

static void
brw_update_cs_texture_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* BRW_NEW_COMPUTE_PROGRAM */
   struct gl_program *cs = brw->programs[MESA_SHADER_COMPUTE];

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);

   /* Emit an alternate set of surface state for gather.  This allows the
    * surface format to be overridden for only the gather4 messages.
    */
   if (devinfo->gen < 8) {
      if (cs && cs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_cs_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_COMPUTE_PROGRAM |
             BRW_NEW_AUX_STATE,
   },
   .emit = brw_update_cs_texture_surfaces,
};

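/* Emit a buffer surface for a UBO, SSBO, or atomic buffer binding, falling
 * back to a null surface when the binding point is empty or zero-sized.
 */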
static void
upload_buffer_surface(struct brw_context *brw,
                      struct gl_buffer_binding *binding,
                      uint32_t *out_offset,
                      enum isl_format format,
                      unsigned reloc_flags)
{
   struct gl_context *ctx = &brw->ctx;

   if (binding->BufferObject == ctx->Shared->NullBufferObj) {
      emit_null_surface_state(brw, NULL, out_offset);
   } else {
      ptrdiff_t size = binding->BufferObject->Size - binding->Offset;
      if (!binding->AutomaticSize)
         size = MIN2(size, binding->Size);

      if (size == 0) {
         emit_null_surface_state(brw, NULL, out_offset);
         return;
      }

      struct intel_buffer_object *iobj =
         intel_buffer_object(binding->BufferObject);
      struct brw_bo *bo =
         intel_bufferobj_buffer(brw, iobj, binding->Offset, size,
                                (reloc_flags & RELOC_WRITE) != 0);

      brw_emit_buffer_surface_state(brw, out_offset, bo, binding->Offset,
                                    format, size, 1, reloc_flags);
   }
}

void
brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;

   if (!prog || (prog->info.num_ubos == 0 &&
                 prog->info.num_ssbos == 0 &&
                 prog->info.num_abos == 0))
      return;

   uint32_t *ubo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ubo_start];

   for (int i = 0; i < prog->info.num_ubos; i++) {
      struct gl_buffer_binding *binding =
         &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];
      upload_buffer_surface(brw, binding, &ubo_surf_offsets[i],
                            ISL_FORMAT_R32G32B32A32_FLOAT, 0);
   }

   uint32_t *abo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
   uint32_t *ssbo_surf_offsets = abo_surf_offsets + prog->info.num_abos;

   for (int i = 0; i < prog->info.num_abos; i++) {
      struct gl_buffer_binding *binding =
         &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
      upload_buffer_surface(brw, binding, &abo_surf_offsets[i],
                            ISL_FORMAT_RAW, RELOC_WRITE);
   }

   for (int i = 0; i < prog->info.num_ssbos; i++) {
      struct gl_buffer_binding *binding =
         &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];

      upload_buffer_surface(brw, binding, &ssbo_surf_offsets[i],
                            ISL_FORMAT_RAW, RELOC_WRITE);
   }

   stage_state->push_constants_dirty = true;
   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

static void
brw_upload_wm_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog = ctx->FragmentProgram._Current;

   /* BRW_NEW_FS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
}

const struct brw_tracked_state brw_wm_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_wm_ubo_surfaces,
};

static void
brw_upload_cs_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   /* BRW_NEW_CS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog, &brw->cs.base, brw->cs.base.prog_data);
}

const struct brw_tracked_state brw_cs_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_cs_ubo_surfaces,
};

static void
brw_upload_cs_image_surfaces(struct brw_context *brw)
{
   /* _NEW_PROGRAM */
   const struct gl_program *cp = brw->programs[MESA_SHADER_COMPUTE];

   if (cp) {
      /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, cp, &brw->cs.base,
                                brw->cs.base.prog_data);
   }
}

const struct brw_tracked_state brw_cs_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_AUX_STATE |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_cs_image_surfaces,
};

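/* Pick the hardware format for a shader image: the requested format when
 * access is write-only, a typed-read-capable equivalent when one exists,
 * and raw (untyped) access otherwise.
 */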
static uint32_t
get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   enum isl_format hw_format = brw_isl_format_for_mesa_format(format);
   if (access == GL_WRITE_ONLY || access == GL_NONE) {
      return hw_format;
   } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
      /* Typed surface reads support a very limited subset of the shader
       * image formats.  Translate it into the closest format the
       * hardware supports.
       */
      return isl_lower_storage_image_format(devinfo, hw_format);
   } else {
      /* The hardware doesn't actually support a typed format that we can use
       * so we have to fall back to untyped read/write messages.
       */
      return ISL_FORMAT_RAW;
   }
}

static void
update_default_image_param(struct brw_context *brw,
                           struct gl_image_unit *u,
                           struct brw_image_param *param)
{
   memset(param, 0, sizeof(*param));
   /* Set the swizzling shifts to all-ones to effectively disable swizzling --
    * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
    * detailed explanation of these parameters.
    */
   param->swizzling[0] = 0xff;
   param->swizzling[1] = 0xff;
}

static void
update_buffer_image_param(struct brw_context *brw,
                          struct gl_image_unit *u,
                          struct brw_image_param *param)
{
   const unsigned size = buffer_texture_range_size(brw, u->TexObj);
   update_default_image_param(brw, u, param);

   param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
   param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
}

static unsigned
get_image_num_layers(const struct intel_mipmap_tree *mt, GLenum target,
                     unsigned level)
{
   if (target == GL_TEXTURE_CUBE_MAP)
      return 6;

   return target == GL_TEXTURE_3D ?
      minify(mt->surf.logical_level0_px.depth, level) :
      mt->surf.logical_level0_px.array_len;
}

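/* Emit the surface state and brw_image_param metadata for a single image
 * unit, covering buffer textures, the raw-format fallback, and invalid
 * bindings (which get a null surface).
 */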
static void
update_image_surface(struct brw_context *brw,
                     struct gl_image_unit *u,
                     GLenum access,
                     uint32_t *surf_offset,
                     struct brw_image_param *param)
{
   if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
      struct gl_texture_object *obj = u->TexObj;
      const unsigned format = get_image_format(brw, u->_ActualFormat, access);
      const bool written = (access != GL_READ_ONLY && access != GL_NONE);

      if (obj->Target == GL_TEXTURE_BUFFER) {
         const unsigned texel_size = (format == ISL_FORMAT_RAW ? 1 :
                                      _mesa_get_format_bytes(u->_ActualFormat));
         const unsigned buffer_size = buffer_texture_range_size(brw, obj);
         struct brw_bo *const bo = !obj->BufferObject ? NULL :
            intel_bufferobj_buffer(brw, intel_buffer_object(obj->BufferObject),
                                   obj->BufferOffset, buffer_size, written);

         brw_emit_buffer_surface_state(
            brw, surf_offset, bo, obj->BufferOffset,
            format, buffer_size, texel_size,
            written ? RELOC_WRITE : 0);

         update_buffer_image_param(brw, u, param);

      } else {
         struct intel_texture_object *intel_obj = intel_texture_object(obj);
         struct intel_mipmap_tree *mt = intel_obj->mt;
         const unsigned num_layers = u->Layered ?
            get_image_num_layers(mt, obj->Target, u->Level) : 1;

         struct isl_view view = {
            .format = format,
            .base_level = obj->MinLevel + u->Level,
            .levels = 1,
            .base_array_layer = obj->MinLayer + u->_Layer,
            .array_len = num_layers,
            .swizzle = ISL_SWIZZLE_IDENTITY,
            .usage = ISL_SURF_USAGE_STORAGE_BIT,
         };

         if (format == ISL_FORMAT_RAW) {
            brw_emit_buffer_surface_state(
               brw, surf_offset, mt->bo, mt->offset,
               format, mt->bo->size - mt->offset, 1 /* pitch */,
               written ? RELOC_WRITE : 0);

         } else {
            const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
            assert(!intel_miptree_has_color_unresolved(mt,
                                                       view.base_level, 1,
                                                       view.base_array_layer,
                                                       view.array_len));
            brw_emit_surface_state(brw, mt, mt->target, view,
                                   ISL_AUX_USAGE_NONE,
                                   surf_offset, surf_index,
                                   written ? RELOC_WRITE : 0);
         }

         isl_surf_fill_image_param(&brw->isl_dev, param, &mt->surf, &view);
      }

   } else {
      emit_null_surface_state(brw, NULL, surf_offset);
      update_default_image_param(brw, u, param);
   }
}

void
brw_upload_image_surfaces(struct brw_context *brw,
                          const struct gl_program *prog,
                          struct brw_stage_state *stage_state,
                          struct brw_stage_prog_data *prog_data)
{
   assert(prog);
   struct gl_context *ctx = &brw->ctx;

   if (prog->info.num_images) {
      for (unsigned i = 0; i < prog->info.num_images; i++) {
         struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
         const unsigned surf_idx = prog_data->binding_table.image_start + i;

         update_image_surface(brw, u, prog->sh.ImageAccess[i],
                              &stage_state->surf_offset[surf_idx],
                              &stage_state->image_param[i]);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
      /* This may have changed the image metadata that depends on the context
       * image unit state and is passed to the program as uniforms, so make
       * sure push and pull constants are re-uploaded.
       */
      brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
   }
}

static void
brw_upload_wm_image_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct gl_program *wm = brw->programs[MESA_SHADER_FRAGMENT];

   if (wm) {
      /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, wm, &brw->wm.base,
                                brw->wm.base.prog_data);
   }
}

const struct brw_tracked_state brw_wm_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_wm_image_surfaces,
};

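/* Emit the buffer surface backing gl_NumWorkGroups: the indirect dispatch
 * BO when one is bound, otherwise a small upload of the CPU-provided group
 * counts.
 */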
static void
brw_upload_cs_work_groups_surface(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
   /* BRW_NEW_CS_PROG_DATA */
   const struct brw_cs_prog_data *cs_prog_data =
      brw_cs_prog_data(brw->cs.base.prog_data);

   if (prog && cs_prog_data->uses_num_work_groups) {
      const unsigned surf_idx =
         cs_prog_data->binding_table.work_groups_start;
      uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
      struct brw_bo *bo;
      uint32_t bo_offset;

      if (brw->compute.num_work_groups_bo == NULL) {
         bo = NULL;
         brw_upload_data(&brw->upload,
                         (void *)brw->compute.num_work_groups,
                         3 * sizeof(GLuint),
                         sizeof(GLuint),
                         &bo,
                         &bo_offset);
      } else {
         bo = brw->compute.num_work_groups_bo;
         bo_offset = brw->compute.num_work_groups_offset;
      }

      brw_emit_buffer_surface_state(brw, surf_offset,
                                    bo, bo_offset,
                                    ISL_FORMAT_RAW,
                                    3 * sizeof(GLuint), 1,
                                    RELOC_WRITE);
      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_cs_work_groups_surface = {
   .dirty = {
      .brw = BRW_NEW_CS_PROG_DATA |
             BRW_NEW_CS_WORK_GROUPS
   },
   .emit = brw_upload_cs_work_groups_surface,
};