mesa.git: src/mesa/drivers/dri/i965/brw_wm_surface_state.c @ 4daa0e2add1380cae252098d67c8d262af7bbbbd
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "compiler/nir/nir.h"
#include "main/context.h"
#include "main/blend.h"
#include "main/mtypes.h"
#include "main/samplerobj.h"
#include "main/shaderimage.h"
#include "main/teximage.h"
#include "program/prog_parameter.h"
#include "program/prog_instruction.h"
#include "main/framebuffer.h"
#include "main/shaderapi.h"

#include "isl/isl.h"

#include "intel_mipmap_tree.h"
#include "intel_batchbuffer.h"
#include "intel_tex.h"
#include "intel_fbo.h"
#include "intel_buffer_objects.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "brw_wm.h"

uint32_t wb_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_WB,
   [9] = SKL_MOCS_WB,
   [10] = CNL_MOCS_WB,
   [11] = ICL_MOCS_WB,
};

uint32_t pte_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_PTE,
   [9] = SKL_MOCS_PTE,
   [10] = CNL_MOCS_PTE,
   [11] = ICL_MOCS_PTE,
};

uint32_t
brw_get_bo_mocs(const struct gen_device_info *devinfo, struct brw_bo *bo)
{
   return (bo && bo->external ? pte_mocs : wb_mocs)[devinfo->gen];
}
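
/* For example, on a gen9 part a buffer marked external (shared outside
 * the driver, e.g. for scanout) takes pte_mocs[9] == SKL_MOCS_PTE,
 * deferring cacheability to the page tables, while ordinary internal
 * buffers get the fully cached SKL_MOCS_WB entry.
 */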

static void
get_isl_surf(struct brw_context *brw, struct intel_mipmap_tree *mt,
             GLenum target, struct isl_view *view,
             uint32_t *tile_x, uint32_t *tile_y,
             uint32_t *offset, struct isl_surf *surf)
{
   *surf = mt->surf;

   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const enum isl_dim_layout dim_layout =
      get_isl_dim_layout(devinfo, mt->surf.tiling, target);

   surf->dim = get_isl_surf_dim(target);

   if (surf->dim_layout == dim_layout)
      return;

   /* The layout of the specified texture target is not compatible with the
    * actual layout of the miptree structure in memory -- you're entering
    * dangerous territory.  This can only possibly work if you only intended
    * to access a single level and slice of the texture, and the hardware
    * supports the tile offset feature in order to allow non-tile-aligned
    * base offsets, since we'll have to point the hardware to the first
    * texel of the level instead of relying on the usual base level/layer
    * controls.
    */
   assert(devinfo->has_surface_tile_offset);
   assert(view->levels == 1 && view->array_len == 1);
   assert(*tile_x == 0 && *tile_y == 0);

   *offset += intel_miptree_get_tile_offsets(mt, view->base_level,
                                             view->base_array_layer,
                                             tile_x, tile_y);

   /* Minify the logical dimensions of the texture. */
   const unsigned l = view->base_level - mt->first_level;
   surf->logical_level0_px.width = minify(surf->logical_level0_px.width, l);
   surf->logical_level0_px.height = surf->dim <= ISL_SURF_DIM_1D ? 1 :
      minify(surf->logical_level0_px.height, l);
   surf->logical_level0_px.depth = surf->dim <= ISL_SURF_DIM_2D ? 1 :
      minify(surf->logical_level0_px.depth, l);
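   /* minify(x, l) is MAX2(1, x >> l), so e.g. pointing the view at level 2
    * of a 256x64 2D miptree re-bases it as a single-level 64x16 surface.
    */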

   /* Only the base level and layer can be addressed with the overridden
    * layout.
    */
   surf->logical_level0_px.array_len = 1;
   surf->levels = 1;
   surf->dim_layout = dim_layout;

   /* The requested slice of the texture is now at the base level and
    * layer.
    */
   view->base_level = 0;
   view->base_array_layer = 0;
}

static void
brw_emit_surface_state(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       GLenum target, struct isl_view view,
                       enum isl_aux_usage aux_usage,
                       uint32_t *surf_offset, int surf_index,
                       unsigned reloc_flags)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t tile_x = mt->level[0].level_x;
   uint32_t tile_y = mt->level[0].level_y;
   uint32_t offset = mt->offset;

   struct isl_surf surf;

   get_isl_surf(brw, mt, target, &view, &tile_x, &tile_y, &offset, &surf);

   union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };

   struct brw_bo *aux_bo = NULL;
   struct isl_surf *aux_surf = NULL;
   uint64_t aux_offset = 0;
   struct brw_bo *clear_bo = NULL;
   uint32_t clear_offset = 0;

   if (aux_usage != ISL_AUX_USAGE_NONE) {
      aux_surf = &mt->aux_buf->surf;
      aux_bo = mt->aux_buf->bo;
      aux_offset = mt->aux_buf->offset;

      /* We only really need a clear color if we also have an auxiliary
       * surface.  Without one, it does nothing.
       */
      clear_color =
         intel_miptree_get_clear_color(devinfo, mt, view.format,
                                       view.usage & ISL_SURF_USAGE_TEXTURE_BIT,
                                       &clear_bo, &clear_offset);
   }

   void *state = brw_state_batch(brw,
                                 brw->isl_dev.ss.size,
                                 brw->isl_dev.ss.align,
                                 surf_offset);

   isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
                       .address = brw_state_reloc(&brw->batch,
                                                  *surf_offset + brw->isl_dev.ss.addr_offset,
                                                  mt->bo, offset, reloc_flags),
                       .aux_surf = aux_surf, .aux_usage = aux_usage,
                       .aux_address = aux_offset,
                       .mocs = brw_get_bo_mocs(devinfo, mt->bo),
                       .clear_color = clear_color,
                       .use_clear_address = clear_bo != NULL,
                       .clear_address = clear_offset,
                       .x_offset_sa = tile_x, .y_offset_sa = tile_y);

   if (aux_surf) {
      /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
       * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
       * contain other control information.  Since buffer addresses are always
       * on 4k boundaries (and thus have their lower 12 bits zero), we can use
       * an ordinary reloc to do the necessary address translation.
       *
       * FIXME: move to the point of assignment.
       */
      assert((aux_offset & 0xfff) == 0);
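      /* For example, an aux buffer at GPU address 0x0012f000 has its low
       * 12 bits zero, so adding the control bits already stored in the
       * DWORD (passed below as the reloc delta, *aux_addr) can never
       * carry into the address bits.
       */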

      if (devinfo->gen >= 8) {
         uint64_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
         *aux_addr = brw_state_reloc(&brw->batch,
                                     *surf_offset +
                                     brw->isl_dev.ss.aux_addr_offset,
                                     aux_bo, *aux_addr,
                                     reloc_flags);
      } else {
         uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
         *aux_addr = brw_state_reloc(&brw->batch,
                                     *surf_offset +
                                     brw->isl_dev.ss.aux_addr_offset,
                                     aux_bo, *aux_addr,
                                     reloc_flags);
      }
   }

   if (clear_bo != NULL) {
      /* Make sure the offset is aligned with a cacheline. */
      assert((clear_offset & 0x3f) == 0);
      uint64_t *clear_address =
         state + brw->isl_dev.ss.clear_color_state_offset;
      *clear_address = brw_state_reloc(&brw->batch,
                                       *surf_offset +
                                       brw->isl_dev.ss.clear_color_state_offset,
                                       clear_bo, *clear_address, reloc_flags);
   }
}

static uint32_t
gen6_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 unsigned unit,
                                 uint32_t surf_index)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;

   assert(brw_render_target_supported(brw, rb));

   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }
   enum isl_format isl_format = brw->mesa_to_isl_render_format[rb_format];

   struct isl_view view = {
      .format = isl_format,
      .base_level = irb->mt_level - irb->mt->first_level,
      .levels = 1,
      .base_array_layer = irb->mt_layer,
      .array_len = MAX2(irb->layer_count, 1),
      .swizzle = ISL_SWIZZLE_IDENTITY,
      .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
   };

   uint32_t offset;
   brw_emit_surface_state(brw, mt, mt->target, view,
                          brw->draw_aux_usage[unit],
                          &offset, surf_index,
                          RELOC_WRITE);
   return offset;
}

GLuint
translate_tex_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_1D:
   case GL_TEXTURE_1D_ARRAY_EXT:
      return BRW_SURFACE_1D;

   case GL_TEXTURE_RECTANGLE_NV:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_2D:
   case GL_TEXTURE_2D_ARRAY_EXT:
   case GL_TEXTURE_EXTERNAL_OES:
   case GL_TEXTURE_2D_MULTISAMPLE:
   case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_3D:
      return BRW_SURFACE_3D;

   case GL_TEXTURE_CUBE_MAP:
   case GL_TEXTURE_CUBE_MAP_ARRAY:
      return BRW_SURFACE_CUBE;

   default:
      unreachable("not reached");
   }
}

uint32_t
brw_get_surface_tiling_bits(enum isl_tiling tiling)
{
   switch (tiling) {
   case ISL_TILING_X:
      return BRW_SURFACE_TILED;
   case ISL_TILING_Y0:
      return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
   default:
      return 0;
   }
}


uint32_t
brw_get_surface_num_multisamples(unsigned num_samples)
{
   if (num_samples > 1)
      return BRW_SURFACE_MULTISAMPLECOUNT_4;
   else
      return BRW_SURFACE_MULTISAMPLECOUNT_1;
}

/**
 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
 * swizzling.
 */
int
brw_get_texture_swizzle(const struct gl_context *ctx,
                        const struct gl_texture_object *t)
{
   const struct gl_texture_image *img = t->Image[0][t->BaseLevel];

   int swizzles[SWIZZLE_NIL + 1] = {
      SWIZZLE_X,
      SWIZZLE_Y,
      SWIZZLE_Z,
      SWIZZLE_W,
      SWIZZLE_ZERO,
      SWIZZLE_ONE,
      SWIZZLE_NIL
   };

   if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
       img->_BaseFormat == GL_DEPTH_STENCIL) {
      GLenum depth_mode = t->DepthMode;

      /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
       * with depth component data specified with a sized internal format.
       * Otherwise, it's left at the old default, GL_LUMINANCE.
       */
      if (_mesa_is_gles3(ctx) &&
          img->InternalFormat != GL_DEPTH_COMPONENT &&
          img->InternalFormat != GL_DEPTH_STENCIL) {
         depth_mode = GL_RED;
      }

      switch (depth_mode) {
      case GL_ALPHA:
         swizzles[0] = SWIZZLE_ZERO;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_LUMINANCE:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
         break;
      case GL_INTENSITY:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_RED:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_ONE;
         break;
      }
   }

   GLenum datatype = _mesa_get_format_datatype(img->TexFormat);

   /* If the texture's format is alpha-only, force R, G, and B to
    * 0.0.  Similarly, if the texture's format has no alpha channel,
    * force the alpha value read to 1.0.  This allows for the
    * implementation to use an RGBA texture for any of these formats
    * without leaking any unexpected values.
    */
   switch (img->_BaseFormat) {
   case GL_ALPHA:
      swizzles[0] = SWIZZLE_ZERO;
      swizzles[1] = SWIZZLE_ZERO;
      swizzles[2] = SWIZZLE_ZERO;
      break;
   case GL_LUMINANCE:
      if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
      }
      break;
   case GL_LUMINANCE_ALPHA:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_W;
      }
      break;
   case GL_INTENSITY:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
      }
      break;
   case GL_RED:
      if (img->TexFormat == MESA_FORMAT_R_SRGB8) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_ONE;
         break;
      }
      /* fallthrough */
   case GL_RG:
   case GL_RGB:
      if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0 ||
          img->TexFormat == MESA_FORMAT_RGB_DXT1 ||
          img->TexFormat == MESA_FORMAT_SRGB_DXT1)
         swizzles[3] = SWIZZLE_ONE;
      break;
   }

   return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
                        swizzles[GET_SWZ(t->_Swizzle, 1)],
                        swizzles[GET_SWZ(t->_Swizzle, 2)],
                        swizzles[GET_SWZ(t->_Swizzle, 3)]);
}
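
/* The final MAKE_SWIZZLE4 composes the app's EXT_texture_swizzle state
 * with the format-derived table above: e.g. a GL_ALPHA texture sampled
 * with the user swizzle (ALPHA, ALPHA, ALPHA, ONE) reads swizzles[SWIZZLE_W]
 * three times, yielding (A, A, A, 1) rather than leaking the RGB channels
 * of the underlying RGBA storage.
 */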

/**
 * Convert a swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED).  The mappings are
 *
 *   SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
 *           0          1          2          3            4            5
 *           4          5          6          7            0            1
 *     SCS_RED, SCS_GREEN,  SCS_BLUE, SCS_ALPHA,    SCS_ZERO,     SCS_ONE
 *
 * which is simply adding 4 then modding by 8 (or anding with 7).
 *
 * We then may need to apply workarounds for textureGather hardware bugs.
 */
static unsigned
swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
{
   unsigned scs = (swizzle + 4) & 7;

   return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
}
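
/* Worked example: SWIZZLE_Y == 1, so (1 + 4) & 7 == 5 == HSW_SCS_GREEN;
 * with the gather workaround active (need_green_to_blue), that green
 * select is redirected to HSW_SCS_BLUE, matching the R32G32_FLOAT_LD
 * trick used in brw_update_texture_surface() below.
 */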

static void brw_update_texture_surface(struct gl_context *ctx,
                                       unsigned unit,
                                       uint32_t *surf_offset,
                                       bool for_gather,
                                       bool for_txf,
                                       uint32_t plane)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;

   if (obj->Target == GL_TEXTURE_BUFFER) {
      brw_update_buffer_texture_surface(ctx, unit, surf_offset);

   } else {
      struct intel_texture_object *intel_obj = intel_texture_object(obj);
      struct intel_mipmap_tree *mt = intel_obj->mt;

      if (plane > 0) {
         if (mt->plane[plane - 1] == NULL)
            return;
         mt = mt->plane[plane - 1];
      }

      struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
      /* If this is a view with restricted NumLayers, then our effective depth
       * is not just the miptree depth.
       */
      unsigned view_num_layers;
      if (obj->Immutable && obj->Target != GL_TEXTURE_3D) {
         view_num_layers = obj->NumLayers;
      } else {
         view_num_layers = mt->surf.dim == ISL_SURF_DIM_3D ?
                              mt->surf.logical_level0_px.depth :
                              mt->surf.logical_level0_px.array_len;
      }

      /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
       * texturing functions that return a float, as our code generation always
       * selects the .x channel (which would always be 0).
       */
      struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
      const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
         (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
          firstImage->_BaseFormat == GL_DEPTH_STENCIL);
      const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
                                brw_get_texture_swizzle(&brw->ctx, obj));

      mesa_format mesa_fmt;
      if (firstImage->_BaseFormat == GL_DEPTH_STENCIL ||
          firstImage->_BaseFormat == GL_DEPTH_COMPONENT) {
         /* The format from intel_obj may be a combined depth stencil format
          * when we just want depth.  Pull it from the miptree instead.  This
          * is safe because texture views aren't allowed on depth/stencil.
          */
         mesa_fmt = mt->format;
      } else if (mt->etc_format != MESA_FORMAT_NONE) {
         mesa_fmt = mt->format;
      } else if (plane > 0) {
         mesa_fmt = mt->format;
      } else {
         mesa_fmt = intel_obj->_Format;
      }
      enum isl_format format = translate_tex_format(brw, mesa_fmt,
                                                    for_txf ? GL_DECODE_EXT :
                                                    sampler->sRGBDecode);

      /* Implement gen6 and gen7 gather work-around */
      bool need_green_to_blue = false;
      if (for_gather) {
         if (devinfo->gen == 7 && (format == ISL_FORMAT_R32G32_FLOAT ||
                                   format == ISL_FORMAT_R32G32_SINT ||
                                   format == ISL_FORMAT_R32G32_UINT)) {
            format = ISL_FORMAT_R32G32_FLOAT_LD;
            need_green_to_blue = devinfo->is_haswell;
         } else if (devinfo->gen == 6) {
            /* Sandybridge's gather4 message is broken for integer formats.
             * To work around this, we pretend the surface is UNORM for
             * 8 or 16-bit formats, and emit shader instructions to recover
             * the real INT/UINT value.  For 32-bit formats, we pretend
             * the surface is FLOAT, and simply reinterpret the resulting
             * bits.
             */
            switch (format) {
            case ISL_FORMAT_R8_SINT:
            case ISL_FORMAT_R8_UINT:
               format = ISL_FORMAT_R8_UNORM;
               break;

            case ISL_FORMAT_R16_SINT:
            case ISL_FORMAT_R16_UINT:
               format = ISL_FORMAT_R16_UNORM;
               break;

            case ISL_FORMAT_R32_SINT:
            case ISL_FORMAT_R32_UINT:
               format = ISL_FORMAT_R32_FLOAT;
               break;

            default:
               break;
            }
         }
      }

      if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
         if (devinfo->gen <= 7) {
            assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
            mt = mt->r8stencil_mt;
         } else {
            mt = mt->stencil_mt;
         }
         format = ISL_FORMAT_R8_UINT;
      } else if (devinfo->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
         assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
         mt = mt->r8stencil_mt;
         format = ISL_FORMAT_R8_UINT;
      }

      const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];

      struct isl_view view = {
         .format = format,
         .base_level = obj->MinLevel + obj->BaseLevel,
         .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
         .base_array_layer = obj->MinLayer,
         .array_len = view_num_layers,
         .swizzle = {
            .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
            .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
            .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
            .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
         },
         .usage = ISL_SURF_USAGE_TEXTURE_BIT,
      };

      /* On Ivy Bridge and earlier, we handle texture swizzle with shader
       * code.  The actual surface swizzle should be identity.
       */
      if (devinfo->gen <= 7 && !devinfo->is_haswell)
         view.swizzle = ISL_SWIZZLE_IDENTITY;

      if (obj->Target == GL_TEXTURE_CUBE_MAP ||
          obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
         view.usage |= ISL_SURF_USAGE_CUBE_BIT;

      enum isl_aux_usage aux_usage =
         intel_miptree_texture_aux_usage(brw, mt, format,
                                         brw->gen9_astc5x5_wa_tex_mask);

      brw_emit_surface_state(brw, mt, mt->target, view, aux_usage,
                             surf_offset, surf_index,
                             0);
   }
}

void
brw_emit_buffer_surface_state(struct brw_context *brw,
                              uint32_t *out_offset,
                              struct brw_bo *bo,
                              unsigned buffer_offset,
                              unsigned surface_format,
                              unsigned buffer_size,
                              unsigned pitch,
                              unsigned reloc_flags)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *dw = brw_state_batch(brw,
                                  brw->isl_dev.ss.size,
                                  brw->isl_dev.ss.align,
                                  out_offset);

   isl_buffer_fill_state(&brw->isl_dev, dw,
                         .address = !bo ? buffer_offset :
                                    brw_state_reloc(&brw->batch,
                                                    *out_offset + brw->isl_dev.ss.addr_offset,
                                                    bo, buffer_offset,
                                                    reloc_flags),
                         .size_B = buffer_size,
                         .format = surface_format,
                         .stride_B = pitch,
                         .mocs = brw_get_bo_mocs(devinfo, bo));
}

static unsigned
buffer_texture_range_size(struct brw_context *brw,
                          struct gl_texture_object *obj)
{
   assert(obj->Target == GL_TEXTURE_BUFFER);
   const unsigned texel_size = _mesa_get_format_bytes(obj->_BufferObjectFormat);
   const unsigned buffer_size = (!obj->BufferObject ? 0 :
                                 obj->BufferObject->Size);
   const unsigned buffer_offset = MIN2(buffer_size, obj->BufferOffset);

   /* The ARB_texture_buffer_object specification says:
    *
    *    "The number of texels in the buffer texture's texel array is given by
    *
    *       floor(<buffer_size> / (<components> * sizeof(<base_type>))),
    *
    *     where <buffer_size> is the size of the buffer object, in basic
    *     machine units and <components> and <base_type> are the element count
    *     and base data type for elements, as specified in Table X.1.  The
    *     number of texels in the texel array is then clamped to the
    *     implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
    *
    * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
    * so that when ISL divides by stride to obtain the number of texels, that
    * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
    */
   return MIN3((unsigned)obj->BufferSize,
               buffer_size - buffer_offset,
               brw->ctx.Const.MaxTextureBufferSize * texel_size);
}
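
/* Worked example: a 1 MiB buffer bound from offset 0 with a GL_RGBA32F
 * (16-byte texel) format gives 65536 texels.  The MIN3 above only shrinks
 * the range when glTexBufferRange() requested less, the buffer itself is
 * smaller, or the texel count would exceed the (driver-dependent)
 * MaxTextureBufferSize limit.
 */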

void
brw_update_buffer_texture_surface(struct gl_context *ctx,
                                  unsigned unit,
                                  uint32_t *surf_offset)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_buffer_object *intel_obj =
      intel_buffer_object(tObj->BufferObject);
   const unsigned size = buffer_texture_range_size(brw, tObj);
   struct brw_bo *bo = NULL;
   mesa_format format = tObj->_BufferObjectFormat;
   const enum isl_format isl_format = brw_isl_format_for_mesa_format(format);
   int texel_size = _mesa_get_format_bytes(format);

   if (intel_obj)
      bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size,
                                  false);

   if (isl_format == ISL_FORMAT_UNSUPPORTED) {
      _mesa_problem(NULL, "bad format %s for texture buffer\n",
                    _mesa_get_format_name(format));
   }

   brw_emit_buffer_surface_state(brw, surf_offset, bo,
                                 tObj->BufferOffset,
                                 isl_format,
                                 size,
                                 texel_size,
                                 0);
}

/**
 * Set up a binding table entry for use by stream output logic (transform
 * feedback).
 *
 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
 */
void
brw_update_sol_surface(struct brw_context *brw,
                       struct gl_buffer_object *buffer_obj,
                       uint32_t *out_offset, unsigned num_vector_components,
                       unsigned stride_dwords, unsigned offset_dwords)
{
   struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
   uint32_t offset_bytes = 4 * offset_dwords;
   struct brw_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
                                              offset_bytes,
                                              buffer_obj->Size - offset_bytes,
                                              true);
   uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
   uint32_t pitch_minus_1 = 4 * stride_dwords - 1;
   size_t size_dwords = buffer_obj->Size / 4;
   uint32_t buffer_size_minus_1, width, height, depth, surface_format;

   /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
    * too big to map using a single binding table entry?
    */
   assert((size_dwords - offset_dwords) / stride_dwords
          <= BRW_MAX_NUM_BUFFER_ENTRIES);

   if (size_dwords > offset_dwords + num_vector_components) {
      /* There is room for at least 1 transform feedback output in the buffer.
       * Compute the number of additional transform feedback outputs the
       * buffer has room for.
       */
      buffer_size_minus_1 =
         (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
   } else {
      /* There isn't even room for a single transform feedback output in the
       * buffer.  We can't configure the binding table entry to prevent output
       * entirely; we'll have to rely on the geometry shader to detect
       * overflow.  But to minimize the damage in case of a bug, set up the
       * binding table entry to just allow a single output.
       */
      buffer_size_minus_1 = 0;
   }
   width = buffer_size_minus_1 & 0x7f;
   height = (buffer_size_minus_1 & 0xfff80) >> 7;
   depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
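   /* The buffer SURFACE_STATE splits this zero-based entry count across
    * three fields: bits 6:0 land in Width, bits 19:7 in Height, and bits
    * 26:20 in Depth.  E.g. buffer_size_minus_1 == 0x123456 packs as
    * width == 0x56, height == 0x468, depth == 0x1.
    */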

   switch (num_vector_components) {
   case 1:
      surface_format = ISL_FORMAT_R32_FLOAT;
      break;
   case 2:
      surface_format = ISL_FORMAT_R32G32_FLOAT;
      break;
   case 3:
      surface_format = ISL_FORMAT_R32G32B32_FLOAT;
      break;
   case 4:
      surface_format = ISL_FORMAT_R32G32B32A32_FLOAT;
      break;
   default:
      unreachable("Invalid vector size for transform feedback output");
   }

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
             BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
             surface_format << BRW_SURFACE_FORMAT_SHIFT |
             BRW_SURFACE_RC_READ_WRITE;
   surf[1] = brw_state_reloc(&brw->batch,
                             *out_offset + 4, bo, offset_bytes, RELOC_WRITE);
   surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
              height << BRW_SURFACE_HEIGHT_SHIFT);
   surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = 0;
   surf[5] = 0;
}

/* Creates a new WM constant buffer reflecting the current fragment program's
 * constants, if needed by the fragment program.
 *
 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
 * state atom.
 */
static void
brw_upload_wm_pull_constants(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->wm.base;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct brw_program *fp =
      (struct brw_program *) brw->programs[MESA_SHADER_FRAGMENT];

   /* BRW_NEW_FS_PROG_DATA */
   struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;

   _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
   /* _NEW_PROGRAM_CONSTANTS */
   brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
                             stage_state, prog_data);
}

const struct brw_tracked_state brw_wm_pull_constants = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_pull_constants,
};

/**
 * Creates a null renderbuffer surface.
 *
 * This is used when the shader doesn't write to any color output.  An FB
 * write to target 0 will still be emitted, because that's how the thread is
 * terminated (and computed depth is returned), so we need to have the
 * hardware discard the target 0 color output.
 */
static void
emit_null_surface_state(struct brw_context *brw,
                        const struct gl_framebuffer *fb,
                        uint32_t *out_offset)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *surf = brw_state_batch(brw,
                                    brw->isl_dev.ss.size,
                                    brw->isl_dev.ss.align,
                                    out_offset);

   /* Use the fb dimensions or 1x1x1 */
   const unsigned width = fb ? _mesa_geometric_width(fb) : 1;
   const unsigned height = fb ? _mesa_geometric_height(fb) : 1;
   const unsigned samples = fb ? _mesa_geometric_samples(fb) : 1;

   if (devinfo->gen != 6 || samples <= 1) {
      isl_null_fill_state(&brw->isl_dev, surf,
                          isl_extent3d(width, height, 1));
      return;
   }

   /* On Gen6, null render targets seem to cause GPU hangs when multisampling.
    * So work around this problem by rendering into a dummy color buffer.
    *
    * To decrease the amount of memory needed by the workaround buffer, we
    * set its pitch to 128 bytes (the width of a Y tile).  This means that
    * the amount of memory needed for the workaround buffer is
    * (width_in_tiles + height_in_tiles - 1) tiles.
    *
    * Note that since the workaround buffer will be interpreted by the
    * hardware as an interleaved multisampled buffer, we need to compute
    * width_in_tiles and height_in_tiles by dividing the width and height
    * by 16 rather than the normal Y-tile size of 32.
    */
   unsigned width_in_tiles = ALIGN(width, 16) / 16;
   unsigned height_in_tiles = ALIGN(height, 16) / 16;
   unsigned pitch_minus_1 = 127;
   unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
   brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
                      size_needed);

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              ISL_FORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
   surf[1] = brw_state_reloc(&brw->batch, *out_offset + 4,
                             brw->wm.multisampled_null_render_target_bo,
                             0, RELOC_WRITE);

   surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   /* From the Sandy Bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
    * Notes):
    *
    *   If Surface Type is SURFTYPE_NULL, this field must be TRUE
    */
   surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = BRW_SURFACE_MULTISAMPLECOUNT_4;
   surf[5] = 0;
}

/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffers support.
 */
static uint32_t
gen4_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 unsigned unit,
                                 uint32_t surf_index)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;
   uint32_t *surf;
   uint32_t tile_x, tile_y;
   enum isl_format format;
   uint32_t offset;
   /* _NEW_BUFFERS */
   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   /* BRW_NEW_FS_PROG_DATA */

   if (rb->TexImage && !devinfo->has_surface_tile_offset) {
      intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);

      if (tile_x != 0 || tile_y != 0) {
         /* Original gen4 hardware couldn't draw to a non-tile-aligned
          * destination in a miptree unless you actually set up your
          * renderbuffer as a miptree and used the fragile
          * lod/array_index/etc. controls to select the image.  So, instead,
          * we just make a new single-level miptree and render into that.
          */
         intel_renderbuffer_move_to_temp(brw, irb, false);
         assert(irb->align_wa_mt);
         mt = irb->align_wa_mt;
      }
   }

   surf = brw_state_batch(brw, 6 * 4, 32, &offset);

   format = brw->mesa_to_isl_render_format[rb_format];
   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              format << BRW_SURFACE_FORMAT_SHIFT);

   /* reloc */
   assert(mt->offset % mt->cpp == 0);
   surf[1] = brw_state_reloc(&brw->batch, offset + 4, mt->bo,
                             mt->offset +
                             intel_renderbuffer_get_tile_offsets(irb,
                                                                 &tile_x,
                                                                 &tile_y),
                             RELOC_WRITE);

   surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   surf[3] = (brw_get_surface_tiling_bits(mt->surf.tiling) |
              (mt->surf.row_pitch_B - 1) << BRW_SURFACE_PITCH_SHIFT);

   surf[4] = brw_get_surface_num_multisamples(mt->surf.samples);

   assert(devinfo->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
   /* Note that the low bits of these fields are missing, so
    * there's the possibility of getting in trouble.
    */
   assert(tile_x % 4 == 0);
   assert(tile_y % 2 == 0);
   surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
              (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
              (mt->surf.image_alignment_el.height == 4 ?
               BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));

   if (devinfo->gen < 6) {
      /* _NEW_COLOR */
      if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
          (ctx->Color.BlendEnabled & (1 << unit)))
         surf[0] |= BRW_SURFACE_BLEND_ENABLED;

      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 0))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 1))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 2))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;

      /* Disable writes to the alpha component when the renderbuffer is
       * XRGB.
       */
      if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
          !GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 3)) {
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
      }
   }

   return offset;
}

static void
update_renderbuffer_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS | _NEW_COLOR */
   const struct gl_framebuffer *fb = ctx->DrawBuffer;

   /* Render targets always start at binding table index 0. */
   const unsigned rt_start = 0;

   uint32_t *surf_offsets = brw->wm.base.surf_offset;

   /* Update surfaces for drawing buffers */
   if (fb->_NumColorDrawBuffers >= 1) {
      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];

         if (intel_renderbuffer(rb)) {
            surf_offsets[rt_start + i] = devinfo->gen >= 6 ?
               gen6_update_renderbuffer_surface(brw, rb, i, rt_start + i) :
               gen4_update_renderbuffer_surface(brw, rb, i, rt_start + i);
         } else {
            emit_null_surface_state(brw, fb, &surf_offsets[rt_start + i]);
         }
      }
   } else {
      emit_null_surface_state(brw, fb, &surf_offsets[rt_start]);
   }

   /* The PIPE_CONTROL command description says:
    *
    *   "Whenever a Binding Table Index (BTI) used by a Render Target Message
    *    points to a different RENDER_SURFACE_STATE, SW must issue a Render
    *    Target Cache Flush by enabling this bit.  When render target flush
    *    is set due to new association of BTI, PS Scoreboard Stall bit must
    *    be set in this packet."
    */
   if (devinfo->gen >= 11) {
      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                  PIPE_CONTROL_STALL_AT_SCOREBOARD);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR,
      .brw = BRW_NEW_BATCH,
   },
   .emit = update_renderbuffer_surfaces,
};

const struct brw_tracked_state gen6_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE,
   },
   .emit = update_renderbuffer_surfaces,
};

static void
update_renderbuffer_read_surfaces(struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;

   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);

   if (wm_prog_data->has_render_target_reads &&
       !ctx->Extensions.EXT_shader_framebuffer_fetch) {
      /* _NEW_BUFFERS */
      const struct gl_framebuffer *fb = ctx->DrawBuffer;

      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
         const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
         const unsigned surf_index =
            wm_prog_data->binding_table.render_target_read_start + i;
         uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];

         if (irb) {
            const enum isl_format format = brw->mesa_to_isl_render_format[
               _mesa_get_render_format(ctx, intel_rb_format(irb))];
            assert(isl_format_supports_sampling(&brw->screen->devinfo,
                                                format));

            /* Override the target of the texture if the render buffer is a
             * single slice of a 3D texture (since the minimum array element
             * field of the surface state structure is ignored by the sampler
             * unit for 3D textures on some hardware), or if the render buffer
             * is a 1D array (since shaders always provide the array index
             * coordinate at the Z component to avoid state-dependent
             * recompiles when changing the texture target of the
             * framebuffer).
             */
            const GLenum target =
               (irb->mt->target == GL_TEXTURE_3D &&
                irb->layer_count == 1) ? GL_TEXTURE_2D :
               irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
               irb->mt->target;

            const struct isl_view view = {
               .format = format,
               .base_level = irb->mt_level - irb->mt->first_level,
               .levels = 1,
               .base_array_layer = irb->mt_layer,
               .array_len = irb->layer_count,
               .swizzle = ISL_SWIZZLE_IDENTITY,
               .usage = ISL_SURF_USAGE_TEXTURE_BIT,
            };

            enum isl_aux_usage aux_usage =
               intel_miptree_texture_aux_usage(brw, irb->mt, format,
                                               brw->gen9_astc5x5_wa_tex_mask);
            if (brw->draw_aux_usage[i] == ISL_AUX_USAGE_NONE)
               aux_usage = ISL_AUX_USAGE_NONE;

            brw_emit_surface_state(brw, irb->mt, target, view, aux_usage,
                                   surf_offset, surf_index,
                                   0);

         } else {
            emit_null_surface_state(brw, fb, surf_offset);
         }
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = update_renderbuffer_read_surfaces,
};

static bool
is_depth_texture(struct intel_texture_object *iobj)
{
   GLenum base_format = _mesa_get_format_base_format(iobj->_Format);
   return base_format == GL_DEPTH_COMPONENT ||
          (base_format == GL_DEPTH_STENCIL && !iobj->base.StencilSampling);
}

static void
update_stage_texture_surfaces(struct brw_context *brw,
                              const struct gl_program *prog,
                              struct brw_stage_state *stage_state,
                              bool for_gather, uint32_t plane)
{
   if (!prog)
      return;

   struct gl_context *ctx = &brw->ctx;

   uint32_t *surf_offset = stage_state->surf_offset;

   /* BRW_NEW_*_PROG_DATA */
   if (for_gather)
      surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
   else
      surf_offset += stage_state->prog_data->binding_table.plane_start[plane];

   unsigned num_samplers = util_last_bit(prog->SamplersUsed);
   for (unsigned s = 0; s < num_samplers; s++) {
      surf_offset[s] = 0;

      if (prog->SamplersUsed & (1 << s)) {
         const unsigned unit = prog->SamplerUnits[s];
         const bool used_by_txf = prog->info.textures_used_by_txf & (1 << s);
         struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
         struct intel_texture_object *iobj = intel_texture_object(obj);

         /* _NEW_TEXTURE */
         if (!obj)
            continue;

         if ((prog->ShadowSamplers & (1 << s)) && !is_depth_texture(iobj)) {
            /* A programming note for the sample_c message says:
             *
             *    "The Surface Format of the associated surface must be
             *     indicated as supporting shadow mapping as indicated in the
             *     surface format table."
             *
             * Accessing non-depth textures via a sampler*Shadow type is
             * undefined.  GLSL 4.50 page 162 says:
             *
             *    "If a shadow texture call is made to a sampler that does not
             *     represent a depth texture, then results are undefined."
             *
             * We give them a null surface (zeros) for undefined.  We've seen
             * GPU hangs with color buffers and sample_c, so we try and avoid
             * those with this hack.
             */
            emit_null_surface_state(brw, NULL, surf_offset + s);
         } else {
            brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather,
                                       used_by_txf, plane);
         }
      }
   }
}


/**
 * Construct SURFACE_STATE objects for enabled textures.
 */
static void
brw_update_texture_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* BRW_NEW_VERTEX_PROGRAM */
   struct gl_program *vs = brw->programs[MESA_SHADER_VERTEX];

   /* BRW_NEW_TESS_PROGRAMS */
   struct gl_program *tcs = brw->programs[MESA_SHADER_TESS_CTRL];
   struct gl_program *tes = brw->programs[MESA_SHADER_TESS_EVAL];

   /* BRW_NEW_GEOMETRY_PROGRAM */
   struct gl_program *gs = brw->programs[MESA_SHADER_GEOMETRY];

   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct gl_program *fs = brw->programs[MESA_SHADER_FRAGMENT];

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
   update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
   update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
   update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
   update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);

   /* Emit an alternate set of surface state for gather.  This allows the
    * surface format to be overridden for only the gather4 messages.
    */
   if (devinfo->gen < 8) {
      if (vs && vs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
      if (tcs && tcs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
      if (tes && tes->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
      if (gs && gs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
      if (fs && fs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
   }

   if (fs) {
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_GEOMETRY_PROGRAM |
             BRW_NEW_GS_PROG_DATA |
             BRW_NEW_TESS_PROGRAMS |
             BRW_NEW_TCS_PROG_DATA |
             BRW_NEW_TES_PROG_DATA |
             BRW_NEW_TEXTURE_BUFFER |
             BRW_NEW_VERTEX_PROGRAM |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = brw_update_texture_surfaces,
};

static void
brw_update_cs_texture_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* BRW_NEW_COMPUTE_PROGRAM */
   struct gl_program *cs = brw->programs[MESA_SHADER_COMPUTE];

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);

   /* Emit an alternate set of surface state for gather.  This allows the
    * surface format to be overridden for only the gather4 messages.
    */
   if (devinfo->gen < 8) {
      if (cs && cs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_cs_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_COMPUTE_PROGRAM |
             BRW_NEW_AUX_STATE,
   },
   .emit = brw_update_cs_texture_surfaces,
};

static void
upload_buffer_surface(struct brw_context *brw,
                      struct gl_buffer_binding *binding,
                      uint32_t *out_offset,
                      enum isl_format format,
                      unsigned reloc_flags)
{
   struct gl_context *ctx = &brw->ctx;

   if (binding->BufferObject == ctx->Shared->NullBufferObj) {
      emit_null_surface_state(brw, NULL, out_offset);
   } else {
      ptrdiff_t size = binding->BufferObject->Size - binding->Offset;
      if (!binding->AutomaticSize)
         size = MIN2(size, binding->Size);

      if (size == 0) {
         emit_null_surface_state(brw, NULL, out_offset);
         return;
      }

      struct intel_buffer_object *iobj =
         intel_buffer_object(binding->BufferObject);
      struct brw_bo *bo =
         intel_bufferobj_buffer(brw, iobj, binding->Offset, size,
                                (reloc_flags & RELOC_WRITE) != 0);

      brw_emit_buffer_surface_state(brw, out_offset, bo, binding->Offset,
                                    format, size, 1, reloc_flags);
   }
}

void
brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;

   if (!prog || (prog->info.num_ubos == 0 &&
                 prog->info.num_ssbos == 0 &&
                 prog->info.num_abos == 0))
      return;

   uint32_t *ubo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ubo_start];

   for (int i = 0; i < prog->info.num_ubos; i++) {
      struct gl_buffer_binding *binding =
         &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];
      upload_buffer_surface(brw, binding, &ubo_surf_offsets[i],
                            ISL_FORMAT_R32G32B32A32_FLOAT, 0);
   }

   uint32_t *abo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
   uint32_t *ssbo_surf_offsets = abo_surf_offsets + prog->info.num_abos;

   for (int i = 0; i < prog->info.num_abos; i++) {
      struct gl_buffer_binding *binding =
         &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
      upload_buffer_surface(brw, binding, &abo_surf_offsets[i],
                            ISL_FORMAT_RAW, RELOC_WRITE);
   }

   for (int i = 0; i < prog->info.num_ssbos; i++) {
      struct gl_buffer_binding *binding =
         &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];

      upload_buffer_surface(brw, binding, &ssbo_surf_offsets[i],
                            ISL_FORMAT_RAW, RELOC_WRITE);
   }

   stage_state->push_constants_dirty = true;
   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

static void
brw_upload_wm_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog = ctx->FragmentProgram._Current;

   /* BRW_NEW_FS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
}

const struct brw_tracked_state brw_wm_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_wm_ubo_surfaces,
};

static void
brw_upload_cs_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   /* BRW_NEW_CS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog, &brw->cs.base, brw->cs.base.prog_data);
}

const struct brw_tracked_state brw_cs_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_cs_ubo_surfaces,
};

static void
brw_upload_cs_image_surfaces(struct brw_context *brw)
{
   /* _NEW_PROGRAM */
   const struct gl_program *cp = brw->programs[MESA_SHADER_COMPUTE];

   if (cp) {
      /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, cp, &brw->cs.base,
                                brw->cs.base.prog_data);
   }
}

const struct brw_tracked_state brw_cs_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_AUX_STATE |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_cs_image_surfaces,
};

static uint32_t
get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   enum isl_format hw_format = brw_isl_format_for_mesa_format(format);
   if (access == GL_WRITE_ONLY || access == GL_NONE) {
      return hw_format;
   } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
      /* Typed surface reads support a very limited subset of the shader
       * image formats.  Translate it into the closest format the
       * hardware supports.
       */
      return isl_lower_storage_image_format(devinfo, hw_format);
   } else {
      /* The hardware doesn't actually support a typed format that we can use
       * so we have to fall back to untyped read/write messages.
       */
      return ISL_FORMAT_RAW;
   }
}
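
/* Summary of the three cases above: write-only images keep their exact
 * format; readable images are lowered by isl_lower_storage_image_format()
 * to the nearest format the hardware supports for typed reads; and formats
 * with no typed-read equivalent become ISL_FORMAT_RAW and are accessed
 * through untyped read/write messages instead.
 */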

static void
update_default_image_param(struct brw_context *brw,
                           struct gl_image_unit *u,
                           struct brw_image_param *param)
{
   memset(param, 0, sizeof(*param));
   /* Set the swizzling shifts to all-ones to effectively disable swizzling --
    * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
    * detailed explanation of these parameters.
    */
   param->swizzling[0] = 0xff;
   param->swizzling[1] = 0xff;
}

static void
update_buffer_image_param(struct brw_context *brw,
                          struct gl_image_unit *u,
                          struct brw_image_param *param)
{
   const unsigned size = buffer_texture_range_size(brw, u->TexObj);
   update_default_image_param(brw, u, param);

   param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
   param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
}
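
/* E.g. a buffer image bound over a 4096-byte range with an RGBA32F
 * (16-byte) format reports size[0] == 256 texels and stride[0] == 16;
 * the image-load-store lowering uses size[0] for its bounds checks.
 */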

static void
update_image_surface(struct brw_context *brw,
                     struct gl_image_unit *u,
                     GLenum access,
                     uint32_t *surf_offset,
                     struct brw_image_param *param)
{
   if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
      struct gl_texture_object *obj = u->TexObj;
      const unsigned format = get_image_format(brw, u->_ActualFormat, access);
      const bool written = (access != GL_READ_ONLY && access != GL_NONE);

      if (obj->Target == GL_TEXTURE_BUFFER) {
         const unsigned texel_size = (format == ISL_FORMAT_RAW ? 1 :
                                      _mesa_get_format_bytes(u->_ActualFormat));
         const unsigned buffer_size = buffer_texture_range_size(brw, obj);
         struct brw_bo *const bo = !obj->BufferObject ? NULL :
            intel_bufferobj_buffer(brw, intel_buffer_object(obj->BufferObject),
                                   obj->BufferOffset, buffer_size, written);

         brw_emit_buffer_surface_state(
            brw, surf_offset, bo, obj->BufferOffset,
            format, buffer_size, texel_size,
            written ? RELOC_WRITE : 0);

         update_buffer_image_param(brw, u, param);

      } else {
         struct intel_texture_object *intel_obj = intel_texture_object(obj);
         struct intel_mipmap_tree *mt = intel_obj->mt;

         unsigned base_layer, num_layers;
         if (u->Layered) {
            if (obj->Target == GL_TEXTURE_3D) {
               base_layer = 0;
               num_layers = minify(mt->surf.logical_level0_px.depth, u->Level);
            } else {
               assert(obj->Immutable || obj->MinLayer == 0);
               base_layer = obj->MinLayer;
               num_layers = obj->Immutable ?
                               obj->NumLayers :
                               mt->surf.logical_level0_px.array_len;
            }
         } else {
            base_layer = obj->MinLayer + u->_Layer;
            num_layers = 1;
         }

         struct isl_view view = {
            .format = format,
            .base_level = obj->MinLevel + u->Level,
            .levels = 1,
            .base_array_layer = base_layer,
            .array_len = num_layers,
            .swizzle = ISL_SWIZZLE_IDENTITY,
            .usage = ISL_SURF_USAGE_STORAGE_BIT,
         };

         if (format == ISL_FORMAT_RAW) {
            brw_emit_buffer_surface_state(
               brw, surf_offset, mt->bo, mt->offset,
               format, mt->bo->size - mt->offset, 1 /* pitch */,
               written ? RELOC_WRITE : 0);

         } else {
            const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
            assert(!intel_miptree_has_color_unresolved(mt,
                                                       view.base_level, 1,
                                                       view.base_array_layer,
                                                       view.array_len));
            brw_emit_surface_state(brw, mt, mt->target, view,
                                   ISL_AUX_USAGE_NONE,
                                   surf_offset, surf_index,
                                   written ? RELOC_WRITE : 0);
         }

         isl_surf_fill_image_param(&brw->isl_dev, param, &mt->surf, &view);
      }

   } else {
      emit_null_surface_state(brw, NULL, surf_offset);
      update_default_image_param(brw, u, param);
   }
}

void
brw_upload_image_surfaces(struct brw_context *brw,
                          const struct gl_program *prog,
                          struct brw_stage_state *stage_state,
                          struct brw_stage_prog_data *prog_data)
{
   assert(prog);
   struct gl_context *ctx = &brw->ctx;

   if (prog->info.num_images) {
      for (unsigned i = 0; i < prog->info.num_images; i++) {
         struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
         const unsigned surf_idx = prog_data->binding_table.image_start + i;

         update_image_surface(brw, u, prog->sh.ImageAccess[i],
                              &stage_state->surf_offset[surf_idx],
                              &stage_state->image_param[i]);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
      /* This may have changed the image metadata dependent on the context
       * image unit state and passed to the program as uniforms, make sure
       * that push and pull constants are reuploaded.
       */
      brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
   }
}

static void
brw_upload_wm_image_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct gl_program *wm = brw->programs[MESA_SHADER_FRAGMENT];

   if (wm) {
      /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, wm, &brw->wm.base,
                                brw->wm.base.prog_data);
   }
}

const struct brw_tracked_state brw_wm_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_wm_image_surfaces,
};

static void
brw_upload_cs_work_groups_surface(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
   /* BRW_NEW_CS_PROG_DATA */
   const struct brw_cs_prog_data *cs_prog_data =
      brw_cs_prog_data(brw->cs.base.prog_data);

   if (prog && cs_prog_data->uses_num_work_groups) {
      const unsigned surf_idx =
         cs_prog_data->binding_table.work_groups_start;
      uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
      struct brw_bo *bo;
      uint32_t bo_offset;

      if (brw->compute.num_work_groups_bo == NULL) {
         bo = NULL;
         brw_upload_data(&brw->upload,
                         (void *)brw->compute.num_work_groups,
                         3 * sizeof(GLuint),
                         sizeof(GLuint),
                         &bo,
                         &bo_offset);
      } else {
         bo = brw->compute.num_work_groups_bo;
         bo_offset = brw->compute.num_work_groups_offset;
      }

      brw_emit_buffer_surface_state(brw, surf_offset,
                                    bo, bo_offset,
                                    ISL_FORMAT_RAW,
                                    3 * sizeof(GLuint), 1,
                                    RELOC_WRITE);
      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_cs_work_groups_surface = {
   .dirty = {
      .brw = BRW_NEW_CS_PROG_DATA |
             BRW_NEW_CS_WORK_GROUPS
   },
   .emit = brw_upload_cs_work_groups_surface,
};