/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
 /*
  * Authors:
  *   Keith Whitwell <keithw@vmware.com>
  */


#include "compiler/nir/nir.h"
#include "main/context.h"
#include "main/blend.h"
#include "main/mtypes.h"
#include "main/samplerobj.h"
#include "main/shaderimage.h"
#include "main/teximage.h"
#include "program/prog_parameter.h"
#include "program/prog_instruction.h"
#include "main/framebuffer.h"
#include "main/shaderapi.h"

#include "isl/isl.h"

#include "intel_mipmap_tree.h"
#include "intel_batchbuffer.h"
#include "intel_tex.h"
#include "intel_fbo.h"
#include "intel_buffer_objects.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "brw_wm.h"

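/* Memory object control state (MOCS) look-up tables, indexed by hardware
 * generation.  The WB entries select ordinary write-back caching; the PTE
 * entries tell the hardware to honor the caching bits in the kernel's page
 * tables, which is what buffers shared outside the driver (e.g. scanout)
 * require.
 */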
static const uint32_t wb_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_WB,
   [9] = SKL_MOCS_WB,
   [10] = CNL_MOCS_WB,
   [11] = ICL_MOCS_WB,
};

static const uint32_t pte_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_PTE,
   [9] = SKL_MOCS_PTE,
   [10] = CNL_MOCS_PTE,
   [11] = ICL_MOCS_PTE,
};

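/* Pick the MOCS entry for a BO: external (imported or exported) BOs use the
 * PTE-based entry so the kernel stays in control of caching; everything
 * else gets plain write-back.
 */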
uint32_t
brw_get_bo_mocs(const struct gen_device_info *devinfo, struct brw_bo *bo)
{
   return (bo && bo->external ? pte_mocs : wb_mocs)[devinfo->gen];
}

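/* Derive the isl_surf to use for a view of \p mt with the given GL target.
 * When the target's dimension layout matches the miptree's this is just a
 * copy; otherwise the surface is rewritten to a single level/layer at a
 * tile offset (see the comment in the body).
 */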
static void
get_isl_surf(struct brw_context *brw, struct intel_mipmap_tree *mt,
             GLenum target, struct isl_view *view,
             uint32_t *tile_x, uint32_t *tile_y,
             uint32_t *offset, struct isl_surf *surf)
{
   *surf = mt->surf;

   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const enum isl_dim_layout dim_layout =
      get_isl_dim_layout(devinfo, mt->surf.tiling, target);

   surf->dim = get_isl_surf_dim(target);

   if (surf->dim_layout == dim_layout)
      return;

   /* The layout of the specified texture target is not compatible with the
    * actual layout of the miptree structure in memory -- You're entering
    * dangerous territory, this can only possibly work if you only intended
    * to access a single level and slice of the texture, and the hardware
    * supports the tile offset feature in order to allow non-tile-aligned
    * base offsets, since we'll have to point the hardware to the first
    * texel of the level instead of relying on the usual base level/layer
    * controls.
    */
   assert(devinfo->has_surface_tile_offset);
   assert(view->levels == 1 && view->array_len == 1);
   assert(*tile_x == 0 && *tile_y == 0);

   *offset += intel_miptree_get_tile_offsets(mt, view->base_level,
                                             view->base_array_layer,
                                             tile_x, tile_y);

   /* Minify the logical dimensions of the texture. */
   const unsigned l = view->base_level - mt->first_level;
   surf->logical_level0_px.width = minify(surf->logical_level0_px.width, l);
   surf->logical_level0_px.height = surf->dim <= ISL_SURF_DIM_1D ? 1 :
      minify(surf->logical_level0_px.height, l);
   surf->logical_level0_px.depth = surf->dim <= ISL_SURF_DIM_2D ? 1 :
      minify(surf->logical_level0_px.depth, l);

   /* Only the base level and layer can be addressed with the overridden
    * layout.
    */
   surf->logical_level0_px.array_len = 1;
   surf->levels = 1;
   surf->dim_layout = dim_layout;

   /* The requested slice of the texture is now at the base level and
    * layer.
    */
   view->base_level = 0;
   view->base_array_layer = 0;
}

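/* Fill out a SURFACE_STATE for a miptree and emit the relocations for the
 * main, auxiliary and clear-color buffers it references.  \p view is taken
 * by value because get_isl_surf() may rewrite it.
 */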
static void
brw_emit_surface_state(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       GLenum target, struct isl_view view,
                       enum isl_aux_usage aux_usage,
                       uint32_t *surf_offset, int surf_index,
                       unsigned reloc_flags)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t tile_x = mt->level[0].level_x;
   uint32_t tile_y = mt->level[0].level_y;
   uint32_t offset = mt->offset;

   struct isl_surf surf;

   get_isl_surf(brw, mt, target, &view, &tile_x, &tile_y, &offset, &surf);

   union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };

   struct brw_bo *aux_bo = NULL;
   struct isl_surf *aux_surf = NULL;
   uint64_t aux_offset = 0;
   struct brw_bo *clear_bo = NULL;
   uint32_t clear_offset = 0;

   if (aux_usage != ISL_AUX_USAGE_NONE) {
      aux_surf = &mt->aux_buf->surf;
      aux_bo = mt->aux_buf->bo;
      aux_offset = mt->aux_buf->offset;

      /* We only really need a clear color if we also have an auxiliary
       * surface.  Without one, it does nothing.
       */
      clear_color =
         intel_miptree_get_clear_color(devinfo, mt, view.format,
                                       view.usage & ISL_SURF_USAGE_TEXTURE_BIT,
                                       &clear_bo, &clear_offset);
   }

   void *state = brw_state_batch(brw,
                                 brw->isl_dev.ss.size,
                                 brw->isl_dev.ss.align,
                                 surf_offset);

   isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
                       .address = brw_state_reloc(&brw->batch,
                                                  *surf_offset + brw->isl_dev.ss.addr_offset,
                                                  mt->bo, offset, reloc_flags),
                       .aux_surf = aux_surf, .aux_usage = aux_usage,
                       .aux_address = aux_offset,
                       .mocs = brw_get_bo_mocs(devinfo, mt->bo),
                       .clear_color = clear_color,
                       .use_clear_address = clear_bo != NULL,
                       .clear_address = clear_offset,
                       .x_offset_sa = tile_x, .y_offset_sa = tile_y);
   if (aux_surf) {
      /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
       * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
       * contain other control information.  Since buffer addresses are always
       * on 4k boundaries (and thus have their lower 12 bits zero), we can use
       * an ordinary reloc to do the necessary address translation.
       *
       * FIXME: move to the point of assignment.
       */
      assert((aux_offset & 0xfff) == 0);

      if (devinfo->gen >= 8) {
         uint64_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
         *aux_addr = brw_state_reloc(&brw->batch,
                                     *surf_offset +
                                     brw->isl_dev.ss.aux_addr_offset,
                                     aux_bo, *aux_addr,
                                     reloc_flags);
      } else {
         uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
         *aux_addr = brw_state_reloc(&brw->batch,
                                     *surf_offset +
                                     brw->isl_dev.ss.aux_addr_offset,
                                     aux_bo, *aux_addr,
                                     reloc_flags);
      }
   }

   if (clear_bo != NULL) {
      /* Make sure the offset is aligned with a cacheline. */
      assert((clear_offset & 0x3f) == 0);
      uint32_t *clear_address =
         state + brw->isl_dev.ss.clear_color_state_offset;
      *clear_address = brw_state_reloc(&brw->batch,
                                       *surf_offset +
                                       brw->isl_dev.ss.clear_color_state_offset,
                                       clear_bo, *clear_address, reloc_flags);
   }
}

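/* Emit the SURFACE_STATE for a renderbuffer on gen6+, where the common
 * isl-based brw_emit_surface_state() path can be used directly.  Returns
 * the batch offset of the surface state.
 */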
static uint32_t
gen6_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 unsigned unit,
                                 uint32_t surf_index)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;

   assert(brw_render_target_supported(brw, rb));

   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }
   enum isl_format isl_format = brw->mesa_to_isl_render_format[rb_format];

   struct isl_view view = {
      .format = isl_format,
      .base_level = irb->mt_level - irb->mt->first_level,
      .levels = 1,
      .base_array_layer = irb->mt_layer,
      .array_len = MAX2(irb->layer_count, 1),
      .swizzle = ISL_SWIZZLE_IDENTITY,
      .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
   };

   uint32_t offset;
   brw_emit_surface_state(brw, mt, mt->target, view,
                          brw->draw_aux_usage[unit],
                          &offset, surf_index,
                          RELOC_WRITE);
   return offset;
}

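/* Map a GL texture target to the BRW_SURFACE_* type used in gen4-style
 * SURFACE_STATE.  Note that rectangle, multisample and array targets share
 * the plain 1D/2D surface types.
 */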
GLuint
translate_tex_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_1D:
   case GL_TEXTURE_1D_ARRAY_EXT:
      return BRW_SURFACE_1D;

   case GL_TEXTURE_RECTANGLE_NV:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_2D:
   case GL_TEXTURE_2D_ARRAY_EXT:
   case GL_TEXTURE_EXTERNAL_OES:
   case GL_TEXTURE_2D_MULTISAMPLE:
   case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_3D:
      return BRW_SURFACE_3D;

   case GL_TEXTURE_CUBE_MAP:
   case GL_TEXTURE_CUBE_MAP_ARRAY:
      return BRW_SURFACE_CUBE;

   default:
      unreachable("not reached");
   }
}

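/* Translate an isl tiling to the tiled-surface bits of gen4-style
 * SURFACE_STATE; anything other than X or Y tiling (e.g. linear) returns 0.
 */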
uint32_t
brw_get_surface_tiling_bits(enum isl_tiling tiling)
{
   switch (tiling) {
   case ISL_TILING_X:
      return BRW_SURFACE_TILED;
   case ISL_TILING_Y0:
      return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
   default:
      return 0;
   }
}

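/* The gen4-style SURFACE_STATE filled out here only encodes a 1x/4x
 * multisample distinction, so any multisampled surface is reported as 4x.
 */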
uint32_t
brw_get_surface_num_multisamples(unsigned num_samples)
{
   if (num_samples > 1)
      return BRW_SURFACE_MULTISAMPLECOUNT_4;
   else
      return BRW_SURFACE_MULTISAMPLECOUNT_1;
}

/**
 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
 * swizzling.
 */
int
brw_get_texture_swizzle(const struct gl_context *ctx,
                        const struct gl_texture_object *t)
{
   const struct gl_texture_image *img = t->Image[0][t->BaseLevel];

   int swizzles[SWIZZLE_NIL + 1] = {
      SWIZZLE_X,
      SWIZZLE_Y,
      SWIZZLE_Z,
      SWIZZLE_W,
      SWIZZLE_ZERO,
      SWIZZLE_ONE,
      SWIZZLE_NIL
   };

   if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
       img->_BaseFormat == GL_DEPTH_STENCIL) {
      GLenum depth_mode = t->DepthMode;

      /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
       * with depth component data specified with a sized internal format.
       * Otherwise, it's left at the old default, GL_LUMINANCE.
       */
      if (_mesa_is_gles3(ctx) &&
          img->InternalFormat != GL_DEPTH_COMPONENT &&
          img->InternalFormat != GL_DEPTH_STENCIL) {
         depth_mode = GL_RED;
      }

      switch (depth_mode) {
      case GL_ALPHA:
         swizzles[0] = SWIZZLE_ZERO;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_LUMINANCE:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
         break;
      case GL_INTENSITY:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_RED:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_ONE;
         break;
      }
   }

   GLenum datatype = _mesa_get_format_datatype(img->TexFormat);

   /* If the texture's format is alpha-only, force R, G, and B to
    * 0.0. Similarly, if the texture's format has no alpha channel,
    * force the alpha value read to 1.0. This allows for the
    * implementation to use an RGBA texture for any of these formats
    * without leaking any unexpected values.
    */
   switch (img->_BaseFormat) {
   case GL_ALPHA:
      swizzles[0] = SWIZZLE_ZERO;
      swizzles[1] = SWIZZLE_ZERO;
      swizzles[2] = SWIZZLE_ZERO;
      break;
   case GL_LUMINANCE:
      if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
      }
      break;
   case GL_LUMINANCE_ALPHA:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_W;
      }
      break;
   case GL_INTENSITY:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
      }
      break;
   case GL_RED:
   case GL_RG:
   case GL_RGB:
      if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0 ||
          img->TexFormat == MESA_FORMAT_RGB_DXT1 ||
          img->TexFormat == MESA_FORMAT_SRGB_DXT1)
         swizzles[3] = SWIZZLE_ONE;
      break;
   }

   return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
                        swizzles[GET_SWZ(t->_Swizzle, 1)],
                        swizzles[GET_SWZ(t->_Swizzle, 2)],
                        swizzles[GET_SWZ(t->_Swizzle, 3)]);
}

/**
 * Convert a swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED).  The mappings are
 *
 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
 *         0          1          2          3             4            5
 *         4          5          6          7             0            1
 *   SCS_RED, SCS_GREEN,  SCS_BLUE, SCS_ALPHA,     SCS_ZERO,     SCS_ONE
 *
 * which is simply adding 4 then modding by 8 (or anding with 7).
 *
 * We then may need to apply workarounds for textureGather hardware bugs.
 */
static unsigned
swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
{
   unsigned scs = (swizzle + 4) & 7;

   return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
}

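/* Emit the SURFACE_STATE for one bound GL texture unit, handling buffer
 * textures, multi-planar (e.g. YUV) miptrees, depth/stencil sampling and
 * the gen6/gen7 gather4 format workarounds along the way.
 */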
static void brw_update_texture_surface(struct gl_context *ctx,
                                       unsigned unit,
                                       uint32_t *surf_offset,
                                       bool for_gather,
                                       bool for_txf,
                                       uint32_t plane)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;

   if (obj->Target == GL_TEXTURE_BUFFER) {
      brw_update_buffer_texture_surface(ctx, unit, surf_offset);

   } else {
      struct intel_texture_object *intel_obj = intel_texture_object(obj);
      struct intel_mipmap_tree *mt = intel_obj->mt;

      if (plane > 0) {
         if (mt->plane[plane - 1] == NULL)
            return;
         mt = mt->plane[plane - 1];
      }

      struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
      /* If this is a view with restricted NumLayers, then our effective depth
       * is not just the miptree depth.
       */
      unsigned view_num_layers;
      if (obj->Immutable && obj->Target != GL_TEXTURE_3D) {
         view_num_layers = obj->NumLayers;
      } else {
         view_num_layers = mt->surf.dim == ISL_SURF_DIM_3D ?
                              mt->surf.logical_level0_px.depth :
                              mt->surf.logical_level0_px.array_len;
      }

      /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
       * texturing functions that return a float, as our code generation always
       * selects the .x channel (which would always be 0).
       */
      struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
      const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
         (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
          firstImage->_BaseFormat == GL_DEPTH_STENCIL);
      const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
                                brw_get_texture_swizzle(&brw->ctx, obj));

      mesa_format mesa_fmt;
      if (firstImage->_BaseFormat == GL_DEPTH_STENCIL ||
          firstImage->_BaseFormat == GL_DEPTH_COMPONENT) {
         /* The format from intel_obj may be a combined depth stencil format
          * when we just want depth.  Pull it from the miptree instead.  This
          * is safe because texture views aren't allowed on depth/stencil.
          */
         mesa_fmt = mt->format;
      } else if (mt->etc_format != MESA_FORMAT_NONE) {
         mesa_fmt = mt->format;
      } else if (plane > 0) {
         mesa_fmt = mt->format;
      } else {
         mesa_fmt = intel_obj->_Format;
      }
      enum isl_format format = translate_tex_format(brw, mesa_fmt,
                                                    for_txf ? GL_DECODE_EXT :
                                                    sampler->sRGBDecode);

      /* Implement gen6 and gen7 gather work-around */
      bool need_green_to_blue = false;
      if (for_gather) {
         if (devinfo->gen == 7 && (format == ISL_FORMAT_R32G32_FLOAT ||
                                   format == ISL_FORMAT_R32G32_SINT ||
                                   format == ISL_FORMAT_R32G32_UINT)) {
            format = ISL_FORMAT_R32G32_FLOAT_LD;
            need_green_to_blue = devinfo->is_haswell;
         } else if (devinfo->gen == 6) {
            /* Sandybridge's gather4 message is broken for integer formats.
             * To work around this, we pretend the surface is UNORM for
             * 8 or 16-bit formats, and emit shader instructions to recover
             * the real INT/UINT value.  For 32-bit formats, we pretend
             * the surface is FLOAT, and simply reinterpret the resulting
             * bits.
             */
            switch (format) {
            case ISL_FORMAT_R8_SINT:
            case ISL_FORMAT_R8_UINT:
               format = ISL_FORMAT_R8_UNORM;
               break;

            case ISL_FORMAT_R16_SINT:
            case ISL_FORMAT_R16_UINT:
               format = ISL_FORMAT_R16_UNORM;
               break;

            case ISL_FORMAT_R32_SINT:
            case ISL_FORMAT_R32_UINT:
               format = ISL_FORMAT_R32_FLOAT;
               break;

            default:
               break;
            }
         }
      }

      if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
         if (devinfo->gen <= 7) {
            assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
            mt = mt->r8stencil_mt;
         } else {
            mt = mt->stencil_mt;
         }
         format = ISL_FORMAT_R8_UINT;
      } else if (devinfo->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
         assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
         mt = mt->r8stencil_mt;
         format = ISL_FORMAT_R8_UINT;
      }

      const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];

      struct isl_view view = {
         .format = format,
         .base_level = obj->MinLevel + obj->BaseLevel,
         .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
         .base_array_layer = obj->MinLayer,
         .array_len = view_num_layers,
         .swizzle = {
            .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
            .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
            .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
            .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
         },
         .usage = ISL_SURF_USAGE_TEXTURE_BIT,
      };

      if (obj->Target == GL_TEXTURE_CUBE_MAP ||
          obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
         view.usage |= ISL_SURF_USAGE_CUBE_BIT;

      enum isl_aux_usage aux_usage =
         intel_miptree_texture_aux_usage(brw, mt, format);

      brw_emit_surface_state(brw, mt, mt->target, view, aux_usage,
                             surf_offset, surf_index,
                             0);
   }
}

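/* Emit a SURFACE_STATE for a buffer.  A NULL \p bo produces a surface whose
 * address is just \p buffer_offset with no relocation, which callers use
 * for "no buffer bound" cases.
 */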
void
brw_emit_buffer_surface_state(struct brw_context *brw,
                              uint32_t *out_offset,
                              struct brw_bo *bo,
                              unsigned buffer_offset,
                              unsigned surface_format,
                              unsigned buffer_size,
                              unsigned pitch,
                              unsigned reloc_flags)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *dw = brw_state_batch(brw,
                                  brw->isl_dev.ss.size,
                                  brw->isl_dev.ss.align,
                                  out_offset);

   isl_buffer_fill_state(&brw->isl_dev, dw,
                         .address = !bo ? buffer_offset :
                                    brw_state_reloc(&brw->batch,
                                                    *out_offset + brw->isl_dev.ss.addr_offset,
                                                    bo, buffer_offset,
                                                    reloc_flags),
                         .size = buffer_size,
                         .format = surface_format,
                         .stride = pitch,
                         .mocs = brw_get_bo_mocs(devinfo, bo));
}

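/* Emit the surface for a GL_TEXTURE_BUFFER texture, clamping its size to
 * both the underlying buffer object and MAX_TEXTURE_BUFFER_SIZE (see the
 * spec quote in the body).
 */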
void
brw_update_buffer_texture_surface(struct gl_context *ctx,
                                  unsigned unit,
                                  uint32_t *surf_offset)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_buffer_object *intel_obj =
      intel_buffer_object(tObj->BufferObject);
   uint32_t size = tObj->BufferSize;
   struct brw_bo *bo = NULL;
   mesa_format format = tObj->_BufferObjectFormat;
   const enum isl_format isl_format = brw_isl_format_for_mesa_format(format);
   int texel_size = _mesa_get_format_bytes(format);

   if (intel_obj) {
      size = MIN2(size, intel_obj->Base.Size);
      bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size,
                                  false);
   }

   /* The ARB_texture_buffer_specification says:
    *
    *    "The number of texels in the buffer texture's texel array is given by
    *
    *       floor(<buffer_size> / (<components> * sizeof(<base_type>)),
    *
    *     where <buffer_size> is the size of the buffer object, in basic
    *     machine units and <components> and <base_type> are the element count
    *     and base data type for elements, as specified in Table X.1.  The
    *     number of texels in the texel array is then clamped to the
    *     implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
    *
    * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
    * so that when ISL divides by stride to obtain the number of texels, that
    * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
    */
   size = MIN2(size, ctx->Const.MaxTextureBufferSize * (unsigned) texel_size);

   if (isl_format == ISL_FORMAT_UNSUPPORTED) {
      _mesa_problem(NULL, "bad format %s for texture buffer\n",
                    _mesa_get_format_name(format));
   }

   brw_emit_buffer_surface_state(brw, surf_offset, bo,
                                 tObj->BufferOffset,
                                 isl_format,
                                 size,
                                 texel_size,
                                 0);
}

/**
 * Set up a binding table entry for use by stream output logic (transform
 * feedback).
 *
 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
 */
void
brw_update_sol_surface(struct brw_context *brw,
                       struct gl_buffer_object *buffer_obj,
                       uint32_t *out_offset, unsigned num_vector_components,
                       unsigned stride_dwords, unsigned offset_dwords)
{
   struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
   uint32_t offset_bytes = 4 * offset_dwords;
   struct brw_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
                                              offset_bytes,
                                              buffer_obj->Size - offset_bytes,
                                              true);
   uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
   uint32_t pitch_minus_1 = 4 * stride_dwords - 1;
   size_t size_dwords = buffer_obj->Size / 4;
   uint32_t buffer_size_minus_1, width, height, depth, surface_format;

   /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
    * too big to map using a single binding table entry?
    */
   assert((size_dwords - offset_dwords) / stride_dwords
          <= BRW_MAX_NUM_BUFFER_ENTRIES);

   if (size_dwords > offset_dwords + num_vector_components) {
      /* There is room for at least 1 transform feedback output in the buffer.
       * Compute the number of additional transform feedback outputs the
       * buffer has room for.
       */
      buffer_size_minus_1 =
         (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
   } else {
      /* There isn't even room for a single transform feedback output in the
       * buffer.  We can't configure the binding table entry to prevent output
       * entirely; we'll have to rely on the geometry shader to detect
       * overflow.  But to minimize the damage in case of a bug, set up the
       * binding table entry to just allow a single output.
       */
      buffer_size_minus_1 = 0;
   }
   width = buffer_size_minus_1 & 0x7f;
   height = (buffer_size_minus_1 & 0xfff80) >> 7;
   depth = (buffer_size_minus_1 & 0x7f00000) >> 20;

   switch (num_vector_components) {
   case 1:
      surface_format = ISL_FORMAT_R32_FLOAT;
      break;
   case 2:
      surface_format = ISL_FORMAT_R32G32_FLOAT;
      break;
   case 3:
      surface_format = ISL_FORMAT_R32G32B32_FLOAT;
      break;
   case 4:
      surface_format = ISL_FORMAT_R32G32B32A32_FLOAT;
      break;
   default:
      unreachable("Invalid vector size for transform feedback output");
   }

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
             BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
             surface_format << BRW_SURFACE_FORMAT_SHIFT |
             BRW_SURFACE_RC_READ_WRITE;
   surf[1] = brw_state_reloc(&brw->batch,
                             *out_offset + 4, bo, offset_bytes, RELOC_WRITE);
   surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
              height << BRW_SURFACE_HEIGHT_SHIFT);
   surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = 0;
   surf[5] = 0;
}

/* Creates a new WM constant buffer reflecting the current fragment program's
 * constants, if needed by the fragment program.
 *
 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
 * state atom.
 */
static void
brw_upload_wm_pull_constants(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->wm.base;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct brw_program *fp =
      (struct brw_program *) brw->programs[MESA_SHADER_FRAGMENT];

   /* BRW_NEW_FS_PROG_DATA */
   struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;

   _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
   /* _NEW_PROGRAM_CONSTANTS */
   brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
                             stage_state, prog_data);
}

const struct brw_tracked_state brw_wm_pull_constants = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_pull_constants,
};

/**
 * Creates a null renderbuffer surface.
 *
 * This is used when the shader doesn't write to any color output.  An FB
 * write to target 0 will still be emitted, because that's how the thread is
 * terminated (and computed depth is returned), so we need to have the
 * hardware discard the target 0 color output.
 */
static void
emit_null_surface_state(struct brw_context *brw,
                        const struct gl_framebuffer *fb,
                        uint32_t *out_offset)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *surf = brw_state_batch(brw,
                                    brw->isl_dev.ss.size,
                                    brw->isl_dev.ss.align,
                                    out_offset);

   /* Use the fb dimensions or 1x1x1 */
   const unsigned width = fb ? _mesa_geometric_width(fb) : 1;
   const unsigned height = fb ? _mesa_geometric_height(fb) : 1;
   const unsigned samples = fb ? _mesa_geometric_samples(fb) : 1;

   if (devinfo->gen != 6 || samples <= 1) {
      isl_null_fill_state(&brw->isl_dev, surf,
                          isl_extent3d(width, height, 1));
      return;
   }

   /* On Gen6, null render targets seem to cause GPU hangs when multisampling.
    * So work around this problem by rendering into a dummy color buffer.
    *
    * To decrease the amount of memory needed by the workaround buffer, we
    * set its pitch to 128 bytes (the width of a Y tile).  This means that
    * the amount of memory needed for the workaround buffer is
    * (width_in_tiles + height_in_tiles - 1) tiles.
    *
    * Note that since the workaround buffer will be interpreted by the
    * hardware as an interleaved multisampled buffer, we need to compute
    * width_in_tiles and height_in_tiles by dividing the width and height
    * by 16 rather than the normal Y-tile size of 32.
    */
   unsigned width_in_tiles = ALIGN(width, 16) / 16;
   unsigned height_in_tiles = ALIGN(height, 16) / 16;
   unsigned pitch_minus_1 = 127;
   unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
   brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
                      size_needed);

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              ISL_FORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
   surf[1] = brw_state_reloc(&brw->batch, *out_offset + 4,
                             brw->wm.multisampled_null_render_target_bo,
                             0, RELOC_WRITE);

   surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   /* From the Sandy Bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
    * Notes):
    *
    *    If Surface Type is SURFTYPE_NULL, this field must be TRUE
    */
   surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = BRW_SURFACE_MULTISAMPLECOUNT_4;
   surf[5] = 0;
}

/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffers support.
 */
static uint32_t
gen4_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 unsigned unit,
                                 uint32_t surf_index)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;
   uint32_t *surf;
   uint32_t tile_x, tile_y;
   enum isl_format format;
   uint32_t offset;
   /* _NEW_BUFFERS */
   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   /* BRW_NEW_FS_PROG_DATA */

   if (rb->TexImage && !devinfo->has_surface_tile_offset) {
      intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);

      if (tile_x != 0 || tile_y != 0) {
         /* Original gen4 hardware couldn't draw to a non-tile-aligned
          * destination in a miptree unless you actually setup your renderbuffer
          * as a miptree and used the fragile lod/array_index/etc. controls to
          * select the image.  So, instead, we just make a new single-level
          * miptree and render into that.
          */
         intel_renderbuffer_move_to_temp(brw, irb, false);
         assert(irb->align_wa_mt);
         mt = irb->align_wa_mt;
      }
   }

   surf = brw_state_batch(brw, 6 * 4, 32, &offset);

   format = brw->mesa_to_isl_render_format[rb_format];
   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              format << BRW_SURFACE_FORMAT_SHIFT);

   /* reloc */
   assert(mt->offset % mt->cpp == 0);
   surf[1] = brw_state_reloc(&brw->batch, offset + 4, mt->bo,
                             mt->offset +
                             intel_renderbuffer_get_tile_offsets(irb,
                                                                 &tile_x,
                                                                 &tile_y),
                             RELOC_WRITE);

   surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   surf[3] = (brw_get_surface_tiling_bits(mt->surf.tiling) |
              (mt->surf.row_pitch - 1) << BRW_SURFACE_PITCH_SHIFT);

   surf[4] = brw_get_surface_num_multisamples(mt->surf.samples);

   assert(devinfo->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
   /* Note that the low bits of these fields are missing, so
    * there's the possibility of getting in trouble.
    */
   assert(tile_x % 4 == 0);
   assert(tile_y % 2 == 0);
   surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
              (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
              (mt->surf.image_alignment_el.height == 4 ?
               BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));

   if (devinfo->gen < 6) {
      /* _NEW_COLOR */
      if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
          (ctx->Color.BlendEnabled & (1 << unit)))
         surf[0] |= BRW_SURFACE_BLEND_ENABLED;

      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 0))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 1))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 2))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;

      /* Disable writes to the alpha component when the renderbuffer is
       * XRGB.
       */
      if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
          !GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 3)) {
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
      }
   }

   return offset;
}

static void
update_renderbuffer_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS | _NEW_COLOR */
   const struct gl_framebuffer *fb = ctx->DrawBuffer;

   /* Render targets always start at binding table index 0. */
   const unsigned rt_start = 0;

   uint32_t *surf_offsets = brw->wm.base.surf_offset;

   /* Update surfaces for drawing buffers */
   if (fb->_NumColorDrawBuffers >= 1) {
      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];

         if (intel_renderbuffer(rb)) {
            surf_offsets[rt_start + i] = devinfo->gen >= 6 ?
               gen6_update_renderbuffer_surface(brw, rb, i, rt_start + i) :
               gen4_update_renderbuffer_surface(brw, rb, i, rt_start + i);
         } else {
            emit_null_surface_state(brw, fb, &surf_offsets[rt_start + i]);
         }
      }
   } else {
      emit_null_surface_state(brw, fb, &surf_offsets[rt_start]);
   }

   /* The PIPE_CONTROL command description says:
    *
    *    "Whenever a Binding Table Index (BTI) used by a Render Target Message
    *     points to a different RENDER_SURFACE_STATE, SW must issue a Render
    *     Target Cache Flush by enabling this bit.  When render target flush
    *     is set due to new association of BTI, PS Scoreboard Stall bit must
    *     be set in this packet."
    */
   if (devinfo->gen >= 11) {
      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                  PIPE_CONTROL_STALL_AT_SCOREBOARD);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR,
      .brw = BRW_NEW_BATCH,
   },
   .emit = update_renderbuffer_surfaces,
};

const struct brw_tracked_state gen6_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE,
   },
   .emit = update_renderbuffer_surfaces,
};

static void
update_renderbuffer_read_surfaces(struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;

   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);

   if (wm_prog_data->has_render_target_reads &&
       !ctx->Extensions.EXT_shader_framebuffer_fetch) {
      /* _NEW_BUFFERS */
      const struct gl_framebuffer *fb = ctx->DrawBuffer;

      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
         const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
         const unsigned surf_index =
            wm_prog_data->binding_table.render_target_read_start + i;
         uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];

         if (irb) {
            const enum isl_format format = brw->mesa_to_isl_render_format[
               _mesa_get_render_format(ctx, intel_rb_format(irb))];
            assert(isl_format_supports_sampling(&brw->screen->devinfo,
                                                format));

            /* Override the target of the texture if the render buffer is a
             * single slice of a 3D texture (since the minimum array element
             * field of the surface state structure is ignored by the sampler
             * unit for 3D textures on some hardware), or if the render buffer
             * is a 1D array (since shaders always provide the array index
             * coordinate at the Z component to avoid state-dependent
             * recompiles when changing the texture target of the
             * framebuffer).
             */
            const GLenum target =
               (irb->mt->target == GL_TEXTURE_3D &&
                irb->layer_count == 1) ? GL_TEXTURE_2D :
               irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
               irb->mt->target;

            const struct isl_view view = {
               .format = format,
               .base_level = irb->mt_level - irb->mt->first_level,
               .levels = 1,
               .base_array_layer = irb->mt_layer,
               .array_len = irb->layer_count,
               .swizzle = ISL_SWIZZLE_IDENTITY,
               .usage = ISL_SURF_USAGE_TEXTURE_BIT,
            };

            enum isl_aux_usage aux_usage =
               intel_miptree_texture_aux_usage(brw, irb->mt, format);
            if (brw->draw_aux_usage[i] == ISL_AUX_USAGE_NONE)
               aux_usage = ISL_AUX_USAGE_NONE;

            brw_emit_surface_state(brw, irb->mt, target, view, aux_usage,
                                   surf_offset, surf_index,
                                   0);

         } else {
            emit_null_surface_state(brw, fb, surf_offset);
         }
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = update_renderbuffer_read_surfaces,
};

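/* Whether sampling this texture returns depth data: true for pure depth
 * formats, and for depth/stencil formats unless stencil sampling is
 * selected.
 */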
static bool
is_depth_texture(struct intel_texture_object *iobj)
{
   GLenum base_format = _mesa_get_format_base_format(iobj->_Format);
   return base_format == GL_DEPTH_COMPONENT ||
          (base_format == GL_DEPTH_STENCIL && !iobj->base.StencilSampling);
}

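/* Emit texture surfaces for every sampler used by \p prog.  Surfaces land
 * at the stage's plane_start[plane] binding-table offset, or at
 * gather_texture_start when \p for_gather is set.
 */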
static void
update_stage_texture_surfaces(struct brw_context *brw,
                              const struct gl_program *prog,
                              struct brw_stage_state *stage_state,
                              bool for_gather, uint32_t plane)
{
   if (!prog)
      return;

   struct gl_context *ctx = &brw->ctx;

   uint32_t *surf_offset = stage_state->surf_offset;

   /* BRW_NEW_*_PROG_DATA */
   if (for_gather)
      surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
   else
      surf_offset += stage_state->prog_data->binding_table.plane_start[plane];

   unsigned num_samplers = util_last_bit(prog->SamplersUsed);
   for (unsigned s = 0; s < num_samplers; s++) {
      surf_offset[s] = 0;

      if (prog->SamplersUsed & (1 << s)) {
         const unsigned unit = prog->SamplerUnits[s];
         const bool used_by_txf = prog->info.textures_used_by_txf & (1 << s);
         struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
         struct intel_texture_object *iobj = intel_texture_object(obj);

         /* _NEW_TEXTURE */
         if (!obj)
            continue;

         if ((prog->ShadowSamplers & (1 << s)) && !is_depth_texture(iobj)) {
            /* A programming note for the sample_c message says:
             *
             *    "The Surface Format of the associated surface must be
             *     indicated as supporting shadow mapping as indicated in the
             *     surface format table."
             *
             * Accessing non-depth textures via a sampler*Shadow type is
             * undefined.  GLSL 4.50 page 162 says:
             *
             *    "If a shadow texture call is made to a sampler that does not
             *     represent a depth texture, then results are undefined."
             *
             * We give them a null surface (zeros) for undefined.  We've seen
             * GPU hangs with color buffers and sample_c, so we try and avoid
             * those with this hack.
             */
            emit_null_surface_state(brw, NULL, surf_offset + s);
         } else {
            brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather,
                                       used_by_txf, plane);
         }
      }
   }
}

/**
 * Construct SURFACE_STATE objects for enabled textures.
 */
static void
brw_update_texture_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* BRW_NEW_VERTEX_PROGRAM */
   struct gl_program *vs = brw->programs[MESA_SHADER_VERTEX];

   /* BRW_NEW_TESS_PROGRAMS */
   struct gl_program *tcs = brw->programs[MESA_SHADER_TESS_CTRL];
   struct gl_program *tes = brw->programs[MESA_SHADER_TESS_EVAL];

   /* BRW_NEW_GEOMETRY_PROGRAM */
   struct gl_program *gs = brw->programs[MESA_SHADER_GEOMETRY];

   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct gl_program *fs = brw->programs[MESA_SHADER_FRAGMENT];

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
   update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
   update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
   update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
   update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);

   /* Emit an alternate set of surface states for gather.  This allows the
    * surface format to be overridden for only the gather4 messages.
    */
   if (devinfo->gen < 8) {
      if (vs && vs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
      if (tcs && tcs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
      if (tes && tes->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
      if (gs && gs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
      if (fs && fs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
   }

   if (fs) {
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_GEOMETRY_PROGRAM |
             BRW_NEW_GS_PROG_DATA |
             BRW_NEW_TESS_PROGRAMS |
             BRW_NEW_TCS_PROG_DATA |
             BRW_NEW_TES_PROG_DATA |
             BRW_NEW_TEXTURE_BUFFER |
             BRW_NEW_VERTEX_PROGRAM |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = brw_update_texture_surfaces,
};

static void
brw_update_cs_texture_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* BRW_NEW_COMPUTE_PROGRAM */
   struct gl_program *cs = brw->programs[MESA_SHADER_COMPUTE];

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);

   /* Emit an alternate set of surface states for gather.  This allows the
    * surface format to be overridden for only the gather4 messages.
    */
   if (devinfo->gen < 8) {
      if (cs && cs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_cs_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_COMPUTE_PROGRAM |
             BRW_NEW_AUX_STATE,
   },
   .emit = brw_update_cs_texture_surfaces,
};

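/* Emit a surface for one UBO/SSBO/atomic-counter binding point, or a null
 * surface if no buffer object is bound there.
 */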
static void
upload_buffer_surface(struct brw_context *brw,
                      struct gl_buffer_binding *binding,
                      uint32_t *out_offset,
                      enum isl_format format,
                      unsigned reloc_flags)
{
   struct gl_context *ctx = &brw->ctx;

   if (binding->BufferObject == ctx->Shared->NullBufferObj) {
      emit_null_surface_state(brw, NULL, out_offset);
   } else {
      ptrdiff_t size = binding->BufferObject->Size - binding->Offset;
      if (!binding->AutomaticSize)
         size = MIN2(size, binding->Size);

      struct intel_buffer_object *iobj =
         intel_buffer_object(binding->BufferObject);
      struct brw_bo *bo =
         intel_bufferobj_buffer(brw, iobj, binding->Offset, size,
                                (reloc_flags & RELOC_WRITE) != 0);

      brw_emit_buffer_surface_state(brw, out_offset, bo, binding->Offset,
                                    format, size, 1, reloc_flags);
   }
}

void
brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;

   if (!prog || (prog->info.num_ubos == 0 &&
                 prog->info.num_ssbos == 0 &&
                 prog->info.num_abos == 0))
      return;

   uint32_t *ubo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ubo_start];

   for (int i = 0; i < prog->info.num_ubos; i++) {
      struct gl_buffer_binding *binding =
         &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];
      upload_buffer_surface(brw, binding, &ubo_surf_offsets[i],
                            ISL_FORMAT_R32G32B32A32_FLOAT, 0);
   }

   uint32_t *abo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
   uint32_t *ssbo_surf_offsets = abo_surf_offsets + prog->info.num_abos;

   for (int i = 0; i < prog->info.num_abos; i++) {
      struct gl_buffer_binding *binding =
         &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
      upload_buffer_surface(brw, binding, &abo_surf_offsets[i],
                            ISL_FORMAT_RAW, RELOC_WRITE);
   }

   for (int i = 0; i < prog->info.num_ssbos; i++) {
      struct gl_buffer_binding *binding =
         &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];

      upload_buffer_surface(brw, binding, &ssbo_surf_offsets[i],
                            ISL_FORMAT_RAW, RELOC_WRITE);
   }

   stage_state->push_constants_dirty = true;
   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

static void
brw_upload_wm_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog = ctx->FragmentProgram._Current;

   /* BRW_NEW_FS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
}

const struct brw_tracked_state brw_wm_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_wm_ubo_surfaces,
};

static void
brw_upload_cs_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   /* BRW_NEW_CS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog, &brw->cs.base, brw->cs.base.prog_data);
}

const struct brw_tracked_state brw_cs_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_cs_ubo_surfaces,
};

static void
brw_upload_cs_image_surfaces(struct brw_context *brw)
{
   /* _NEW_PROGRAM */
   const struct gl_program *cp = brw->programs[MESA_SHADER_COMPUTE];

   if (cp) {
      /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, cp, &brw->cs.base,
                                brw->cs.base.prog_data);
   }
}

const struct brw_tracked_state brw_cs_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_AUX_STATE |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_cs_image_surfaces,
};

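/* Pick the surface format for a shader image: write-only images can use the
 * requested format directly; readable images must be lowered to a format
 * that typed surface reads support, or to raw untyped access as a last
 * resort.
 */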
static uint32_t
get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   enum isl_format hw_format = brw_isl_format_for_mesa_format(format);
   if (access == GL_WRITE_ONLY) {
      return hw_format;
   } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
      /* Typed surface reads support a very limited subset of the shader
       * image formats.  Translate it into the closest format the
       * hardware supports.
       */
      return isl_lower_storage_image_format(devinfo, hw_format);
   } else {
      /* The hardware doesn't actually support a typed format that we can use
       * so we have to fall back to untyped read/write messages.
       */
      return ISL_FORMAT_RAW;
   }
}

static void
update_default_image_param(struct brw_context *brw,
                           struct gl_image_unit *u,
                           unsigned surface_idx,
                           struct brw_image_param *param)
{
   memset(param, 0, sizeof(*param));
   param->surface_idx = surface_idx;
   /* Set the swizzling shifts to all-ones to effectively disable swizzling --
    * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
    * detailed explanation of these parameters.
    */
   param->swizzling[0] = 0xff;
   param->swizzling[1] = 0xff;
}

static void
update_buffer_image_param(struct brw_context *brw,
                          struct gl_image_unit *u,
                          unsigned surface_idx,
                          struct brw_image_param *param)
{
   struct gl_buffer_object *obj = u->TexObj->BufferObject;
   const uint32_t size = MIN2((uint32_t) u->TexObj->BufferSize, obj->Size);
   update_default_image_param(brw, u, surface_idx, param);

   param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
   param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
}

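/* Number of layers a layered image binding covers: all six cube faces, the
 * (minified) depth of a 3D texture at the bound level, or the array length
 * otherwise.
 */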
static unsigned
get_image_num_layers(const struct intel_mipmap_tree *mt, GLenum target,
                     unsigned level)
{
   if (target == GL_TEXTURE_CUBE_MAP)
      return 6;

   return target == GL_TEXTURE_3D ?
      minify(mt->surf.logical_level0_px.depth, level) :
      mt->surf.logical_level0_px.array_len;
}

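/* Emit the surface state and brw_image_param metadata for one image unit,
 * falling back to a null surface and default parameters when the unit is
 * incomplete.
 */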
static void
update_image_surface(struct brw_context *brw,
                     struct gl_image_unit *u,
                     GLenum access,
                     unsigned surface_idx,
                     uint32_t *surf_offset,
                     struct brw_image_param *param)
{
   if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
      struct gl_texture_object *obj = u->TexObj;
      const unsigned format = get_image_format(brw, u->_ActualFormat, access);

      if (obj->Target == GL_TEXTURE_BUFFER) {
         struct intel_buffer_object *intel_obj =
            intel_buffer_object(obj->BufferObject);
         const unsigned texel_size = (format == ISL_FORMAT_RAW ? 1 :
                                      _mesa_get_format_bytes(u->_ActualFormat));

         brw_emit_buffer_surface_state(
            brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
            format, intel_obj->Base.Size, texel_size,
            access != GL_READ_ONLY ? RELOC_WRITE : 0);

         update_buffer_image_param(brw, u, surface_idx, param);

      } else {
         struct intel_texture_object *intel_obj = intel_texture_object(obj);
         struct intel_mipmap_tree *mt = intel_obj->mt;
         const unsigned num_layers = u->Layered ?
            get_image_num_layers(mt, obj->Target, u->Level) : 1;

         struct isl_view view = {
            .format = format,
            .base_level = obj->MinLevel + u->Level,
            .levels = 1,
            .base_array_layer = obj->MinLayer + u->_Layer,
            .array_len = num_layers,
            .swizzle = ISL_SWIZZLE_IDENTITY,
            .usage = ISL_SURF_USAGE_STORAGE_BIT,
         };

         if (format == ISL_FORMAT_RAW) {
            brw_emit_buffer_surface_state(
               brw, surf_offset, mt->bo, mt->offset,
               format, mt->bo->size - mt->offset, 1 /* pitch */,
               access != GL_READ_ONLY ? RELOC_WRITE : 0);

         } else {
            const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
            assert(!intel_miptree_has_color_unresolved(mt,
                                                       view.base_level, 1,
                                                       view.base_array_layer,
                                                       view.array_len));
            brw_emit_surface_state(brw, mt, mt->target, view,
                                   ISL_AUX_USAGE_NONE,
                                   surf_offset, surf_index,
                                   access == GL_READ_ONLY ? 0 : RELOC_WRITE);
         }

         isl_surf_fill_image_param(&brw->isl_dev, param, &mt->surf, &view);
         param->surface_idx = surface_idx;
      }

   } else {
      emit_null_surface_state(brw, NULL, surf_offset);
      update_default_image_param(brw, u, surface_idx, param);
   }
}

void
brw_upload_image_surfaces(struct brw_context *brw,
                          const struct gl_program *prog,
                          struct brw_stage_state *stage_state,
                          struct brw_stage_prog_data *prog_data)
{
   assert(prog);
   struct gl_context *ctx = &brw->ctx;

   if (prog->info.num_images) {
      for (unsigned i = 0; i < prog->info.num_images; i++) {
         struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
         const unsigned surf_idx = prog_data->binding_table.image_start + i;

         update_image_surface(brw, u, prog->sh.ImageAccess[i],
                              surf_idx,
                              &stage_state->surf_offset[surf_idx],
                              &stage_state->image_param[i]);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
      /* This may have changed the image metadata dependent on the context
       * image unit state and passed to the program as uniforms, make sure
       * that push and pull constants are reuploaded.
       */
      brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
   }
}

static void
brw_upload_wm_image_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct gl_program *wm = brw->programs[MESA_SHADER_FRAGMENT];

   if (wm) {
      /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, wm, &brw->wm.base,
                                brw->wm.base.prog_data);
   }
}

const struct brw_tracked_state brw_wm_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_wm_image_surfaces,
};

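/* Emit the surface backing gl_NumWorkGroups: the indirect-dispatch BO when
 * one is bound, otherwise a small upload-buffer allocation holding the
 * three GLuint counts.
 */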
static void
brw_upload_cs_work_groups_surface(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
   /* BRW_NEW_CS_PROG_DATA */
   const struct brw_cs_prog_data *cs_prog_data =
      brw_cs_prog_data(brw->cs.base.prog_data);

   if (prog && cs_prog_data->uses_num_work_groups) {
      const unsigned surf_idx =
         cs_prog_data->binding_table.work_groups_start;
      uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
      struct brw_bo *bo;
      uint32_t bo_offset;

      if (brw->compute.num_work_groups_bo == NULL) {
         bo = NULL;
         brw_upload_data(&brw->upload,
                         (void *) brw->compute.num_work_groups,
                         3 * sizeof(GLuint),
                         sizeof(GLuint),
                         &bo,
                         &bo_offset);
      } else {
         bo = brw->compute.num_work_groups_bo;
         bo_offset = brw->compute.num_work_groups_offset;
      }

      brw_emit_buffer_surface_state(brw, surf_offset,
                                    bo, bo_offset,
                                    ISL_FORMAT_RAW,
                                    3 * sizeof(GLuint), 1,
                                    RELOC_WRITE);
      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_cs_work_groups_surface = {
   .dirty = {
      .brw = BRW_NEW_CS_PROG_DATA |
             BRW_NEW_CS_WORK_GROUPS
   },
   .emit = brw_upload_cs_work_groups_surface,
};