i965: Add and use a getter for the miptree aux buffer
src/mesa/drivers/dri/i965/brw_wm_surface_state.c
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "compiler/nir/nir.h"
#include "main/context.h"
#include "main/blend.h"
#include "main/mtypes.h"
#include "main/samplerobj.h"
#include "main/shaderimage.h"
#include "main/teximage.h"
#include "program/prog_parameter.h"
#include "program/prog_instruction.h"
#include "main/framebuffer.h"
#include "main/shaderapi.h"

#include "isl/isl.h"

#include "intel_mipmap_tree.h"
#include "intel_batchbuffer.h"
#include "intel_tex.h"
#include "intel_fbo.h"
#include "intel_buffer_objects.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "brw_wm.h"

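/* Memory Object Control State (MOCS) entries, indexed by hardware
 * generation.  Buffers shared with external consumers (e.g. scanout)
 * use the PTE entry so the caching mode chosen by the kernel in the
 * page tables wins; everything else is mapped write-back cacheable.
 */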
static const uint32_t wb_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_WB,
   [9] = SKL_MOCS_WB,
   [10] = CNL_MOCS_WB,
   [11] = ICL_MOCS_WB,
};

static const uint32_t pte_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_PTE,
   [9] = SKL_MOCS_PTE,
   [10] = CNL_MOCS_PTE,
   [11] = ICL_MOCS_PTE,
};

uint32_t
brw_get_bo_mocs(const struct gen_device_info *devinfo, struct brw_bo *bo)
{
   return (bo && bo->external ? pte_mocs : wb_mocs)[devinfo->gen];
}

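/* Copy the miptree's isl_surf into *surf, adjusting the copy (and the
 * view, offset, and intra-tile offsets) when the dim layout implied by
 * the GL target differs from how the miptree is actually laid out.
 */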
static void
get_isl_surf(struct brw_context *brw, struct intel_mipmap_tree *mt,
             GLenum target, struct isl_view *view,
             uint32_t *tile_x, uint32_t *tile_y,
             uint32_t *offset, struct isl_surf *surf)
{
   *surf = mt->surf;

   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const enum isl_dim_layout dim_layout =
      get_isl_dim_layout(devinfo, mt->surf.tiling, target);

   surf->dim = get_isl_surf_dim(target);

   if (surf->dim_layout == dim_layout)
      return;

   /* The layout of the specified texture target is not compatible with the
    * actual layout of the miptree structure in memory -- you're entering
    * dangerous territory: this can only work if you only intend to access a
    * single level and slice of the texture, and the hardware supports the
    * tile offset feature, which lets us use non-tile-aligned base offsets by
    * pointing the hardware at the first texel of the level instead of
    * relying on the usual base level/layer controls.
    */
   assert(devinfo->has_surface_tile_offset);
   assert(view->levels == 1 && view->array_len == 1);
   assert(*tile_x == 0 && *tile_y == 0);

   *offset += intel_miptree_get_tile_offsets(mt, view->base_level,
                                             view->base_array_layer,
                                             tile_x, tile_y);

   /* Minify the logical dimensions of the texture. */
   const unsigned l = view->base_level - mt->first_level;
   surf->logical_level0_px.width = minify(surf->logical_level0_px.width, l);
   surf->logical_level0_px.height = surf->dim <= ISL_SURF_DIM_1D ? 1 :
      minify(surf->logical_level0_px.height, l);
   surf->logical_level0_px.depth = surf->dim <= ISL_SURF_DIM_2D ? 1 :
      minify(surf->logical_level0_px.depth, l);

   /* Only the base level and layer can be addressed with the overridden
    * layout.
    */
   surf->logical_level0_px.array_len = 1;
   surf->levels = 1;
   surf->dim_layout = dim_layout;

   /* The requested slice of the texture is now at the base level and
    * layer.
    */
   view->base_level = 0;
   view->base_array_layer = 0;
}

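/* Emit a SURFACE_STATE for a miptree slice/view, wiring up the auxiliary
 * (HiZ/MCS/CCS) surface and fast-clear color when aux_usage requests them,
 * and recording the relocations the kernel needs to patch the addresses.
 */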
static void
brw_emit_surface_state(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       GLenum target, struct isl_view view,
                       enum isl_aux_usage aux_usage,
                       uint32_t *surf_offset, int surf_index,
                       unsigned reloc_flags)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t tile_x = mt->level[0].level_x;
   uint32_t tile_y = mt->level[0].level_y;
   uint32_t offset = mt->offset;

   struct isl_surf surf;

   get_isl_surf(brw, mt, target, &view, &tile_x, &tile_y, &offset, &surf);

   union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };

   struct brw_bo *aux_bo = NULL;
   struct isl_surf *aux_surf = NULL;
   uint64_t aux_offset = 0;
   struct intel_miptree_aux_buffer *aux_buf = intel_miptree_get_aux_buffer(mt);

   if (aux_usage != ISL_AUX_USAGE_NONE) {
      aux_surf = &aux_buf->surf;
      aux_bo = aux_buf->bo;
      aux_offset = aux_buf->offset;

      /* We only really need a clear color if we also have an auxiliary
       * surface.  Without one, it does nothing.
       */
      clear_color = mt->fast_clear_color;
   }

   void *state = brw_state_batch(brw,
                                 brw->isl_dev.ss.size,
                                 brw->isl_dev.ss.align,
                                 surf_offset);

   bool use_clear_address = devinfo->gen >= 10 && aux_surf;

   struct brw_bo *clear_bo = NULL;
   uint32_t clear_offset = 0;
   if (use_clear_address) {
      clear_bo = aux_buf->clear_color_bo;
      clear_offset = aux_buf->clear_color_offset;
   }

   isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
                       .address = brw_state_reloc(&brw->batch,
                                                  *surf_offset + brw->isl_dev.ss.addr_offset,
                                                  mt->bo, offset, reloc_flags),
                       .aux_surf = aux_surf, .aux_usage = aux_usage,
                       .aux_address = aux_offset,
                       .mocs = brw_get_bo_mocs(devinfo, mt->bo),
                       .clear_color = clear_color,
                       .use_clear_address = use_clear_address,
                       .clear_address = clear_offset,
                       .x_offset_sa = tile_x, .y_offset_sa = tile_y);
   if (aux_surf) {
      /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
       * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
       * contain other control information.  Since buffer addresses are always
       * on 4k boundaries (and thus have their lower 12 bits zero), we can use
       * an ordinary reloc to do the necessary address translation.
       *
       * FIXME: move to the point of assignment.
       */
      assert((aux_offset & 0xfff) == 0);

      if (devinfo->gen >= 8) {
         uint64_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
         *aux_addr = brw_state_reloc(&brw->batch,
                                     *surf_offset +
                                     brw->isl_dev.ss.aux_addr_offset,
                                     aux_bo, *aux_addr,
                                     reloc_flags);
      } else {
         uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
         *aux_addr = brw_state_reloc(&brw->batch,
                                     *surf_offset +
                                     brw->isl_dev.ss.aux_addr_offset,
                                     aux_bo, *aux_addr,
                                     reloc_flags);
      }
   }

   if (use_clear_address) {
      /* Make sure the offset is aligned with a cacheline. */
      assert((clear_offset & 0x3f) == 0);
      uint32_t *clear_address =
         state + brw->isl_dev.ss.clear_color_state_offset;
      *clear_address = brw_state_reloc(&brw->batch,
                                       *surf_offset +
                                       brw->isl_dev.ss.clear_color_state_offset,
                                       clear_bo, *clear_address, reloc_flags);
   }
}

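/* Build SURFACE_STATE for a color renderbuffer via ISL.  This is the gen6+
 * path; gen4/5 use gen4_update_renderbuffer_surface() below.
 */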
static uint32_t
gen6_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 unsigned unit,
                                 uint32_t surf_index)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;

   assert(brw_render_target_supported(brw, rb));

   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }
   enum isl_format isl_format = brw->mesa_to_isl_render_format[rb_format];

   struct isl_view view = {
      .format = isl_format,
      .base_level = irb->mt_level - irb->mt->first_level,
      .levels = 1,
      .base_array_layer = irb->mt_layer,
      .array_len = MAX2(irb->layer_count, 1),
      .swizzle = ISL_SWIZZLE_IDENTITY,
      .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
   };

   uint32_t offset;
   brw_emit_surface_state(brw, mt, mt->target, view,
                          brw->draw_aux_usage[unit],
                          &offset, surf_index,
                          RELOC_WRITE);
   return offset;
}

GLuint
translate_tex_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_1D:
   case GL_TEXTURE_1D_ARRAY_EXT:
      return BRW_SURFACE_1D;

   case GL_TEXTURE_RECTANGLE_NV:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_2D:
   case GL_TEXTURE_2D_ARRAY_EXT:
   case GL_TEXTURE_EXTERNAL_OES:
   case GL_TEXTURE_2D_MULTISAMPLE:
   case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_3D:
      return BRW_SURFACE_3D;

   case GL_TEXTURE_CUBE_MAP:
   case GL_TEXTURE_CUBE_MAP_ARRAY:
      return BRW_SURFACE_CUBE;

   default:
      unreachable("not reached");
   }
}

uint32_t
brw_get_surface_tiling_bits(enum isl_tiling tiling)
{
   switch (tiling) {
   case ISL_TILING_X:
      return BRW_SURFACE_TILED;
   case ISL_TILING_Y0:
      return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
   default:
      return 0;
   }
}


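/* The legacy SURFACE_STATE encoding used here only has values for 1x and
 * 4x multisampling, so any multisampled surface is programmed as 4x.
 */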
uint32_t
brw_get_surface_num_multisamples(unsigned num_samples)
{
   if (num_samples > 1)
      return BRW_SURFACE_MULTISAMPLECOUNT_4;
   else
      return BRW_SURFACE_MULTISAMPLECOUNT_1;
}

/**
 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
 * swizzling.
 */
int
brw_get_texture_swizzle(const struct gl_context *ctx,
                        const struct gl_texture_object *t)
{
   const struct gl_texture_image *img = t->Image[0][t->BaseLevel];

   int swizzles[SWIZZLE_NIL + 1] = {
      SWIZZLE_X,
      SWIZZLE_Y,
      SWIZZLE_Z,
      SWIZZLE_W,
      SWIZZLE_ZERO,
      SWIZZLE_ONE,
      SWIZZLE_NIL
   };

   if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
       img->_BaseFormat == GL_DEPTH_STENCIL) {
      GLenum depth_mode = t->DepthMode;

      /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
       * with depth component data specified with a sized internal format.
       * Otherwise, it's left at the old default, GL_LUMINANCE.
       */
      if (_mesa_is_gles3(ctx) &&
          img->InternalFormat != GL_DEPTH_COMPONENT &&
          img->InternalFormat != GL_DEPTH_STENCIL) {
         depth_mode = GL_RED;
      }

      switch (depth_mode) {
      case GL_ALPHA:
         swizzles[0] = SWIZZLE_ZERO;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_LUMINANCE:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
         break;
      case GL_INTENSITY:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_RED:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_ONE;
         break;
      }
   }

   GLenum datatype = _mesa_get_format_datatype(img->TexFormat);

   /* If the texture's format is alpha-only, force R, G, and B to
    * 0.0. Similarly, if the texture's format has no alpha channel,
    * force the alpha value read to 1.0. This allows for the
    * implementation to use an RGBA texture for any of these formats
    * without leaking any unexpected values.
    */
   switch (img->_BaseFormat) {
   case GL_ALPHA:
      swizzles[0] = SWIZZLE_ZERO;
      swizzles[1] = SWIZZLE_ZERO;
      swizzles[2] = SWIZZLE_ZERO;
      break;
   case GL_LUMINANCE:
      if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
      }
      break;
   case GL_LUMINANCE_ALPHA:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_W;
      }
      break;
   case GL_INTENSITY:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
      }
      break;
   case GL_RED:
   case GL_RG:
   case GL_RGB:
      if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0 ||
          img->TexFormat == MESA_FORMAT_RGB_DXT1 ||
          img->TexFormat == MESA_FORMAT_SRGB_DXT1)
         swizzles[3] = SWIZZLE_ONE;
      break;
   }

   return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
                        swizzles[GET_SWZ(t->_Swizzle, 1)],
                        swizzles[GET_SWZ(t->_Swizzle, 2)],
                        swizzles[GET_SWZ(t->_Swizzle, 3)]);
}

/**
 * Convert a swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED).  The mappings are
 *
 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
 *         0          1          2          3             4            5
 *         4          5          6          7             0            1
 *   SCS_RED, SCS_GREEN,  SCS_BLUE, SCS_ALPHA,     SCS_ZERO,     SCS_ONE
 *
 * which is simply adding 4 then modding by 8 (or ANDing with 7).
 *
 * We then may need to apply workarounds for textureGather hardware bugs.
 */
static unsigned
swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
{
   unsigned scs = (swizzle + 4) & 7;

   return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
}

static void brw_update_texture_surface(struct gl_context *ctx,
                                       unsigned unit,
                                       uint32_t *surf_offset,
                                       bool for_gather,
                                       bool for_txf,
                                       uint32_t plane)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;

   if (obj->Target == GL_TEXTURE_BUFFER) {
      brw_update_buffer_texture_surface(ctx, unit, surf_offset);

   } else {
      struct intel_texture_object *intel_obj = intel_texture_object(obj);
      struct intel_mipmap_tree *mt = intel_obj->mt;

      if (plane > 0) {
         if (mt->plane[plane - 1] == NULL)
            return;
         mt = mt->plane[plane - 1];
      }

      struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
      /* If this is a view with restricted NumLayers, then our effective depth
       * is not just the miptree depth.
       */
      unsigned view_num_layers;
      if (obj->Immutable && obj->Target != GL_TEXTURE_3D) {
         view_num_layers = obj->NumLayers;
      } else {
         view_num_layers = mt->surf.dim == ISL_SURF_DIM_3D ?
                              mt->surf.logical_level0_px.depth :
                              mt->surf.logical_level0_px.array_len;
      }

      /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
       * texturing functions that return a float, as our code generation always
       * selects the .x channel (which would always be 0).
       */
      struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
      const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
         (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
          firstImage->_BaseFormat == GL_DEPTH_STENCIL);
      const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
                                brw_get_texture_swizzle(&brw->ctx, obj));

      mesa_format mesa_fmt;
      if (firstImage->_BaseFormat == GL_DEPTH_STENCIL ||
          firstImage->_BaseFormat == GL_DEPTH_COMPONENT) {
         /* The format from intel_obj may be a combined depth stencil format
          * when we just want depth.  Pull it from the miptree instead.  This
          * is safe because texture views aren't allowed on depth/stencil.
          */
         mesa_fmt = mt->format;
      } else if (mt->etc_format != MESA_FORMAT_NONE) {
         mesa_fmt = mt->format;
      } else if (plane > 0) {
         mesa_fmt = mt->format;
      } else {
         mesa_fmt = intel_obj->_Format;
      }
      enum isl_format format = translate_tex_format(brw, mesa_fmt,
                                                    for_txf ? GL_DECODE_EXT :
                                                    sampler->sRGBDecode);

      /* Implement gen6 and gen7 gather work-around */
      bool need_green_to_blue = false;
      if (for_gather) {
         if (devinfo->gen == 7 && (format == ISL_FORMAT_R32G32_FLOAT ||
                                   format == ISL_FORMAT_R32G32_SINT ||
                                   format == ISL_FORMAT_R32G32_UINT)) {
            format = ISL_FORMAT_R32G32_FLOAT_LD;
            need_green_to_blue = devinfo->is_haswell;
         } else if (devinfo->gen == 6) {
            /* Sandybridge's gather4 message is broken for integer formats.
             * To work around this, we pretend the surface is UNORM for
             * 8 or 16-bit formats, and emit shader instructions to recover
             * the real INT/UINT value.  For 32-bit formats, we pretend
             * the surface is FLOAT, and simply reinterpret the resulting
             * bits.
             */
            switch (format) {
            case ISL_FORMAT_R8_SINT:
            case ISL_FORMAT_R8_UINT:
               format = ISL_FORMAT_R8_UNORM;
               break;

            case ISL_FORMAT_R16_SINT:
            case ISL_FORMAT_R16_UINT:
               format = ISL_FORMAT_R16_UNORM;
               break;

            case ISL_FORMAT_R32_SINT:
            case ISL_FORMAT_R32_UINT:
               format = ISL_FORMAT_R32_FLOAT;
               break;

            default:
               break;
            }
         }
      }

      if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
         if (devinfo->gen <= 7) {
            assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
            mt = mt->r8stencil_mt;
         } else {
            mt = mt->stencil_mt;
         }
         format = ISL_FORMAT_R8_UINT;
      } else if (devinfo->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
         assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
         mt = mt->r8stencil_mt;
         format = ISL_FORMAT_R8_UINT;
      }

      const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];

      struct isl_view view = {
         .format = format,
         .base_level = obj->MinLevel + obj->BaseLevel,
         .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
         .base_array_layer = obj->MinLayer,
         .array_len = view_num_layers,
         .swizzle = {
            .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
            .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
            .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
            .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
         },
         .usage = ISL_SURF_USAGE_TEXTURE_BIT,
      };

      if (obj->Target == GL_TEXTURE_CUBE_MAP ||
          obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
         view.usage |= ISL_SURF_USAGE_CUBE_BIT;

      enum isl_aux_usage aux_usage =
         intel_miptree_texture_aux_usage(brw, mt, format);

      brw_emit_surface_state(brw, mt, mt->target, view, aux_usage,
                             surf_offset, surf_index,
                             0);
   }
}

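/* Fill out SURFACE_STATE for an untyped buffer surface.  If bo is NULL, no
 * relocation is emitted and the raw buffer_offset is programmed as the
 * address.
 */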
void
brw_emit_buffer_surface_state(struct brw_context *brw,
                              uint32_t *out_offset,
                              struct brw_bo *bo,
                              unsigned buffer_offset,
                              unsigned surface_format,
                              unsigned buffer_size,
                              unsigned pitch,
                              unsigned reloc_flags)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *dw = brw_state_batch(brw,
                                  brw->isl_dev.ss.size,
                                  brw->isl_dev.ss.align,
                                  out_offset);

   isl_buffer_fill_state(&brw->isl_dev, dw,
                         .address = !bo ? buffer_offset :
                                    brw_state_reloc(&brw->batch,
                                                    *out_offset + brw->isl_dev.ss.addr_offset,
                                                    bo, buffer_offset,
                                                    reloc_flags),
                         .size = buffer_size,
                         .format = surface_format,
                         .stride = pitch,
                         .mocs = brw_get_bo_mocs(devinfo, bo));
}

void
brw_update_buffer_texture_surface(struct gl_context *ctx,
                                  unsigned unit,
                                  uint32_t *surf_offset)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_buffer_object *intel_obj =
      intel_buffer_object(tObj->BufferObject);
   uint32_t size = tObj->BufferSize;
   struct brw_bo *bo = NULL;
   mesa_format format = tObj->_BufferObjectFormat;
   const enum isl_format isl_format = brw_isl_format_for_mesa_format(format);
   int texel_size = _mesa_get_format_bytes(format);

   if (intel_obj) {
      size = MIN2(size, intel_obj->Base.Size);
      bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size,
                                  false);
   }

   /* The ARB_texture_buffer_object specification says:
    *
    *    "The number of texels in the buffer texture's texel array is given by
    *
    *       floor(<buffer_size> / (<components> * sizeof(<base_type>))),
    *
    *     where <buffer_size> is the size of the buffer object, in basic
    *     machine units and <components> and <base_type> are the element count
    *     and base data type for elements, as specified in Table X.1.  The
    *     number of texels in the texel array is then clamped to the
    *     implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
    *
    * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
    * so that when ISL divides by stride to obtain the number of texels, that
    * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
    */
   size = MIN2(size, ctx->Const.MaxTextureBufferSize * (unsigned) texel_size);

   if (isl_format == ISL_FORMAT_UNSUPPORTED) {
      _mesa_problem(NULL, "bad format %s for texture buffer\n",
                    _mesa_get_format_name(format));
   }

   brw_emit_buffer_surface_state(brw, surf_offset, bo,
                                 tObj->BufferOffset,
                                 isl_format,
                                 size,
                                 texel_size,
                                 0);
}

/**
 * Set up a binding table entry for use by stream output logic (transform
 * feedback).
 *
 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
 */
void
brw_update_sol_surface(struct brw_context *brw,
                       struct gl_buffer_object *buffer_obj,
                       uint32_t *out_offset, unsigned num_vector_components,
                       unsigned stride_dwords, unsigned offset_dwords)
{
   struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
   uint32_t offset_bytes = 4 * offset_dwords;
   struct brw_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
                                              offset_bytes,
                                              buffer_obj->Size - offset_bytes,
                                              true);
   uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
   uint32_t pitch_minus_1 = 4*stride_dwords - 1;
   size_t size_dwords = buffer_obj->Size / 4;
   uint32_t buffer_size_minus_1, width, height, depth, surface_format;

   /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
    * too big to map using a single binding table entry?
    */
   assert((size_dwords - offset_dwords) / stride_dwords
          <= BRW_MAX_NUM_BUFFER_ENTRIES);

   if (size_dwords > offset_dwords + num_vector_components) {
      /* There is room for at least 1 transform feedback output in the buffer.
       * Compute the number of additional transform feedback outputs the
       * buffer has room for.
       */
      buffer_size_minus_1 =
         (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
   } else {
      /* There isn't even room for a single transform feedback output in the
       * buffer.  We can't configure the binding table entry to prevent output
       * entirely; we'll have to rely on the geometry shader to detect
       * overflow.  But to minimize the damage in case of a bug, set up the
       * binding table entry to just allow a single output.
       */
      buffer_size_minus_1 = 0;
   }
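
   /* The legacy SURFACE_STATE layout encodes the entry count as a 27-bit
    * value split across the width (bits 6:0), height (bits 19:7), and
    * depth (bits 26:20) fields, so unpack buffer_size_minus_1 accordingly.
    */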
   width = buffer_size_minus_1 & 0x7f;
   height = (buffer_size_minus_1 & 0xfff80) >> 7;
   depth = (buffer_size_minus_1 & 0x7f00000) >> 20;

   switch (num_vector_components) {
   case 1:
      surface_format = ISL_FORMAT_R32_FLOAT;
      break;
   case 2:
      surface_format = ISL_FORMAT_R32G32_FLOAT;
      break;
   case 3:
      surface_format = ISL_FORMAT_R32G32B32_FLOAT;
      break;
   case 4:
      surface_format = ISL_FORMAT_R32G32B32A32_FLOAT;
      break;
   default:
      unreachable("Invalid vector size for transform feedback output");
   }

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
             BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
             surface_format << BRW_SURFACE_FORMAT_SHIFT |
             BRW_SURFACE_RC_READ_WRITE;
   surf[1] = brw_state_reloc(&brw->batch,
                             *out_offset + 4, bo, offset_bytes, RELOC_WRITE);
   surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
              height << BRW_SURFACE_HEIGHT_SHIFT);
   surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = 0;
   surf[5] = 0;
}

/* Creates a new WM constant buffer reflecting the current fragment program's
 * constants, if needed by the fragment program.
 *
 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
 * state atom.
 */
static void
brw_upload_wm_pull_constants(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->wm.base;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct brw_program *fp =
      (struct brw_program *) brw->programs[MESA_SHADER_FRAGMENT];

   /* BRW_NEW_FS_PROG_DATA */
   struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;

   _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
   /* _NEW_PROGRAM_CONSTANTS */
   brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
                             stage_state, prog_data);
}

const struct brw_tracked_state brw_wm_pull_constants = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_pull_constants,
};

/**
 * Creates a null renderbuffer surface.
 *
 * This is used when the shader doesn't write to any color output.  An FB
 * write to target 0 will still be emitted, because that's how the thread is
 * terminated (and computed depth is returned), so we need to have the
 * hardware discard the target 0 color output.
 */
static void
emit_null_surface_state(struct brw_context *brw,
                        const struct gl_framebuffer *fb,
                        uint32_t *out_offset)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *surf = brw_state_batch(brw,
                                    brw->isl_dev.ss.size,
                                    brw->isl_dev.ss.align,
                                    out_offset);

   /* Use the fb dimensions or 1x1x1 */
   const unsigned width = fb ? _mesa_geometric_width(fb) : 1;
   const unsigned height = fb ? _mesa_geometric_height(fb) : 1;
   const unsigned samples = fb ? _mesa_geometric_samples(fb) : 1;

   if (devinfo->gen != 6 || samples <= 1) {
      isl_null_fill_state(&brw->isl_dev, surf,
                          isl_extent3d(width, height, 1));
      return;
   }

   /* On Gen6, null render targets seem to cause GPU hangs when multisampling.
    * So work around this problem by rendering into a dummy color buffer.
    *
    * To decrease the amount of memory needed by the workaround buffer, we
    * set its pitch to 128 bytes (the width of a Y tile).  This means that
    * the amount of memory needed for the workaround buffer is
    * (width_in_tiles + height_in_tiles - 1) tiles.
    *
    * Note that since the workaround buffer will be interpreted by the
    * hardware as an interleaved multisampled buffer, we need to compute
    * width_in_tiles and height_in_tiles by dividing the width and height
    * by 16 rather than the normal Y-tile size of 32.
    */
   unsigned width_in_tiles = ALIGN(width, 16) / 16;
   unsigned height_in_tiles = ALIGN(height, 16) / 16;
   unsigned pitch_minus_1 = 127;
   unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
   brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
                      size_needed);

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              ISL_FORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
   surf[1] = brw_state_reloc(&brw->batch, *out_offset + 4,
                             brw->wm.multisampled_null_render_target_bo,
                             0, RELOC_WRITE);

   surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   /* From the Sandy Bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
    * Notes):
    *
    *    If Surface Type is SURFTYPE_NULL, this field must be TRUE
    */
   surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = BRW_SURFACE_MULTISAMPLECOUNT_4;
   surf[5] = 0;
}

/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffers support.
 */
static uint32_t
gen4_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 unsigned unit,
                                 uint32_t surf_index)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;
   uint32_t *surf;
   uint32_t tile_x, tile_y;
   enum isl_format format;
   uint32_t offset;
   /* _NEW_BUFFERS */
   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   /* BRW_NEW_FS_PROG_DATA */

   if (rb->TexImage && !devinfo->has_surface_tile_offset) {
      intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);

      if (tile_x != 0 || tile_y != 0) {
         /* Original gen4 hardware couldn't draw to a non-tile-aligned
          * destination in a miptree unless you actually setup your renderbuffer
          * as a miptree and used the fragile lod/array_index/etc. controls to
          * select the image.  So, instead, we just make a new single-level
          * miptree and render into that.
          */
         intel_renderbuffer_move_to_temp(brw, irb, false);
         assert(irb->align_wa_mt);
         mt = irb->align_wa_mt;
      }
   }

   surf = brw_state_batch(brw, 6 * 4, 32, &offset);

   format = brw->mesa_to_isl_render_format[rb_format];
   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              format << BRW_SURFACE_FORMAT_SHIFT);

   /* reloc */
   assert(mt->offset % mt->cpp == 0);
   surf[1] = brw_state_reloc(&brw->batch, offset + 4, mt->bo,
                             mt->offset +
                             intel_renderbuffer_get_tile_offsets(irb,
                                                                 &tile_x,
                                                                 &tile_y),
                             RELOC_WRITE);

   surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   surf[3] = (brw_get_surface_tiling_bits(mt->surf.tiling) |
              (mt->surf.row_pitch - 1) << BRW_SURFACE_PITCH_SHIFT);

   surf[4] = brw_get_surface_num_multisamples(mt->surf.samples);

   assert(devinfo->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
   /* Note that the low bits of these fields are missing, so
    * there's the possibility of getting in trouble.
    */
   assert(tile_x % 4 == 0);
   assert(tile_y % 2 == 0);
   surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
              (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
              (mt->surf.image_alignment_el.height == 4 ?
               BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));

   if (devinfo->gen < 6) {
      /* _NEW_COLOR */
      if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
          (ctx->Color.BlendEnabled & (1 << unit)))
         surf[0] |= BRW_SURFACE_BLEND_ENABLED;

      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 0))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 1))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 2))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;

      /* As mentioned above, disable writes to the alpha component when the
       * renderbuffer is XRGB.
       */
      if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
          !GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 3)) {
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
      }
   }

   return offset;
}

static void
update_renderbuffer_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS | _NEW_COLOR */
   const struct gl_framebuffer *fb = ctx->DrawBuffer;

   /* Render targets always start at binding table index 0. */
   const unsigned rt_start = 0;

   uint32_t *surf_offsets = brw->wm.base.surf_offset;

   /* Update surfaces for drawing buffers */
   if (fb->_NumColorDrawBuffers >= 1) {
      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];

         if (intel_renderbuffer(rb)) {
            surf_offsets[rt_start + i] = devinfo->gen >= 6 ?
               gen6_update_renderbuffer_surface(brw, rb, i, rt_start + i) :
               gen4_update_renderbuffer_surface(brw, rb, i, rt_start + i);
         } else {
            emit_null_surface_state(brw, fb, &surf_offsets[rt_start + i]);
         }
      }
   } else {
      emit_null_surface_state(brw, fb, &surf_offsets[rt_start]);
   }

   /* The PIPE_CONTROL command description says:
    *
    *    "Whenever a Binding Table Index (BTI) used by a Render Target Message
    *     points to a different RENDER_SURFACE_STATE, SW must issue a Render
    *     Target Cache Flush by enabling this bit.  When render target flush
    *     is set due to new association of BTI, PS Scoreboard Stall bit must
    *     be set in this packet."
    */
   if (devinfo->gen >= 11) {
      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                  PIPE_CONTROL_STALL_AT_SCOREBOARD);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR,
      .brw = BRW_NEW_BATCH,
   },
   .emit = update_renderbuffer_surfaces,
};

const struct brw_tracked_state gen6_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE,
   },
   .emit = update_renderbuffer_surfaces,
};

static void
update_renderbuffer_read_surfaces(struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;

   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);

   if (wm_prog_data->has_render_target_reads &&
       !ctx->Extensions.EXT_shader_framebuffer_fetch) {
      /* _NEW_BUFFERS */
      const struct gl_framebuffer *fb = ctx->DrawBuffer;

      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
         const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
         const unsigned surf_index =
            wm_prog_data->binding_table.render_target_read_start + i;
         uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];

         if (irb) {
            const enum isl_format format = brw->mesa_to_isl_render_format[
               _mesa_get_render_format(ctx, intel_rb_format(irb))];
            assert(isl_format_supports_sampling(&brw->screen->devinfo,
                                                format));

            /* Override the target of the texture if the render buffer is a
             * single slice of a 3D texture (since the minimum array element
             * field of the surface state structure is ignored by the sampler
             * unit for 3D textures on some hardware), or if the render buffer
             * is a 1D array (since shaders always provide the array index
             * coordinate at the Z component to avoid state-dependent
             * recompiles when changing the texture target of the
             * framebuffer).
             */
            const GLenum target =
               (irb->mt->target == GL_TEXTURE_3D &&
                irb->layer_count == 1) ? GL_TEXTURE_2D :
               irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
               irb->mt->target;

            const struct isl_view view = {
               .format = format,
               .base_level = irb->mt_level - irb->mt->first_level,
               .levels = 1,
               .base_array_layer = irb->mt_layer,
               .array_len = irb->layer_count,
               .swizzle = ISL_SWIZZLE_IDENTITY,
               .usage = ISL_SURF_USAGE_TEXTURE_BIT,
            };

            enum isl_aux_usage aux_usage =
               intel_miptree_texture_aux_usage(brw, irb->mt, format);
            if (brw->draw_aux_usage[i] == ISL_AUX_USAGE_NONE)
               aux_usage = ISL_AUX_USAGE_NONE;

            brw_emit_surface_state(brw, irb->mt, target, view, aux_usage,
                                   surf_offset, surf_index,
                                   0);

         } else {
            emit_null_surface_state(brw, fb, surf_offset);
         }
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = update_renderbuffer_read_surfaces,
};

static bool
is_depth_texture(struct intel_texture_object *iobj)
{
   GLenum base_format = _mesa_get_format_base_format(iobj->_Format);
   return base_format == GL_DEPTH_COMPONENT ||
          (base_format == GL_DEPTH_STENCIL && !iobj->base.StencilSampling);
}

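/* Emit texture SURFACE_STATE for every sampler used by the given program
 * stage, writing the resulting offsets into the stage's binding table.
 * for_gather selects the alternate gather4 section of the table; plane
 * selects the per-plane section used for multi-planar (YUV) sampling.
 */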
static void
update_stage_texture_surfaces(struct brw_context *brw,
                              const struct gl_program *prog,
                              struct brw_stage_state *stage_state,
                              bool for_gather, uint32_t plane)
{
   if (!prog)
      return;

   struct gl_context *ctx = &brw->ctx;

   uint32_t *surf_offset = stage_state->surf_offset;

   /* BRW_NEW_*_PROG_DATA */
   if (for_gather)
      surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
   else
      surf_offset += stage_state->prog_data->binding_table.plane_start[plane];

   unsigned num_samplers = util_last_bit(prog->SamplersUsed);
   for (unsigned s = 0; s < num_samplers; s++) {
      surf_offset[s] = 0;

      if (prog->SamplersUsed & (1 << s)) {
         const unsigned unit = prog->SamplerUnits[s];
         const bool used_by_txf = prog->info.textures_used_by_txf & (1 << s);
         struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
         struct intel_texture_object *iobj = intel_texture_object(obj);

         /* _NEW_TEXTURE */
         if (!obj)
            continue;

         if ((prog->ShadowSamplers & (1 << s)) && !is_depth_texture(iobj)) {
            /* A programming note for the sample_c message says:
             *
             *    "The Surface Format of the associated surface must be
             *     indicated as supporting shadow mapping as indicated in the
             *     surface format table."
             *
             * Accessing non-depth textures via a sampler*Shadow type is
             * undefined.  GLSL 4.50 page 162 says:
             *
             *    "If a shadow texture call is made to a sampler that does not
             *     represent a depth texture, then results are undefined."
             *
             * We give them a null surface (zeros) for undefined.  We've seen
             * GPU hangs with color buffers and sample_c, so we try and avoid
             * those with this hack.
             */
            emit_null_surface_state(brw, NULL, surf_offset + s);
         } else {
            brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather,
                                       used_by_txf, plane);
         }
      }
   }
}


/**
 * Construct SURFACE_STATE objects for enabled textures.
 */
static void
brw_update_texture_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* BRW_NEW_VERTEX_PROGRAM */
   struct gl_program *vs = brw->programs[MESA_SHADER_VERTEX];

   /* BRW_NEW_TESS_PROGRAMS */
   struct gl_program *tcs = brw->programs[MESA_SHADER_TESS_CTRL];
   struct gl_program *tes = brw->programs[MESA_SHADER_TESS_EVAL];

   /* BRW_NEW_GEOMETRY_PROGRAM */
   struct gl_program *gs = brw->programs[MESA_SHADER_GEOMETRY];

   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct gl_program *fs = brw->programs[MESA_SHADER_FRAGMENT];

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
   update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
   update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
   update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
   update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);

   /* Emit an alternate set of surface state for gather.  This allows the
    * surface format to be overridden for only the gather4 messages.
    */
   if (devinfo->gen < 8) {
      if (vs && vs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
      if (tcs && tcs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
      if (tes && tes->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
      if (gs && gs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
      if (fs && fs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
   }

   if (fs) {
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_GEOMETRY_PROGRAM |
             BRW_NEW_GS_PROG_DATA |
             BRW_NEW_TESS_PROGRAMS |
             BRW_NEW_TCS_PROG_DATA |
             BRW_NEW_TES_PROG_DATA |
             BRW_NEW_TEXTURE_BUFFER |
             BRW_NEW_VERTEX_PROGRAM |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = brw_update_texture_surfaces,
};

static void
brw_update_cs_texture_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* BRW_NEW_COMPUTE_PROGRAM */
   struct gl_program *cs = brw->programs[MESA_SHADER_COMPUTE];

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);

   /* Emit an alternate set of surface state for gather.  This allows the
    * surface format to be overridden for only the gather4 messages.
    */
   if (devinfo->gen < 8) {
      if (cs && cs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_cs_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_COMPUTE_PROGRAM |
             BRW_NEW_AUX_STATE,
   },
   .emit = brw_update_cs_texture_surfaces,
};

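/* Emit a buffer surface for a UBO/SSBO/atomic counter binding point, or a
 * null surface when no buffer is bound so accesses read back zero instead
 * of faulting.
 */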
static void
upload_buffer_surface(struct brw_context *brw,
                      struct gl_buffer_binding *binding,
                      uint32_t *out_offset,
                      enum isl_format format,
                      unsigned reloc_flags)
{
   struct gl_context *ctx = &brw->ctx;

   if (binding->BufferObject == ctx->Shared->NullBufferObj) {
      emit_null_surface_state(brw, NULL, out_offset);
   } else {
      ptrdiff_t size = binding->BufferObject->Size - binding->Offset;
      if (!binding->AutomaticSize)
         size = MIN2(size, binding->Size);

      struct intel_buffer_object *iobj =
         intel_buffer_object(binding->BufferObject);
      struct brw_bo *bo =
         intel_bufferobj_buffer(brw, iobj, binding->Offset, size,
                                (reloc_flags & RELOC_WRITE) != 0);

      brw_emit_buffer_surface_state(brw, out_offset, bo, binding->Offset,
                                    format, size, 1, reloc_flags);
   }
}

void
brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;

   if (!prog || (prog->info.num_ubos == 0 &&
                 prog->info.num_ssbos == 0 &&
                 prog->info.num_abos == 0))
      return;

   uint32_t *ubo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ubo_start];

   for (int i = 0; i < prog->info.num_ubos; i++) {
      struct gl_buffer_binding *binding =
         &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];
      upload_buffer_surface(brw, binding, &ubo_surf_offsets[i],
                            ISL_FORMAT_R32G32B32A32_FLOAT, 0);
   }

   uint32_t *abo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
   uint32_t *ssbo_surf_offsets = abo_surf_offsets + prog->info.num_abos;

   for (int i = 0; i < prog->info.num_abos; i++) {
      struct gl_buffer_binding *binding =
         &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
      upload_buffer_surface(brw, binding, &abo_surf_offsets[i],
                            ISL_FORMAT_RAW, RELOC_WRITE);
   }

   for (int i = 0; i < prog->info.num_ssbos; i++) {
      struct gl_buffer_binding *binding =
         &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];

      upload_buffer_surface(brw, binding, &ssbo_surf_offsets[i],
                            ISL_FORMAT_RAW, RELOC_WRITE);
   }

   stage_state->push_constants_dirty = true;
   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

static void
brw_upload_wm_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog = ctx->FragmentProgram._Current;

   /* BRW_NEW_FS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
}

const struct brw_tracked_state brw_wm_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_wm_ubo_surfaces,
};

static void
brw_upload_cs_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   /* BRW_NEW_CS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog, &brw->cs.base, brw->cs.base.prog_data);
}

const struct brw_tracked_state brw_cs_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_cs_ubo_surfaces,
};

static void
brw_upload_cs_image_surfaces(struct brw_context *brw)
{
   /* _NEW_PROGRAM */
   const struct gl_program *cp = brw->programs[MESA_SHADER_COMPUTE];

   if (cp) {
      /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, cp, &brw->cs.base,
                                brw->cs.base.prog_data);
   }
}

const struct brw_tracked_state brw_cs_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_AUX_STATE |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_cs_image_surfaces,
};

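/* Choose the surface format for a shader image unit.  Write-only access can
 * use the real format; reads need a typed format the hardware supports,
 * otherwise we fall back to RAW and untyped read/write messages.
 */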
static uint32_t
get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   enum isl_format hw_format = brw_isl_format_for_mesa_format(format);
   if (access == GL_WRITE_ONLY) {
      return hw_format;
   } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
      /* Typed surface reads support a very limited subset of the shader
       * image formats.  Translate it into the closest format the
       * hardware supports.
       */
      return isl_lower_storage_image_format(devinfo, hw_format);
   } else {
      /* The hardware doesn't actually support a typed format that we can use
       * so we have to fall back to untyped read/write messages.
       */
      return ISL_FORMAT_RAW;
   }
}

static void
update_default_image_param(struct brw_context *brw,
                           struct gl_image_unit *u,
                           unsigned surface_idx,
                           struct brw_image_param *param)
{
   memset(param, 0, sizeof(*param));
   param->surface_idx = surface_idx;
   /* Set the swizzling shifts to all-ones to effectively disable swizzling --
    * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
    * detailed explanation of these parameters.
    */
   param->swizzling[0] = 0xff;
   param->swizzling[1] = 0xff;
}

static void
update_buffer_image_param(struct brw_context *brw,
                          struct gl_image_unit *u,
                          unsigned surface_idx,
                          struct brw_image_param *param)
{
   struct gl_buffer_object *obj = u->TexObj->BufferObject;
   const uint32_t size = MIN2((uint32_t) u->TexObj->BufferSize, obj->Size);
   update_default_image_param(brw, u, surface_idx, param);

   param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
   param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
}

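/* Number of layers an image of the given target covers: always the six cube
 * faces for cube maps, the minified depth at the given level for 3D images,
 * and the array length otherwise.
 */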
static unsigned
get_image_num_layers(const struct intel_mipmap_tree *mt, GLenum target,
                     unsigned level)
{
   if (target == GL_TEXTURE_CUBE_MAP)
      return 6;

   return target == GL_TEXTURE_3D ?
      minify(mt->surf.logical_level0_px.depth, level) :
      mt->surf.logical_level0_px.array_len;
}

static void
update_image_surface(struct brw_context *brw,
                     struct gl_image_unit *u,
                     GLenum access,
                     unsigned surface_idx,
                     uint32_t *surf_offset,
                     struct brw_image_param *param)
{
   if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
      struct gl_texture_object *obj = u->TexObj;
      const unsigned format = get_image_format(brw, u->_ActualFormat, access);

      if (obj->Target == GL_TEXTURE_BUFFER) {
         struct intel_buffer_object *intel_obj =
            intel_buffer_object(obj->BufferObject);
         const unsigned texel_size = (format == ISL_FORMAT_RAW ? 1 :
                                      _mesa_get_format_bytes(u->_ActualFormat));

         brw_emit_buffer_surface_state(
            brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
            format, intel_obj->Base.Size, texel_size,
            access != GL_READ_ONLY ? RELOC_WRITE : 0);

         update_buffer_image_param(brw, u, surface_idx, param);

      } else {
         struct intel_texture_object *intel_obj = intel_texture_object(obj);
         struct intel_mipmap_tree *mt = intel_obj->mt;
         const unsigned num_layers = u->Layered ?
            get_image_num_layers(mt, obj->Target, u->Level) : 1;

         struct isl_view view = {
            .format = format,
            .base_level = obj->MinLevel + u->Level,
            .levels = 1,
            .base_array_layer = obj->MinLayer + u->_Layer,
            .array_len = num_layers,
            .swizzle = ISL_SWIZZLE_IDENTITY,
            .usage = ISL_SURF_USAGE_STORAGE_BIT,
         };

         if (format == ISL_FORMAT_RAW) {
            brw_emit_buffer_surface_state(
               brw, surf_offset, mt->bo, mt->offset,
               format, mt->bo->size - mt->offset, 1 /* pitch */,
               access != GL_READ_ONLY ? RELOC_WRITE : 0);

         } else {
            const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
            assert(!intel_miptree_has_color_unresolved(mt,
                                                       view.base_level, 1,
                                                       view.base_array_layer,
                                                       view.array_len));
            brw_emit_surface_state(brw, mt, mt->target, view,
                                   ISL_AUX_USAGE_NONE,
                                   surf_offset, surf_index,
                                   access == GL_READ_ONLY ? 0 : RELOC_WRITE);
         }

         isl_surf_fill_image_param(&brw->isl_dev, param, &mt->surf, &view);
         param->surface_idx = surface_idx;
      }

   } else {
      emit_null_surface_state(brw, NULL, surf_offset);
      update_default_image_param(brw, u, surface_idx, param);
   }
}

void
brw_upload_image_surfaces(struct brw_context *brw,
                          const struct gl_program *prog,
                          struct brw_stage_state *stage_state,
                          struct brw_stage_prog_data *prog_data)
{
   assert(prog);
   struct gl_context *ctx = &brw->ctx;

   if (prog->info.num_images) {
      for (unsigned i = 0; i < prog->info.num_images; i++) {
         struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
         const unsigned surf_idx = prog_data->binding_table.image_start + i;

         update_image_surface(brw, u, prog->sh.ImageAccess[i],
                              surf_idx,
                              &stage_state->surf_offset[surf_idx],
                              &stage_state->image_param[i]);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
      /* This may have changed the image metadata that depends on the context
       * image unit state and is passed to the program as uniforms, so make
       * sure that push and pull constants are reuploaded.
       */
      brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
   }
}

static void
brw_upload_wm_image_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct gl_program *wm = brw->programs[MESA_SHADER_FRAGMENT];

   if (wm) {
      /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, wm, &brw->wm.base,
                                brw->wm.base.prog_data);
   }
}

const struct brw_tracked_state brw_wm_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_wm_image_surfaces,
};

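/* Bind the buffer that backs gl_NumWorkGroups.  For glDispatchCompute the
 * three counts are uploaded into a temporary buffer here; for indirect
 * dispatch the application-provided BO recorded by the dispatch code is
 * reused instead.
 */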
static void
brw_upload_cs_work_groups_surface(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
   /* BRW_NEW_CS_PROG_DATA */
   const struct brw_cs_prog_data *cs_prog_data =
      brw_cs_prog_data(brw->cs.base.prog_data);

   if (prog && cs_prog_data->uses_num_work_groups) {
      const unsigned surf_idx =
         cs_prog_data->binding_table.work_groups_start;
      uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
      struct brw_bo *bo;
      uint32_t bo_offset;

      if (brw->compute.num_work_groups_bo == NULL) {
         bo = NULL;
         brw_upload_data(&brw->upload,
                         (void *) brw->compute.num_work_groups,
                         3 * sizeof(GLuint),
                         sizeof(GLuint),
                         &bo,
                         &bo_offset);
      } else {
         bo = brw->compute.num_work_groups_bo;
         bo_offset = brw->compute.num_work_groups_offset;
      }

      brw_emit_buffer_surface_state(brw, surf_offset,
                                    bo, bo_offset,
                                    ISL_FORMAT_RAW,
                                    3 * sizeof(GLuint), 1,
                                    RELOC_WRITE);
      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_cs_work_groups_surface = {
   .dirty = {
      .brw = BRW_NEW_CS_PROG_DATA |
             BRW_NEW_CS_WORK_GROUPS
   },
   .emit = brw_upload_cs_work_groups_surface,
};