mesa: Rename MESA_shader_framebuffer_fetch gl_extensions bits to EXT.
[mesa.git] / src/mesa/drivers/dri/i965/brw_wm_surface_state.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "compiler/nir/nir.h"
#include "main/context.h"
#include "main/blend.h"
#include "main/mtypes.h"
#include "main/samplerobj.h"
#include "main/shaderimage.h"
#include "main/teximage.h"
#include "program/prog_parameter.h"
#include "program/prog_instruction.h"
#include "main/framebuffer.h"
#include "main/shaderapi.h"

#include "isl/isl.h"

#include "intel_mipmap_tree.h"
#include "intel_batchbuffer.h"
#include "intel_tex.h"
#include "intel_fbo.h"
#include "intel_buffer_objects.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "brw_wm.h"

static const uint32_t wb_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_WB,
   [9] = SKL_MOCS_WB,
   [10] = CNL_MOCS_WB,
   [11] = ICL_MOCS_WB,
};

static const uint32_t pte_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_PTE,
   [9] = SKL_MOCS_PTE,
   [10] = CNL_MOCS_PTE,
   [11] = ICL_MOCS_PTE,
};

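/**
 * Return the MOCS entry to use for a buffer.  Externally shared BOs take
 * their cacheability from the kernel's page table entry (PTE) rather than
 * forcing write-back caching, since they may be scanned out by the
 * (non-coherent) display engine; everything else gets plain write-back (WB).
 */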
uint32_t
brw_get_bo_mocs(const struct gen_device_info *devinfo, struct brw_bo *bo)
{
   return (bo && bo->external ? pte_mocs : wb_mocs)[devinfo->gen];
}

static void
get_isl_surf(struct brw_context *brw, struct intel_mipmap_tree *mt,
             GLenum target, struct isl_view *view,
             uint32_t *tile_x, uint32_t *tile_y,
             uint32_t *offset, struct isl_surf *surf)
{
   *surf = mt->surf;

   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const enum isl_dim_layout dim_layout =
      get_isl_dim_layout(devinfo, mt->surf.tiling, target);

   surf->dim = get_isl_surf_dim(target);

   if (surf->dim_layout == dim_layout)
      return;

   /* The layout of the specified texture target is not compatible with the
    * actual layout of the miptree structure in memory -- You're entering
    * dangerous territory, this can only possibly work if you only intended
    * to access a single level and slice of the texture, and the hardware
    * supports the tile offset feature in order to allow non-tile-aligned
    * base offsets, since we'll have to point the hardware to the first
    * texel of the level instead of relying on the usual base level/layer
    * controls.
    */
   assert(devinfo->has_surface_tile_offset);
   assert(view->levels == 1 && view->array_len == 1);
   assert(*tile_x == 0 && *tile_y == 0);

   *offset += intel_miptree_get_tile_offsets(mt, view->base_level,
                                             view->base_array_layer,
                                             tile_x, tile_y);

   /* Minify the logical dimensions of the texture. */
   const unsigned l = view->base_level - mt->first_level;
   surf->logical_level0_px.width = minify(surf->logical_level0_px.width, l);
   surf->logical_level0_px.height = surf->dim <= ISL_SURF_DIM_1D ? 1 :
      minify(surf->logical_level0_px.height, l);
   surf->logical_level0_px.depth = surf->dim <= ISL_SURF_DIM_2D ? 1 :
      minify(surf->logical_level0_px.depth, l);

   /* Only the base level and layer can be addressed with the overridden
    * layout.
    */
   surf->logical_level0_px.array_len = 1;
   surf->levels = 1;
   surf->dim_layout = dim_layout;

   /* The requested slice of the texture is now at the base level and
    * layer.
    */
   view->base_level = 0;
   view->base_array_layer = 0;
}

static void
brw_emit_surface_state(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       GLenum target, struct isl_view view,
                       enum isl_aux_usage aux_usage,
                       uint32_t *surf_offset, int surf_index,
                       unsigned reloc_flags)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t tile_x = mt->level[0].level_x;
   uint32_t tile_y = mt->level[0].level_y;
   uint32_t offset = mt->offset;

   struct isl_surf surf;

   get_isl_surf(brw, mt, target, &view, &tile_x, &tile_y, &offset, &surf);

   union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };

   struct brw_bo *aux_bo = NULL;
   struct isl_surf *aux_surf = NULL;
   uint64_t aux_offset = 0;
   switch (aux_usage) {
   case ISL_AUX_USAGE_MCS:
   case ISL_AUX_USAGE_CCS_D:
   case ISL_AUX_USAGE_CCS_E:
      aux_surf = &mt->mcs_buf->surf;
      aux_bo = mt->mcs_buf->bo;
      aux_offset = mt->mcs_buf->offset;
      break;

   case ISL_AUX_USAGE_HIZ:
      aux_surf = &mt->hiz_buf->surf;
      aux_bo = mt->hiz_buf->bo;
      aux_offset = 0;
      break;

   case ISL_AUX_USAGE_NONE:
      break;
   }

   if (aux_usage != ISL_AUX_USAGE_NONE) {
      /* We only really need a clear color if we also have an auxiliary
       * surface.  Without one, it does nothing.
       */
      clear_color = mt->fast_clear_color;
   }

   void *state = brw_state_batch(brw,
                                 brw->isl_dev.ss.size,
                                 brw->isl_dev.ss.align,
                                 surf_offset);

   isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
                       .address = brw_state_reloc(&brw->batch,
                                                  *surf_offset + brw->isl_dev.ss.addr_offset,
                                                  mt->bo, offset, reloc_flags),
                       .aux_surf = aux_surf, .aux_usage = aux_usage,
                       .aux_address = aux_offset,
                       .mocs = brw_get_bo_mocs(devinfo, mt->bo),
                       .clear_color = clear_color,
                       .x_offset_sa = tile_x, .y_offset_sa = tile_y);
   if (aux_surf) {
      /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
       * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
       * contain other control information.  Since buffer addresses are always
       * on 4k boundaries (and thus have their lower 12 bits zero), we can use
       * an ordinary reloc to do the necessary address translation.
       *
       * FIXME: move to the point of assignment.
       */
      assert((aux_offset & 0xfff) == 0);
      uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
      *aux_addr = brw_state_reloc(&brw->batch,
                                  *surf_offset +
                                  brw->isl_dev.ss.aux_addr_offset,
                                  aux_bo, *aux_addr,
                                  reloc_flags);
   }
}

static uint32_t
gen6_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 unsigned unit,
                                 uint32_t surf_index)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;

   assert(brw_render_target_supported(brw, rb));

   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }
   enum isl_format isl_format = brw->mesa_to_isl_render_format[rb_format];

   struct isl_view view = {
      .format = isl_format,
      .base_level = irb->mt_level - irb->mt->first_level,
      .levels = 1,
      .base_array_layer = irb->mt_layer,
      .array_len = MAX2(irb->layer_count, 1),
      .swizzle = ISL_SWIZZLE_IDENTITY,
      .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
   };

   uint32_t offset;
   brw_emit_surface_state(brw, mt, mt->target, view,
                          brw->draw_aux_usage[unit],
                          &offset, surf_index,
                          RELOC_WRITE);
   return offset;
}

GLuint
translate_tex_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_1D:
   case GL_TEXTURE_1D_ARRAY_EXT:
      return BRW_SURFACE_1D;

   case GL_TEXTURE_RECTANGLE_NV:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_2D:
   case GL_TEXTURE_2D_ARRAY_EXT:
   case GL_TEXTURE_EXTERNAL_OES:
   case GL_TEXTURE_2D_MULTISAMPLE:
   case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_3D:
      return BRW_SURFACE_3D;

   case GL_TEXTURE_CUBE_MAP:
   case GL_TEXTURE_CUBE_MAP_ARRAY:
      return BRW_SURFACE_CUBE;

   default:
      unreachable("not reached");
   }
}

uint32_t
brw_get_surface_tiling_bits(enum isl_tiling tiling)
{
   switch (tiling) {
   case ISL_TILING_X:
      return BRW_SURFACE_TILED;
   case ISL_TILING_Y0:
      return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
   default:
      return 0;
   }
}

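/**
 * Translate a sample count into the BRW_SURFACE_MULTISAMPLECOUNT_* field of
 * the legacy SURFACE_STATE layout, which only distinguishes single-sampled
 * surfaces (1x) from multisampled ones (reported as 4x).
 */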
uint32_t
brw_get_surface_num_multisamples(unsigned num_samples)
{
   if (num_samples > 1)
      return BRW_SURFACE_MULTISAMPLECOUNT_4;
   else
      return BRW_SURFACE_MULTISAMPLECOUNT_1;
}

/**
 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
 * swizzling.
 */
int
brw_get_texture_swizzle(const struct gl_context *ctx,
                        const struct gl_texture_object *t)
{
   const struct gl_texture_image *img = t->Image[0][t->BaseLevel];

   int swizzles[SWIZZLE_NIL + 1] = {
      SWIZZLE_X,
      SWIZZLE_Y,
      SWIZZLE_Z,
      SWIZZLE_W,
      SWIZZLE_ZERO,
      SWIZZLE_ONE,
      SWIZZLE_NIL
   };

   if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
       img->_BaseFormat == GL_DEPTH_STENCIL) {
      GLenum depth_mode = t->DepthMode;

      /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
       * with depth component data specified with a sized internal format.
       * Otherwise, it's left at the old default, GL_LUMINANCE.
       */
      if (_mesa_is_gles3(ctx) &&
          img->InternalFormat != GL_DEPTH_COMPONENT &&
          img->InternalFormat != GL_DEPTH_STENCIL) {
         depth_mode = GL_RED;
      }

      switch (depth_mode) {
      case GL_ALPHA:
         swizzles[0] = SWIZZLE_ZERO;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_LUMINANCE:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
         break;
      case GL_INTENSITY:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_RED:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_ONE;
         break;
      }
   }

   GLenum datatype = _mesa_get_format_datatype(img->TexFormat);

   /* If the texture's format is alpha-only, force R, G, and B to
    * 0.0. Similarly, if the texture's format has no alpha channel,
    * force the alpha value read to 1.0. This allows for the
    * implementation to use an RGBA texture for any of these formats
    * without leaking any unexpected values.
    */
   switch (img->_BaseFormat) {
   case GL_ALPHA:
      swizzles[0] = SWIZZLE_ZERO;
      swizzles[1] = SWIZZLE_ZERO;
      swizzles[2] = SWIZZLE_ZERO;
      break;
   case GL_LUMINANCE:
      if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
      }
      break;
   case GL_LUMINANCE_ALPHA:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_W;
      }
      break;
   case GL_INTENSITY:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
      }
      break;
   case GL_RED:
   case GL_RG:
   case GL_RGB:
      if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0 ||
          img->TexFormat == MESA_FORMAT_RGB_DXT1 ||
          img->TexFormat == MESA_FORMAT_SRGB_DXT1)
         swizzles[3] = SWIZZLE_ONE;
      break;
   }

   return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
                        swizzles[GET_SWZ(t->_Swizzle, 1)],
                        swizzles[GET_SWZ(t->_Swizzle, 2)],
                        swizzles[GET_SWZ(t->_Swizzle, 3)]);
}

/**
 * Convert a swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED).  The mappings are
 *
 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
 *         0          1          2          3             4            5
 *         4          5          6          7             0            1
 *   SCS_RED, SCS_GREEN,  SCS_BLUE, SCS_ALPHA,     SCS_ZERO,     SCS_ONE
 *
 * which is simply adding 4 then modding by 8 (or anding with 7).
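 *
 * For example, SWIZZLE_Y (1) maps to (1 + 4) & 7 = 5 = SCS_GREEN, and
 * SWIZZLE_ZERO (4) wraps around to (4 + 4) & 7 = 0 = SCS_ZERO.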
 *
 * We then may need to apply workarounds for textureGather hardware bugs.
 */
static unsigned
swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
{
   unsigned scs = (swizzle + 4) & 7;

   return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
}

static void brw_update_texture_surface(struct gl_context *ctx,
                                       unsigned unit,
                                       uint32_t *surf_offset,
                                       bool for_gather,
                                       bool for_txf,
                                       uint32_t plane)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;

   if (obj->Target == GL_TEXTURE_BUFFER) {
      brw_update_buffer_texture_surface(ctx, unit, surf_offset);

   } else {
      struct intel_texture_object *intel_obj = intel_texture_object(obj);
      struct intel_mipmap_tree *mt = intel_obj->mt;

      if (plane > 0) {
         if (mt->plane[plane - 1] == NULL)
            return;
         mt = mt->plane[plane - 1];
      }

      struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
      /* If this is a view with restricted NumLayers, then our effective depth
       * is not just the miptree depth.
       */
      unsigned view_num_layers;
      if (obj->Immutable && obj->Target != GL_TEXTURE_3D) {
         view_num_layers = obj->NumLayers;
      } else {
         view_num_layers = mt->surf.dim == ISL_SURF_DIM_3D ?
                              mt->surf.logical_level0_px.depth :
                              mt->surf.logical_level0_px.array_len;
      }

      /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
       * texturing functions that return a float, as our code generation always
       * selects the .x channel (which would always be 0).
       */
      struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
      const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
         (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
          firstImage->_BaseFormat == GL_DEPTH_STENCIL);
      const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
                                brw_get_texture_swizzle(&brw->ctx, obj));

      mesa_format mesa_fmt;
      if (firstImage->_BaseFormat == GL_DEPTH_STENCIL ||
          firstImage->_BaseFormat == GL_DEPTH_COMPONENT) {
         /* The format from intel_obj may be a combined depth stencil format
          * when we just want depth.  Pull it from the miptree instead.  This
          * is safe because texture views aren't allowed on depth/stencil.
          */
         mesa_fmt = mt->format;
      } else if (mt->etc_format != MESA_FORMAT_NONE) {
         mesa_fmt = mt->format;
      } else if (plane > 0) {
         mesa_fmt = mt->format;
      } else {
         mesa_fmt = intel_obj->_Format;
      }
      enum isl_format format = translate_tex_format(brw, mesa_fmt,
                                                    for_txf ? GL_DECODE_EXT :
                                                    sampler->sRGBDecode);

      /* Implement gen6 and gen7 gather work-around */
      bool need_green_to_blue = false;
      if (for_gather) {
         if (devinfo->gen == 7 && (format == ISL_FORMAT_R32G32_FLOAT ||
                                   format == ISL_FORMAT_R32G32_SINT ||
                                   format == ISL_FORMAT_R32G32_UINT)) {
            format = ISL_FORMAT_R32G32_FLOAT_LD;
            need_green_to_blue = devinfo->is_haswell;
         } else if (devinfo->gen == 6) {
            /* Sandybridge's gather4 message is broken for integer formats.
             * To work around this, we pretend the surface is UNORM for
             * 8 or 16-bit formats, and emit shader instructions to recover
             * the real INT/UINT value.  For 32-bit formats, we pretend
             * the surface is FLOAT, and simply reinterpret the resulting
             * bits.
             */
            switch (format) {
            case ISL_FORMAT_R8_SINT:
            case ISL_FORMAT_R8_UINT:
               format = ISL_FORMAT_R8_UNORM;
               break;

            case ISL_FORMAT_R16_SINT:
            case ISL_FORMAT_R16_UINT:
               format = ISL_FORMAT_R16_UNORM;
               break;

            case ISL_FORMAT_R32_SINT:
            case ISL_FORMAT_R32_UINT:
               format = ISL_FORMAT_R32_FLOAT;
               break;

            default:
               break;
            }
         }
      }

      if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
         if (devinfo->gen <= 7) {
            assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
            mt = mt->r8stencil_mt;
         } else {
            mt = mt->stencil_mt;
         }
         format = ISL_FORMAT_R8_UINT;
      } else if (devinfo->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
         assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
         mt = mt->r8stencil_mt;
         format = ISL_FORMAT_R8_UINT;
      }

      const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];

      struct isl_view view = {
         .format = format,
         .base_level = obj->MinLevel + obj->BaseLevel,
         .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
         .base_array_layer = obj->MinLayer,
         .array_len = view_num_layers,
         .swizzle = {
            .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
            .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
            .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
            .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
         },
         .usage = ISL_SURF_USAGE_TEXTURE_BIT,
      };

      if (obj->Target == GL_TEXTURE_CUBE_MAP ||
          obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
         view.usage |= ISL_SURF_USAGE_CUBE_BIT;

      enum isl_aux_usage aux_usage =
         intel_miptree_texture_aux_usage(brw, mt, format);

      brw_emit_surface_state(brw, mt, mt->target, view, aux_usage,
                             surf_offset, surf_index,
                             0);
   }
}

void
brw_emit_buffer_surface_state(struct brw_context *brw,
                              uint32_t *out_offset,
                              struct brw_bo *bo,
                              unsigned buffer_offset,
                              unsigned surface_format,
                              unsigned buffer_size,
                              unsigned pitch,
                              unsigned reloc_flags)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *dw = brw_state_batch(brw,
                                  brw->isl_dev.ss.size,
                                  brw->isl_dev.ss.align,
                                  out_offset);

   isl_buffer_fill_state(&brw->isl_dev, dw,
                         .address = !bo ? buffer_offset :
                                    brw_state_reloc(&brw->batch,
                                                    *out_offset + brw->isl_dev.ss.addr_offset,
                                                    bo, buffer_offset,
                                                    reloc_flags),
                         .size = buffer_size,
                         .format = surface_format,
                         .stride = pitch,
                         .mocs = brw_get_bo_mocs(devinfo, bo));
}

void
brw_update_buffer_texture_surface(struct gl_context *ctx,
                                  unsigned unit,
                                  uint32_t *surf_offset)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_buffer_object *intel_obj =
      intel_buffer_object(tObj->BufferObject);
   uint32_t size = tObj->BufferSize;
   struct brw_bo *bo = NULL;
   mesa_format format = tObj->_BufferObjectFormat;
   const enum isl_format isl_format = brw_isl_format_for_mesa_format(format);
   int texel_size = _mesa_get_format_bytes(format);

   if (intel_obj) {
      size = MIN2(size, intel_obj->Base.Size);
      bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size,
                                  false);
   }

   /* The ARB_texture_buffer_specification says:
    *
    *    "The number of texels in the buffer texture's texel array is given by
    *
    *       floor(<buffer_size> / (<components> * sizeof(<base_type>)),
    *
    *     where <buffer_size> is the size of the buffer object, in basic
    *     machine units and <components> and <base_type> are the element count
    *     and base data type for elements, as specified in Table X.1.  The
    *     number of texels in the texel array is then clamped to the
    *     implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
    *
    * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
    * so that when ISL divides by stride to obtain the number of texels, that
    * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
    */
   size = MIN2(size, ctx->Const.MaxTextureBufferSize * (unsigned) texel_size);
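   /* For example, with RGBA32F texels (16 bytes each) and a
    * MAX_TEXTURE_BUFFER_SIZE of 2^27 texels, the clamp above caps the
    * surface at 2 GiB no matter how large the buffer object is.
    */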

   if (isl_format == ISL_FORMAT_UNSUPPORTED) {
      _mesa_problem(NULL, "bad format %s for texture buffer\n",
                    _mesa_get_format_name(format));
   }

   brw_emit_buffer_surface_state(brw, surf_offset, bo,
                                 tObj->BufferOffset,
                                 isl_format,
                                 size,
                                 texel_size,
                                 0);
}

/**
 * Set up a binding table entry for use by stream output logic (transform
 * feedback).
 *
 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
 */
void
brw_update_sol_surface(struct brw_context *brw,
                       struct gl_buffer_object *buffer_obj,
                       uint32_t *out_offset, unsigned num_vector_components,
                       unsigned stride_dwords, unsigned offset_dwords)
{
   struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
   uint32_t offset_bytes = 4 * offset_dwords;
   struct brw_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
                                              offset_bytes,
                                              buffer_obj->Size - offset_bytes,
                                              true);
   uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
   uint32_t pitch_minus_1 = 4 * stride_dwords - 1;
   size_t size_dwords = buffer_obj->Size / 4;
   uint32_t buffer_size_minus_1, width, height, depth, surface_format;

   /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
    * too big to map using a single binding table entry?
    */
   assert((size_dwords - offset_dwords) / stride_dwords
          <= BRW_MAX_NUM_BUFFER_ENTRIES);

   if (size_dwords > offset_dwords + num_vector_components) {
      /* There is room for at least 1 transform feedback output in the buffer.
       * Compute the number of additional transform feedback outputs the
       * buffer has room for.
       */
      buffer_size_minus_1 =
         (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
   } else {
      /* There isn't even room for a single transform feedback output in the
       * buffer.  We can't configure the binding table entry to prevent output
       * entirely; we'll have to rely on the geometry shader to detect
       * overflow.  But to minimize the damage in case of a bug, set up the
       * binding table entry to just allow a single output.
       */
      buffer_size_minus_1 = 0;
   }
   width = buffer_size_minus_1 & 0x7f;
   height = (buffer_size_minus_1 & 0xfff80) >> 7;
   depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
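   /* The three fields above reassemble buffer_size_minus_1 as a 27-bit entry
    * count: width holds bits [6:0], height bits [19:7], and depth bits
    * [26:20], matching the masks used above.
    */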

   switch (num_vector_components) {
   case 1:
      surface_format = ISL_FORMAT_R32_FLOAT;
      break;
   case 2:
      surface_format = ISL_FORMAT_R32G32_FLOAT;
      break;
   case 3:
      surface_format = ISL_FORMAT_R32G32B32_FLOAT;
      break;
   case 4:
      surface_format = ISL_FORMAT_R32G32B32A32_FLOAT;
      break;
   default:
      unreachable("Invalid vector size for transform feedback output");
   }

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
             BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
             surface_format << BRW_SURFACE_FORMAT_SHIFT |
             BRW_SURFACE_RC_READ_WRITE;
   surf[1] = brw_state_reloc(&brw->batch,
                             *out_offset + 4, bo, offset_bytes, RELOC_WRITE);
   surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
              height << BRW_SURFACE_HEIGHT_SHIFT);
   surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = 0;
   surf[5] = 0;
}

/* Creates a new WM constant buffer reflecting the current fragment program's
 * constants, if needed by the fragment program.
 *
 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
 * state atom.
 */
static void
brw_upload_wm_pull_constants(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->wm.base;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct brw_program *fp =
      (struct brw_program *) brw->programs[MESA_SHADER_FRAGMENT];

   /* BRW_NEW_FS_PROG_DATA */
   struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;

   _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
   /* _NEW_PROGRAM_CONSTANTS */
   brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
                             stage_state, prog_data);
}

const struct brw_tracked_state brw_wm_pull_constants = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_pull_constants,
};

/**
 * Creates a null renderbuffer surface.
 *
 * This is used when the shader doesn't write to any color output.  An FB
 * write to target 0 will still be emitted, because that's how the thread is
 * terminated (and computed depth is returned), so we need to have the
 * hardware discard the target 0 color output.
 */
static void
emit_null_surface_state(struct brw_context *brw,
                        const struct gl_framebuffer *fb,
                        uint32_t *out_offset)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *surf = brw_state_batch(brw,
                                    brw->isl_dev.ss.size,
                                    brw->isl_dev.ss.align,
                                    out_offset);

   /* Use the fb dimensions or 1x1x1 */
   const unsigned width = fb ? _mesa_geometric_width(fb) : 1;
   const unsigned height = fb ? _mesa_geometric_height(fb) : 1;
   const unsigned samples = fb ? _mesa_geometric_samples(fb) : 1;

   if (devinfo->gen != 6 || samples <= 1) {
      isl_null_fill_state(&brw->isl_dev, surf,
                          isl_extent3d(width, height, 1));
      return;
   }

   /* On Gen6, null render targets seem to cause GPU hangs when multisampling.
    * So work around this problem by rendering into a dummy color buffer.
    *
    * To decrease the amount of memory needed by the workaround buffer, we
    * set its pitch to 128 bytes (the width of a Y tile).  This means that
    * the amount of memory needed for the workaround buffer is
    * (width_in_tiles + height_in_tiles - 1) tiles.
    *
    * Note that since the workaround buffer will be interpreted by the
    * hardware as an interleaved multisampled buffer, we need to compute
    * width_in_tiles and height_in_tiles by dividing the width and height
    * by 16 rather than the normal Y-tile size of 32.
    */
   unsigned width_in_tiles = ALIGN(width, 16) / 16;
   unsigned height_in_tiles = ALIGN(height, 16) / 16;
   unsigned pitch_minus_1 = 127;
   unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
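   /* For example, a 1920x1080 framebuffer gives width_in_tiles = 120 and
    * height_in_tiles = 68, so the workaround buffer only needs
    * (120 + 68 - 1) * 4096 bytes = 748 KiB rather than a full-sized dummy
    * render target.
    */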
   brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
                      size_needed);

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              ISL_FORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
   surf[1] = brw_state_reloc(&brw->batch, *out_offset + 4,
                             brw->wm.multisampled_null_render_target_bo,
                             0, RELOC_WRITE);

   surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   /* From the Sandy Bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
    * Notes):
    *
    *   If Surface Type is SURFTYPE_NULL, this field must be TRUE
    */
   surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = BRW_SURFACE_MULTISAMPLECOUNT_4;
   surf[5] = 0;
}

/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffers support.
 */
static uint32_t
gen4_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 unsigned unit,
                                 uint32_t surf_index)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;
   uint32_t *surf;
   uint32_t tile_x, tile_y;
   enum isl_format format;
   uint32_t offset;
   /* _NEW_BUFFERS */
   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   /* BRW_NEW_FS_PROG_DATA */

   if (rb->TexImage && !devinfo->has_surface_tile_offset) {
      intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);

      if (tile_x != 0 || tile_y != 0) {
         /* Original gen4 hardware couldn't draw to a non-tile-aligned
          * destination in a miptree unless you actually setup your renderbuffer
          * as a miptree and used the fragile lod/array_index/etc. controls to
          * select the image.  So, instead, we just make a new single-level
          * miptree and render into that.
          */
         intel_renderbuffer_move_to_temp(brw, irb, false);
         assert(irb->align_wa_mt);
         mt = irb->align_wa_mt;
      }
   }

   surf = brw_state_batch(brw, 6 * 4, 32, &offset);

   format = brw->mesa_to_isl_render_format[rb_format];
   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              format << BRW_SURFACE_FORMAT_SHIFT);

   /* reloc */
   assert(mt->offset % mt->cpp == 0);
   surf[1] = brw_state_reloc(&brw->batch, offset + 4, mt->bo,
                             mt->offset +
                             intel_renderbuffer_get_tile_offsets(irb,
                                                                 &tile_x,
                                                                 &tile_y),
                             RELOC_WRITE);

   surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   surf[3] = (brw_get_surface_tiling_bits(mt->surf.tiling) |
              (mt->surf.row_pitch - 1) << BRW_SURFACE_PITCH_SHIFT);

   surf[4] = brw_get_surface_num_multisamples(mt->surf.samples);

   assert(devinfo->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
   /* Note that the low bits of these fields are missing, so
    * there's the possibility of getting in trouble.
    */
   assert(tile_x % 4 == 0);
   assert(tile_y % 2 == 0);
   surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
              (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
              (mt->surf.image_alignment_el.height == 4 ?
               BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));

   if (devinfo->gen < 6) {
      /* _NEW_COLOR */
      if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
          (ctx->Color.BlendEnabled & (1 << unit)))
         surf[0] |= BRW_SURFACE_BLEND_ENABLED;

      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 0))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 1))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 2))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;

      /* As mentioned above, disable writes to the alpha component when the
       * renderbuffer is XRGB.
       */
      if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
          !GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 3)) {
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
      }
   }

   return offset;
}

static void
update_renderbuffer_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS | _NEW_COLOR */
   const struct gl_framebuffer *fb = ctx->DrawBuffer;

   /* Render targets always start at binding table index 0. */
   const unsigned rt_start = 0;

   uint32_t *surf_offsets = brw->wm.base.surf_offset;

   /* Update surfaces for drawing buffers */
   if (fb->_NumColorDrawBuffers >= 1) {
      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];

         if (intel_renderbuffer(rb)) {
            surf_offsets[rt_start + i] = devinfo->gen >= 6 ?
               gen6_update_renderbuffer_surface(brw, rb, i, rt_start + i) :
               gen4_update_renderbuffer_surface(brw, rb, i, rt_start + i);
         } else {
            emit_null_surface_state(brw, fb, &surf_offsets[rt_start + i]);
         }
      }
   } else {
      emit_null_surface_state(brw, fb, &surf_offsets[rt_start]);
   }

   /* The PIPE_CONTROL command description says:
    *
    *   "Whenever a Binding Table Index (BTI) used by a Render Target Message
    *    points to a different RENDER_SURFACE_STATE, SW must issue a Render
    *    Target Cache Flush by enabling this bit.  When render target flush
    *    is set due to new association of BTI, PS Scoreboard Stall bit must
    *    be set in this packet."
    */
   if (devinfo->gen >= 11) {
      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                  PIPE_CONTROL_STALL_AT_SCOREBOARD);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR,
      .brw = BRW_NEW_BATCH,
   },
   .emit = update_renderbuffer_surfaces,
};

const struct brw_tracked_state gen6_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE,
   },
   .emit = update_renderbuffer_surfaces,
};

static void
update_renderbuffer_read_surfaces(struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;

   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);

   if (wm_prog_data->has_render_target_reads &&
       !ctx->Extensions.EXT_shader_framebuffer_fetch) {
      /* _NEW_BUFFERS */
      const struct gl_framebuffer *fb = ctx->DrawBuffer;

      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
         const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
         const unsigned surf_index =
            wm_prog_data->binding_table.render_target_read_start + i;
         uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];

         if (irb) {
            const enum isl_format format = brw->mesa_to_isl_render_format[
               _mesa_get_render_format(ctx, intel_rb_format(irb))];
            assert(isl_format_supports_sampling(&brw->screen->devinfo,
                                                format));

            /* Override the target of the texture if the render buffer is a
             * single slice of a 3D texture (since the minimum array element
             * field of the surface state structure is ignored by the sampler
             * unit for 3D textures on some hardware), or if the render buffer
             * is a 1D array (since shaders always provide the array index
             * coordinate at the Z component to avoid state-dependent
             * recompiles when changing the texture target of the
             * framebuffer).
             */
            const GLenum target =
               (irb->mt->target == GL_TEXTURE_3D &&
                irb->layer_count == 1) ? GL_TEXTURE_2D :
               irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
               irb->mt->target;

            const struct isl_view view = {
               .format = format,
               .base_level = irb->mt_level - irb->mt->first_level,
               .levels = 1,
               .base_array_layer = irb->mt_layer,
               .array_len = irb->layer_count,
               .swizzle = ISL_SWIZZLE_IDENTITY,
               .usage = ISL_SURF_USAGE_TEXTURE_BIT,
            };

            enum isl_aux_usage aux_usage =
               intel_miptree_texture_aux_usage(brw, irb->mt, format);
            if (brw->draw_aux_usage[i] == ISL_AUX_USAGE_NONE)
               aux_usage = ISL_AUX_USAGE_NONE;

            brw_emit_surface_state(brw, irb->mt, target, view, aux_usage,
                                   surf_offset, surf_index,
                                   0);

         } else {
            emit_null_surface_state(brw, fb, surf_offset);
         }
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = update_renderbuffer_read_surfaces,
};

static bool
is_depth_texture(struct intel_texture_object *iobj)
{
   GLenum base_format = _mesa_get_format_base_format(iobj->_Format);
   return base_format == GL_DEPTH_COMPONENT ||
          (base_format == GL_DEPTH_STENCIL && !iobj->base.StencilSampling);
}

static void
update_stage_texture_surfaces(struct brw_context *brw,
                              const struct gl_program *prog,
                              struct brw_stage_state *stage_state,
                              bool for_gather, uint32_t plane)
{
   if (!prog)
      return;

   struct gl_context *ctx = &brw->ctx;

   uint32_t *surf_offset = stage_state->surf_offset;

   /* BRW_NEW_*_PROG_DATA */
   if (for_gather)
      surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
   else
      surf_offset += stage_state->prog_data->binding_table.plane_start[plane];

   unsigned num_samplers = util_last_bit(prog->SamplersUsed);
   for (unsigned s = 0; s < num_samplers; s++) {
      surf_offset[s] = 0;

      if (prog->SamplersUsed & (1 << s)) {
         const unsigned unit = prog->SamplerUnits[s];
         const bool used_by_txf = prog->info.textures_used_by_txf & (1 << s);
         struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
         struct intel_texture_object *iobj = intel_texture_object(obj);

         /* _NEW_TEXTURE */
         if (!obj)
            continue;

         if ((prog->ShadowSamplers & (1 << s)) && !is_depth_texture(iobj)) {
            /* A programming note for the sample_c message says:
             *
             *    "The Surface Format of the associated surface must be
             *     indicated as supporting shadow mapping as indicated in the
             *     surface format table."
             *
             * Accessing non-depth textures via a sampler*Shadow type is
             * undefined.  GLSL 4.50 page 162 says:
             *
             *    "If a shadow texture call is made to a sampler that does not
             *     represent a depth texture, then results are undefined."
             *
             * We give them a null surface (zeros) for undefined.  We've seen
             * GPU hangs with color buffers and sample_c, so we try and avoid
             * those with this hack.
             */
            emit_null_surface_state(brw, NULL, surf_offset + s);
         } else {
            brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather,
                                       used_by_txf, plane);
         }
      }
   }
}


/**
 * Construct SURFACE_STATE objects for enabled textures.
 */
static void
brw_update_texture_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* BRW_NEW_VERTEX_PROGRAM */
   struct gl_program *vs = brw->programs[MESA_SHADER_VERTEX];

   /* BRW_NEW_TESS_PROGRAMS */
   struct gl_program *tcs = brw->programs[MESA_SHADER_TESS_CTRL];
   struct gl_program *tes = brw->programs[MESA_SHADER_TESS_EVAL];

   /* BRW_NEW_GEOMETRY_PROGRAM */
   struct gl_program *gs = brw->programs[MESA_SHADER_GEOMETRY];

   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct gl_program *fs = brw->programs[MESA_SHADER_FRAGMENT];

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
   update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
   update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
   update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
   update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);

   /* Emit an alternate set of surface state for gather.  This allows the
    * surface format to be overridden for only the gather4 messages.
    */
   if (devinfo->gen < 8) {
      if (vs && vs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
      if (tcs && tcs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
      if (tes && tes->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
      if (gs && gs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
      if (fs && fs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
   }

   if (fs) {
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_GEOMETRY_PROGRAM |
             BRW_NEW_GS_PROG_DATA |
             BRW_NEW_TESS_PROGRAMS |
             BRW_NEW_TCS_PROG_DATA |
             BRW_NEW_TES_PROG_DATA |
             BRW_NEW_TEXTURE_BUFFER |
             BRW_NEW_VERTEX_PROGRAM |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = brw_update_texture_surfaces,
};

static void
brw_update_cs_texture_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* BRW_NEW_COMPUTE_PROGRAM */
   struct gl_program *cs = brw->programs[MESA_SHADER_COMPUTE];

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);

   /* Emit an alternate set of surface state for gather.  This allows the
    * surface format to be overridden for only the gather4 messages.
    */
   if (devinfo->gen < 8) {
      if (cs && cs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_cs_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_COMPUTE_PROGRAM |
             BRW_NEW_AUX_STATE,
   },
   .emit = brw_update_cs_texture_surfaces,
};

static void
upload_buffer_surface(struct brw_context *brw,
                      struct gl_buffer_binding *binding,
                      uint32_t *out_offset,
                      enum isl_format format,
                      unsigned reloc_flags)
{
   struct gl_context *ctx = &brw->ctx;

   if (binding->BufferObject == ctx->Shared->NullBufferObj) {
      emit_null_surface_state(brw, NULL, out_offset);
   } else {
      ptrdiff_t size = binding->BufferObject->Size - binding->Offset;
      if (!binding->AutomaticSize)
         size = MIN2(size, binding->Size);

      struct intel_buffer_object *iobj =
         intel_buffer_object(binding->BufferObject);
      struct brw_bo *bo =
         intel_bufferobj_buffer(brw, iobj, binding->Offset, size,
                                (reloc_flags & RELOC_WRITE) != 0);

      brw_emit_buffer_surface_state(brw, out_offset, bo, binding->Offset,
                                    format, size, 1, reloc_flags);
   }
}

void
brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;

   if (!prog || (prog->info.num_ubos == 0 &&
                 prog->info.num_ssbos == 0 &&
                 prog->info.num_abos == 0))
      return;

   uint32_t *ubo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ubo_start];

   for (int i = 0; i < prog->info.num_ubos; i++) {
      struct gl_buffer_binding *binding =
         &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];
      upload_buffer_surface(brw, binding, &ubo_surf_offsets[i],
                            ISL_FORMAT_R32G32B32A32_FLOAT, 0);
   }

   uint32_t *abo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
   uint32_t *ssbo_surf_offsets = abo_surf_offsets + prog->info.num_abos;

   for (int i = 0; i < prog->info.num_abos; i++) {
      struct gl_buffer_binding *binding =
         &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
      upload_buffer_surface(brw, binding, &abo_surf_offsets[i],
                            ISL_FORMAT_RAW, RELOC_WRITE);
   }

   for (int i = 0; i < prog->info.num_ssbos; i++) {
      struct gl_buffer_binding *binding =
         &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];

      upload_buffer_surface(brw, binding, &ssbo_surf_offsets[i],
                            ISL_FORMAT_RAW, RELOC_WRITE);
   }

   stage_state->push_constants_dirty = true;
   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

static void
brw_upload_wm_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog = ctx->FragmentProgram._Current;

   /* BRW_NEW_FS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
}

const struct brw_tracked_state brw_wm_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_wm_ubo_surfaces,
};

static void
brw_upload_cs_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   /* BRW_NEW_CS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog, &brw->cs.base, brw->cs.base.prog_data);
}

const struct brw_tracked_state brw_cs_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_cs_ubo_surfaces,
};

static void
brw_upload_cs_image_surfaces(struct brw_context *brw)
{
   /* _NEW_PROGRAM */
   const struct gl_program *cp = brw->programs[MESA_SHADER_COMPUTE];

   if (cp) {
      /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, cp, &brw->cs.base,
                                brw->cs.base.prog_data);
   }
}

const struct brw_tracked_state brw_cs_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_AUX_STATE |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_cs_image_surfaces,
};

static uint32_t
get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   enum isl_format hw_format = brw_isl_format_for_mesa_format(format);
   if (access == GL_WRITE_ONLY) {
      return hw_format;
   } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
      /* Typed surface reads support a very limited subset of the shader
       * image formats.  Translate it into the closest format the
       * hardware supports.
       */
      return isl_lower_storage_image_format(devinfo, hw_format);
   } else {
      /* The hardware doesn't actually support a typed format that we can use
       * so we have to fall back to untyped read/write messages.
       */
      return ISL_FORMAT_RAW;
   }
}

static void
update_default_image_param(struct brw_context *brw,
                           struct gl_image_unit *u,
                           unsigned surface_idx,
                           struct brw_image_param *param)
{
   memset(param, 0, sizeof(*param));
   param->surface_idx = surface_idx;
   /* Set the swizzling shifts to all-ones to effectively disable swizzling --
    * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
    * detailed explanation of these parameters.
    */
   param->swizzling[0] = 0xff;
   param->swizzling[1] = 0xff;
}

static void
update_buffer_image_param(struct brw_context *brw,
                          struct gl_image_unit *u,
                          unsigned surface_idx,
                          struct brw_image_param *param)
{
   struct gl_buffer_object *obj = u->TexObj->BufferObject;
   const uint32_t size = MIN2((uint32_t) u->TexObj->BufferSize, obj->Size);
   update_default_image_param(brw, u, surface_idx, param);

   param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
   param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
}

static unsigned
get_image_num_layers(const struct intel_mipmap_tree *mt, GLenum target,
                     unsigned level)
{
   if (target == GL_TEXTURE_CUBE_MAP)
      return 6;

   return target == GL_TEXTURE_3D ?
      minify(mt->surf.logical_level0_px.depth, level) :
      mt->surf.logical_level0_px.array_len;
}

static void
update_image_surface(struct brw_context *brw,
                     struct gl_image_unit *u,
                     GLenum access,
                     unsigned surface_idx,
                     uint32_t *surf_offset,
                     struct brw_image_param *param)
{
   if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
      struct gl_texture_object *obj = u->TexObj;
      const unsigned format = get_image_format(brw, u->_ActualFormat, access);

      if (obj->Target == GL_TEXTURE_BUFFER) {
         struct intel_buffer_object *intel_obj =
            intel_buffer_object(obj->BufferObject);
         const unsigned texel_size = (format == ISL_FORMAT_RAW ? 1 :
                                      _mesa_get_format_bytes(u->_ActualFormat));

         brw_emit_buffer_surface_state(
            brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
            format, intel_obj->Base.Size, texel_size,
            access != GL_READ_ONLY ? RELOC_WRITE : 0);

         update_buffer_image_param(brw, u, surface_idx, param);

      } else {
         struct intel_texture_object *intel_obj = intel_texture_object(obj);
         struct intel_mipmap_tree *mt = intel_obj->mt;
         const unsigned num_layers = u->Layered ?
            get_image_num_layers(mt, obj->Target, u->Level) : 1;

         struct isl_view view = {
            .format = format,
            .base_level = obj->MinLevel + u->Level,
            .levels = 1,
            .base_array_layer = obj->MinLayer + u->_Layer,
            .array_len = num_layers,
            .swizzle = ISL_SWIZZLE_IDENTITY,
            .usage = ISL_SURF_USAGE_STORAGE_BIT,
         };

         if (format == ISL_FORMAT_RAW) {
            brw_emit_buffer_surface_state(
               brw, surf_offset, mt->bo, mt->offset,
               format, mt->bo->size - mt->offset, 1 /* pitch */,
               access != GL_READ_ONLY ? RELOC_WRITE : 0);

         } else {
            const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
            assert(!intel_miptree_has_color_unresolved(mt,
                                                       view.base_level, 1,
                                                       view.base_array_layer,
                                                       view.array_len));
            brw_emit_surface_state(brw, mt, mt->target, view,
                                   ISL_AUX_USAGE_NONE,
                                   surf_offset, surf_index,
                                   access == GL_READ_ONLY ? 0 : RELOC_WRITE);
         }

         isl_surf_fill_image_param(&brw->isl_dev, param, &mt->surf, &view);
         param->surface_idx = surface_idx;
      }

   } else {
      emit_null_surface_state(brw, NULL, surf_offset);
      update_default_image_param(brw, u, surface_idx, param);
   }
}

void
brw_upload_image_surfaces(struct brw_context *brw,
                          const struct gl_program *prog,
                          struct brw_stage_state *stage_state,
                          struct brw_stage_prog_data *prog_data)
{
   assert(prog);
   struct gl_context *ctx = &brw->ctx;

   if (prog->info.num_images) {
      for (unsigned i = 0; i < prog->info.num_images; i++) {
         struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
         const unsigned surf_idx = prog_data->binding_table.image_start + i;

         update_image_surface(brw, u, prog->sh.ImageAccess[i],
                              surf_idx,
                              &stage_state->surf_offset[surf_idx],
                              &stage_state->image_param[i]);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
      /* This may have changed the image metadata that depends on the context
       * image unit state and is passed to the program as uniforms, so make
       * sure that push and pull constants are reuploaded.
       */
      brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
   }
}

static void
brw_upload_wm_image_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct gl_program *wm = brw->programs[MESA_SHADER_FRAGMENT];

   if (wm) {
      /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, wm, &brw->wm.base,
                                brw->wm.base.prog_data);
   }
}

const struct brw_tracked_state brw_wm_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_wm_image_surfaces,
};

static void
brw_upload_cs_work_groups_surface(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
   /* BRW_NEW_CS_PROG_DATA */
   const struct brw_cs_prog_data *cs_prog_data =
      brw_cs_prog_data(brw->cs.base.prog_data);

   if (prog && cs_prog_data->uses_num_work_groups) {
      const unsigned surf_idx =
         cs_prog_data->binding_table.work_groups_start;
      uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
      struct brw_bo *bo;
      uint32_t bo_offset;

      if (brw->compute.num_work_groups_bo == NULL) {
         bo = NULL;
         intel_upload_data(brw,
                           (void *) brw->compute.num_work_groups,
                           3 * sizeof(GLuint),
                           sizeof(GLuint),
                           &bo,
                           &bo_offset);
      } else {
         bo = brw->compute.num_work_groups_bo;
         bo_offset = brw->compute.num_work_groups_offset;
      }

      brw_emit_buffer_surface_state(brw, surf_offset,
                                    bo, bo_offset,
                                    ISL_FORMAT_RAW,
                                    3 * sizeof(GLuint), 1,
                                    RELOC_WRITE);
      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_cs_work_groups_surface = {
   .dirty = {
      .brw = BRW_NEW_CS_PROG_DATA |
             BRW_NEW_CS_WORK_GROUPS
   },
   .emit = brw_upload_cs_work_groups_surface,
};