i965/wm_surface_state: Use the clear address if clear_bo is non-NULL
[mesa.git] / src / mesa / drivers / dri / i965 / brw_wm_surface_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "compiler/nir/nir.h"
34 #include "main/context.h"
35 #include "main/blend.h"
36 #include "main/mtypes.h"
37 #include "main/samplerobj.h"
38 #include "main/shaderimage.h"
39 #include "main/teximage.h"
40 #include "program/prog_parameter.h"
41 #include "program/prog_instruction.h"
42 #include "main/framebuffer.h"
43 #include "main/shaderapi.h"
44
45 #include "isl/isl.h"
46
47 #include "intel_mipmap_tree.h"
48 #include "intel_batchbuffer.h"
49 #include "intel_tex.h"
50 #include "intel_fbo.h"
51 #include "intel_buffer_objects.h"
52
53 #include "brw_context.h"
54 #include "brw_state.h"
55 #include "brw_defines.h"
56 #include "brw_wm.h"
57
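/* Memory Object Control State (MOCS) values, indexed by devinfo->gen.
 * The WB table asks for write-back caching, which is what we want for
 * ordinary driver-internal buffers, while the PTE table defers to the
 * caching bits in the kernel's page-table entry, the safe choice for
 * external (e.g. scanout) buffers.  brw_get_bo_mocs() below selects
 * between the two based on bo->external.
 */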
58 static const uint32_t wb_mocs[] = {
59 [7] = GEN7_MOCS_L3,
60 [8] = BDW_MOCS_WB,
61 [9] = SKL_MOCS_WB,
62 [10] = CNL_MOCS_WB,
63 [11] = ICL_MOCS_WB,
64 };
65
66 static const uint32_t pte_mocs[] = {
67 [7] = GEN7_MOCS_L3,
68 [8] = BDW_MOCS_PTE,
69 [9] = SKL_MOCS_PTE,
70 [10] = CNL_MOCS_PTE,
71 [11] = ICL_MOCS_PTE,
72 };
73
74 uint32_t
75 brw_get_bo_mocs(const struct gen_device_info *devinfo, struct brw_bo *bo)
76 {
77 return (bo && bo->external ? pte_mocs : wb_mocs)[devinfo->gen];
78 }
79
80 static void
81 get_isl_surf(struct brw_context *brw, struct intel_mipmap_tree *mt,
82 GLenum target, struct isl_view *view,
83 uint32_t *tile_x, uint32_t *tile_y,
84 uint32_t *offset, struct isl_surf *surf)
85 {
86 *surf = mt->surf;
87
88 const struct gen_device_info *devinfo = &brw->screen->devinfo;
89 const enum isl_dim_layout dim_layout =
90 get_isl_dim_layout(devinfo, mt->surf.tiling, target);
91
92 surf->dim = get_isl_surf_dim(target);
93
94 if (surf->dim_layout == dim_layout)
95 return;
96
97 /* The layout of the specified texture target is not compatible with the
98 * actual layout of the miptree structure in memory -- you're entering
99 * dangerous territory: this can only possibly work if you only intended
100 * to access a single level and slice of the texture, and the hardware
101 * supports the tile offset feature in order to allow non-tile-aligned
102 * base offsets, since we'll have to point the hardware to the first
103 * texel of the level instead of relying on the usual base level/layer
104 * controls.
105 */
106 assert(devinfo->has_surface_tile_offset);
107 assert(view->levels == 1 && view->array_len == 1);
108 assert(*tile_x == 0 && *tile_y == 0);
109
110 *offset += intel_miptree_get_tile_offsets(mt, view->base_level,
111 view->base_array_layer,
112 tile_x, tile_y);
113
114 /* Minify the logical dimensions of the texture. */
115 const unsigned l = view->base_level - mt->first_level;
116 surf->logical_level0_px.width = minify(surf->logical_level0_px.width, l);
117 surf->logical_level0_px.height = surf->dim <= ISL_SURF_DIM_1D ? 1 :
118 minify(surf->logical_level0_px.height, l);
119 surf->logical_level0_px.depth = surf->dim <= ISL_SURF_DIM_2D ? 1 :
120 minify(surf->logical_level0_px.depth, l);
121
122 /* Only the base level and layer can be addressed with the overridden
123 * layout.
124 */
125 surf->logical_level0_px.array_len = 1;
126 surf->levels = 1;
127 surf->dim_layout = dim_layout;
128
129 /* The requested slice of the texture is now at the base level and
130 * layer.
131 */
132 view->base_level = 0;
133 view->base_array_layer = 0;
134 }
135
136 static void
137 brw_emit_surface_state(struct brw_context *brw,
138 struct intel_mipmap_tree *mt,
139 GLenum target, struct isl_view view,
140 enum isl_aux_usage aux_usage,
141 uint32_t *surf_offset, int surf_index,
142 unsigned reloc_flags)
143 {
144 const struct gen_device_info *devinfo = &brw->screen->devinfo;
145 uint32_t tile_x = mt->level[0].level_x;
146 uint32_t tile_y = mt->level[0].level_y;
147 uint32_t offset = mt->offset;
148
149 struct isl_surf surf;
150
151 get_isl_surf(brw, mt, target, &view, &tile_x, &tile_y, &offset, &surf);
152
153 union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };
154
155 struct brw_bo *aux_bo = NULL;
156 struct isl_surf *aux_surf = NULL;
157 uint64_t aux_offset = 0;
158 struct brw_bo *clear_bo = NULL;
159 uint32_t clear_offset = 0;
160
161 if (aux_usage != ISL_AUX_USAGE_NONE) {
162 aux_surf = &mt->aux_buf->surf;
163 aux_bo = mt->aux_buf->bo;
164 aux_offset = mt->aux_buf->offset;
165
166 /* We only really need a clear color if we also have an auxiliary
167 * surface. Without one, it does nothing.
168 */
169 clear_bo = mt->aux_buf->clear_color_bo;
170 clear_offset = mt->aux_buf->clear_color_offset;
171 clear_color = mt->fast_clear_color;
172 }
173
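/* When clear_bo is non-NULL, the miptree keeps its fast-clear color in
 * a separate buffer instead of inline in the surface state, so we set
 * use_clear_address below and patch the real GPU address in with a
 * relocation afterwards.
 */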
174 void *state = brw_state_batch(brw,
175 brw->isl_dev.ss.size,
176 brw->isl_dev.ss.align,
177 surf_offset);
178
179 isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
180 .address = brw_state_reloc(&brw->batch,
181 *surf_offset + brw->isl_dev.ss.addr_offset,
182 mt->bo, offset, reloc_flags),
183 .aux_surf = aux_surf, .aux_usage = aux_usage,
184 .aux_address = aux_offset,
185 .mocs = brw_get_bo_mocs(devinfo, mt->bo),
186 .clear_color = clear_color,
187 .use_clear_address = clear_bo != NULL,
188 .clear_address = clear_offset,
189 .x_offset_sa = tile_x, .y_offset_sa = tile_y);
190 if (aux_surf) {
191 /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
192 * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
193 * contain other control information. Since buffer addresses are always
194 * on 4k boundaries (and thus have their lower 12 bits zero), we can use
195 * an ordinary reloc to do the necessary address translation.
196 *
197 * FIXME: move to the point of assignment.
198 */
199 assert((aux_offset & 0xfff) == 0);
200
201 if (devinfo->gen >= 8) {
202 uint64_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
203 *aux_addr = brw_state_reloc(&brw->batch,
204 *surf_offset +
205 brw->isl_dev.ss.aux_addr_offset,
206 aux_bo, *aux_addr,
207 reloc_flags);
208 } else {
209 uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
210 *aux_addr = brw_state_reloc(&brw->batch,
211 *surf_offset +
212 brw->isl_dev.ss.aux_addr_offset,
213 aux_bo, *aux_addr,
214 reloc_flags);
216 }
217 }
218
219 if (clear_bo != NULL) {
220 /* Make sure the offset is aligned with a cacheline. */
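/* (The clear color address field in the surface state only stores bits
 * 63:6 of the address, so the offset must be 64-byte aligned for the
 * relocated address to be representable.)
 */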
221 assert((clear_offset & 0x3f) == 0);
222 uint32_t *clear_address =
223 state + brw->isl_dev.ss.clear_color_state_offset;
224 *clear_address = brw_state_reloc(&brw->batch,
225 *surf_offset +
226 brw->isl_dev.ss.clear_color_state_offset,
227 clear_bo, *clear_address, reloc_flags);
228 }
229 }
230
231 static uint32_t
232 gen6_update_renderbuffer_surface(struct brw_context *brw,
233 struct gl_renderbuffer *rb,
234 unsigned unit,
235 uint32_t surf_index)
236 {
237 struct gl_context *ctx = &brw->ctx;
238 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
239 struct intel_mipmap_tree *mt = irb->mt;
240
241 assert(brw_render_target_supported(brw, rb));
242
243 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
244 if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
245 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
246 __func__, _mesa_get_format_name(rb_format));
247 }
248 enum isl_format isl_format = brw->mesa_to_isl_render_format[rb_format];
249
250 struct isl_view view = {
251 .format = isl_format,
252 .base_level = irb->mt_level - irb->mt->first_level,
253 .levels = 1,
254 .base_array_layer = irb->mt_layer,
255 .array_len = MAX2(irb->layer_count, 1),
256 .swizzle = ISL_SWIZZLE_IDENTITY,
257 .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
258 };
259
260 uint32_t offset;
261 brw_emit_surface_state(brw, mt, mt->target, view,
262 brw->draw_aux_usage[unit],
263 &offset, surf_index,
264 RELOC_WRITE);
265 return offset;
266 }
267
268 GLuint
269 translate_tex_target(GLenum target)
270 {
271 switch (target) {
272 case GL_TEXTURE_1D:
273 case GL_TEXTURE_1D_ARRAY_EXT:
274 return BRW_SURFACE_1D;
275
276 case GL_TEXTURE_RECTANGLE_NV:
277 return BRW_SURFACE_2D;
278
279 case GL_TEXTURE_2D:
280 case GL_TEXTURE_2D_ARRAY_EXT:
281 case GL_TEXTURE_EXTERNAL_OES:
282 case GL_TEXTURE_2D_MULTISAMPLE:
283 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
284 return BRW_SURFACE_2D;
285
286 case GL_TEXTURE_3D:
287 return BRW_SURFACE_3D;
288
289 case GL_TEXTURE_CUBE_MAP:
290 case GL_TEXTURE_CUBE_MAP_ARRAY:
291 return BRW_SURFACE_CUBE;
292
293 default:
294 unreachable("not reached");
295 }
296 }
297
298 uint32_t
299 brw_get_surface_tiling_bits(enum isl_tiling tiling)
300 {
301 switch (tiling) {
302 case ISL_TILING_X:
303 return BRW_SURFACE_TILED;
304 case ISL_TILING_Y0:
305 return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
306 default:
307 return 0;
308 }
309 }
310
311
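/* Gen4-6 SURFACE_STATE only distinguishes between single-sampled and
 * 4x multisampled surfaces, hence any sample count above one is
 * reported as MULTISAMPLECOUNT_4.
 */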
312 uint32_t
313 brw_get_surface_num_multisamples(unsigned num_samples)
314 {
315 if (num_samples > 1)
316 return BRW_SURFACE_MULTISAMPLECOUNT_4;
317 else
318 return BRW_SURFACE_MULTISAMPLECOUNT_1;
319 }
320
321 /**
322 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
323 * swizzling.
324 */
325 int
326 brw_get_texture_swizzle(const struct gl_context *ctx,
327 const struct gl_texture_object *t)
328 {
329 const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
330
331 int swizzles[SWIZZLE_NIL + 1] = {
332 SWIZZLE_X,
333 SWIZZLE_Y,
334 SWIZZLE_Z,
335 SWIZZLE_W,
336 SWIZZLE_ZERO,
337 SWIZZLE_ONE,
338 SWIZZLE_NIL
339 };
340
341 if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
342 img->_BaseFormat == GL_DEPTH_STENCIL) {
343 GLenum depth_mode = t->DepthMode;
344
345 /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
346 * with depth component data specified with a sized internal format.
347 * Otherwise, it's left at the old default, GL_LUMINANCE.
348 */
349 if (_mesa_is_gles3(ctx) &&
350 img->InternalFormat != GL_DEPTH_COMPONENT &&
351 img->InternalFormat != GL_DEPTH_STENCIL) {
352 depth_mode = GL_RED;
353 }
354
355 switch (depth_mode) {
356 case GL_ALPHA:
357 swizzles[0] = SWIZZLE_ZERO;
358 swizzles[1] = SWIZZLE_ZERO;
359 swizzles[2] = SWIZZLE_ZERO;
360 swizzles[3] = SWIZZLE_X;
361 break;
362 case GL_LUMINANCE:
363 swizzles[0] = SWIZZLE_X;
364 swizzles[1] = SWIZZLE_X;
365 swizzles[2] = SWIZZLE_X;
366 swizzles[3] = SWIZZLE_ONE;
367 break;
368 case GL_INTENSITY:
369 swizzles[0] = SWIZZLE_X;
370 swizzles[1] = SWIZZLE_X;
371 swizzles[2] = SWIZZLE_X;
372 swizzles[3] = SWIZZLE_X;
373 break;
374 case GL_RED:
375 swizzles[0] = SWIZZLE_X;
376 swizzles[1] = SWIZZLE_ZERO;
377 swizzles[2] = SWIZZLE_ZERO;
378 swizzles[3] = SWIZZLE_ONE;
379 break;
380 }
381 }
382
383 GLenum datatype = _mesa_get_format_datatype(img->TexFormat);
384
385 /* If the texture's format is alpha-only, force R, G, and B to
386 * 0.0. Similarly, if the texture's format has no alpha channel,
387 * force the alpha value read to 1.0. This allows for the
388 * implementation to use an RGBA texture for any of these formats
389 * without leaking any unexpected values.
390 */
391 switch (img->_BaseFormat) {
392 case GL_ALPHA:
393 swizzles[0] = SWIZZLE_ZERO;
394 swizzles[1] = SWIZZLE_ZERO;
395 swizzles[2] = SWIZZLE_ZERO;
396 break;
397 case GL_LUMINANCE:
398 if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
399 swizzles[0] = SWIZZLE_X;
400 swizzles[1] = SWIZZLE_X;
401 swizzles[2] = SWIZZLE_X;
402 swizzles[3] = SWIZZLE_ONE;
403 }
404 break;
405 case GL_LUMINANCE_ALPHA:
406 if (datatype == GL_SIGNED_NORMALIZED) {
407 swizzles[0] = SWIZZLE_X;
408 swizzles[1] = SWIZZLE_X;
409 swizzles[2] = SWIZZLE_X;
410 swizzles[3] = SWIZZLE_W;
411 }
412 break;
413 case GL_INTENSITY:
414 if (datatype == GL_SIGNED_NORMALIZED) {
415 swizzles[0] = SWIZZLE_X;
416 swizzles[1] = SWIZZLE_X;
417 swizzles[2] = SWIZZLE_X;
418 swizzles[3] = SWIZZLE_X;
419 }
420 break;
421 case GL_RED:
422 case GL_RG:
423 case GL_RGB:
424 if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0 ||
425 img->TexFormat == MESA_FORMAT_RGB_DXT1 ||
426 img->TexFormat == MESA_FORMAT_SRGB_DXT1)
427 swizzles[3] = SWIZZLE_ONE;
428 break;
429 }
430
431 return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
432 swizzles[GET_SWZ(t->_Swizzle, 1)],
433 swizzles[GET_SWZ(t->_Swizzle, 2)],
434 swizzles[GET_SWZ(t->_Swizzle, 3)]);
435 }
436
437 /**
438 * Convert a swizzle enumeration (e.g. SWIZZLE_X) to one of the Gen7.5+
439 * "Shader Channel Select" enumerations (e.g. HSW_SCS_RED). The mappings are
440 *
441 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
442 * 0 1 2 3 4 5
443 * 4 5 6 7 0 1
444 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
445 *
446 * which is simply adding 4 then modding by 8 (or anding with 7).
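* For example, SWIZZLE_ZERO (4) maps to (4 + 4) & 7 = 0 = SCS_ZERO, and
* SWIZZLE_X (0) maps to (0 + 4) & 7 = 4 = SCS_RED.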
447 *
448 * We then may need to apply workarounds for textureGather hardware bugs.
449 */
450 static unsigned
451 swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
452 {
453 unsigned scs = (swizzle + 4) & 7;
454
455 return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
456 }
457
458 static void brw_update_texture_surface(struct gl_context *ctx,
459 unsigned unit,
460 uint32_t *surf_offset,
461 bool for_gather,
462 bool for_txf,
463 uint32_t plane)
464 {
465 struct brw_context *brw = brw_context(ctx);
466 const struct gen_device_info *devinfo = &brw->screen->devinfo;
467 struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
468
469 if (obj->Target == GL_TEXTURE_BUFFER) {
470 brw_update_buffer_texture_surface(ctx, unit, surf_offset);
471
472 } else {
473 struct intel_texture_object *intel_obj = intel_texture_object(obj);
474 struct intel_mipmap_tree *mt = intel_obj->mt;
475
476 if (plane > 0) {
477 if (mt->plane[plane - 1] == NULL)
478 return;
479 mt = mt->plane[plane - 1];
480 }
481
482 struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
483 /* If this is a view with restricted NumLayers, then our effective depth
484 * is not just the miptree depth.
485 */
486 unsigned view_num_layers;
487 if (obj->Immutable && obj->Target != GL_TEXTURE_3D) {
488 view_num_layers = obj->NumLayers;
489 } else {
490 view_num_layers = mt->surf.dim == ISL_SURF_DIM_3D ?
491 mt->surf.logical_level0_px.depth :
492 mt->surf.logical_level0_px.array_len;
493 }
494
495 /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
496 * texturing functions that return a float, as our code generation always
497 * selects the .x channel (which would always be 0).
498 */
499 struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
500 const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
501 (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
502 firstImage->_BaseFormat == GL_DEPTH_STENCIL);
503 const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
504 brw_get_texture_swizzle(&brw->ctx, obj));
505
506 mesa_format mesa_fmt;
507 if (firstImage->_BaseFormat == GL_DEPTH_STENCIL ||
508 firstImage->_BaseFormat == GL_DEPTH_COMPONENT) {
509 /* The format from intel_obj may be a combined depth stencil format
510 * when we just want depth. Pull it from the miptree instead. This
511 * is safe because texture views aren't allowed on depth/stencil.
512 */
513 mesa_fmt = mt->format;
514 } else if (mt->etc_format != MESA_FORMAT_NONE) {
515 mesa_fmt = mt->format;
516 } else if (plane > 0) {
517 mesa_fmt = mt->format;
518 } else {
519 mesa_fmt = intel_obj->_Format;
520 }
521 enum isl_format format = translate_tex_format(brw, mesa_fmt,
522 for_txf ? GL_DECODE_EXT :
523 sampler->sRGBDecode);
524
525 /* Implement gen6 and gen7 gather work-around */
526 bool need_green_to_blue = false;
527 if (for_gather) {
528 if (devinfo->gen == 7 && (format == ISL_FORMAT_R32G32_FLOAT ||
529 format == ISL_FORMAT_R32G32_SINT ||
530 format == ISL_FORMAT_R32G32_UINT)) {
531 format = ISL_FORMAT_R32G32_FLOAT_LD;
532 need_green_to_blue = devinfo->is_haswell;
533 } else if (devinfo->gen == 6) {
534 /* Sandybridge's gather4 message is broken for integer formats.
535 * To work around this, we pretend the surface is UNORM for
536 * 8- or 16-bit formats, and emit shader instructions to recover
537 * the real INT/UINT value. For 32-bit formats, we pretend
538 * the surface is FLOAT, and simply reinterpret the resulting
539 * bits.
540 */
541 switch (format) {
542 case ISL_FORMAT_R8_SINT:
543 case ISL_FORMAT_R8_UINT:
544 format = ISL_FORMAT_R8_UNORM;
545 break;
546
547 case ISL_FORMAT_R16_SINT:
548 case ISL_FORMAT_R16_UINT:
549 format = ISL_FORMAT_R16_UNORM;
550 break;
551
552 case ISL_FORMAT_R32_SINT:
553 case ISL_FORMAT_R32_UINT:
554 format = ISL_FORMAT_R32_FLOAT;
555 break;
556
557 default:
558 break;
559 }
560 }
561 }
562
563 if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
564 if (devinfo->gen <= 7) {
565 assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
566 mt = mt->r8stencil_mt;
567 } else {
568 mt = mt->stencil_mt;
569 }
570 format = ISL_FORMAT_R8_UINT;
571 } else if (devinfo->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
572 assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
573 mt = mt->r8stencil_mt;
574 format = ISL_FORMAT_R8_UINT;
575 }
576
577 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
578
579 struct isl_view view = {
580 .format = format,
581 .base_level = obj->MinLevel + obj->BaseLevel,
582 .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
583 .base_array_layer = obj->MinLayer,
584 .array_len = view_num_layers,
585 .swizzle = {
586 .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
587 .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
588 .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
589 .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
590 },
591 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
592 };
593
594 if (obj->Target == GL_TEXTURE_CUBE_MAP ||
595 obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
596 view.usage |= ISL_SURF_USAGE_CUBE_BIT;
597
598 enum isl_aux_usage aux_usage =
599 intel_miptree_texture_aux_usage(brw, mt, format);
600
601 brw_emit_surface_state(brw, mt, mt->target, view, aux_usage,
602 surf_offset, surf_index,
603 0);
604 }
605 }
606
607 void
608 brw_emit_buffer_surface_state(struct brw_context *brw,
609 uint32_t *out_offset,
610 struct brw_bo *bo,
611 unsigned buffer_offset,
612 unsigned surface_format,
613 unsigned buffer_size,
614 unsigned pitch,
615 unsigned reloc_flags)
616 {
617 const struct gen_device_info *devinfo = &brw->screen->devinfo;
618 uint32_t *dw = brw_state_batch(brw,
619 brw->isl_dev.ss.size,
620 brw->isl_dev.ss.align,
621 out_offset);
622
623 isl_buffer_fill_state(&brw->isl_dev, dw,
624 .address = !bo ? buffer_offset :
625 brw_state_reloc(&brw->batch,
626 *out_offset + brw->isl_dev.ss.addr_offset,
627 bo, buffer_offset,
628 reloc_flags),
629 .size = buffer_size,
630 .format = surface_format,
631 .stride = pitch,
632 .mocs = brw_get_bo_mocs(devinfo, bo));
633 }
634
635 void
636 brw_update_buffer_texture_surface(struct gl_context *ctx,
637 unsigned unit,
638 uint32_t *surf_offset)
639 {
640 struct brw_context *brw = brw_context(ctx);
641 struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
642 struct intel_buffer_object *intel_obj =
643 intel_buffer_object(tObj->BufferObject);
644 uint32_t size = tObj->BufferSize;
645 struct brw_bo *bo = NULL;
646 mesa_format format = tObj->_BufferObjectFormat;
647 const enum isl_format isl_format = brw_isl_format_for_mesa_format(format);
648 int texel_size = _mesa_get_format_bytes(format);
649
650 if (intel_obj) {
651 size = MIN2(size, intel_obj->Base.Size);
652 bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size,
653 false);
654 }
655
656 /* The ARB_texture_buffer_specification says:
657 *
658 * "The number of texels in the buffer texture's texel array is given by
659 *
660 * floor(<buffer_size> / (<components> * sizeof(<base_type>))),
661 *
662 * where <buffer_size> is the size of the buffer object, in basic
663 * machine units and <components> and <base_type> are the element count
664 * and base data type for elements, as specified in Table X.1. The
665 * number of texels in the texel array is then clamped to the
666 * implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
667 *
668 * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
669 * so that when ISL divides by stride to obtain the number of texels, that
670 * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
671 */
672 size = MIN2(size, ctx->Const.MaxTextureBufferSize * (unsigned) texel_size);
673
674 if (isl_format == ISL_FORMAT_UNSUPPORTED) {
675 _mesa_problem(NULL, "bad format %s for texture buffer\n",
676 _mesa_get_format_name(format));
677 }
678
679 brw_emit_buffer_surface_state(brw, surf_offset, bo,
680 tObj->BufferOffset,
681 isl_format,
682 size,
683 texel_size,
684 0);
685 }
686
687 /**
688 * Set up a binding table entry for use by stream output logic (transform
689 * feedback).
690 *
691 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
692 */
693 void
694 brw_update_sol_surface(struct brw_context *brw,
695 struct gl_buffer_object *buffer_obj,
696 uint32_t *out_offset, unsigned num_vector_components,
697 unsigned stride_dwords, unsigned offset_dwords)
698 {
699 struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
700 uint32_t offset_bytes = 4 * offset_dwords;
701 struct brw_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
702 offset_bytes,
703 buffer_obj->Size - offset_bytes,
704 true);
705 uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
706 uint32_t pitch_minus_1 = 4*stride_dwords - 1;
707 size_t size_dwords = buffer_obj->Size / 4;
708 uint32_t buffer_size_minus_1, width, height, depth, surface_format;
709
710 /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
711 * too big to map using a single binding table entry?
712 */
713 assert((size_dwords - offset_dwords) / stride_dwords
714 <= BRW_MAX_NUM_BUFFER_ENTRIES);
715
716 if (size_dwords > offset_dwords + num_vector_components) {
717 /* There is room for at least 1 transform feedback output in the buffer.
718 * Compute the number of additional transform feedback outputs the
719 * buffer has room for.
720 */
721 buffer_size_minus_1 =
722 (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
723 } else {
724 /* There isn't even room for a single transform feedback output in the
725 * buffer. We can't configure the binding table entry to prevent output
726 * entirely; we'll have to rely on the geometry shader to detect
727 * overflow. But to minimize the damage in case of a bug, set up the
728 * binding table entry to just allow a single output.
729 */
730 buffer_size_minus_1 = 0;
731 }
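/* A SURFTYPE_BUFFER surface encodes its size as an entry count minus
 * one, split across the width (bits 6:0 of buffer_size_minus_1),
 * height (bits 19:7) and depth (bits 26:20) fields packed below.
 */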
732 width = buffer_size_minus_1 & 0x7f;
733 height = (buffer_size_minus_1 & 0xfff80) >> 7;
734 depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
735
736 switch (num_vector_components) {
737 case 1:
738 surface_format = ISL_FORMAT_R32_FLOAT;
739 break;
740 case 2:
741 surface_format = ISL_FORMAT_R32G32_FLOAT;
742 break;
743 case 3:
744 surface_format = ISL_FORMAT_R32G32B32_FLOAT;
745 break;
746 case 4:
747 surface_format = ISL_FORMAT_R32G32B32A32_FLOAT;
748 break;
749 default:
750 unreachable("Invalid vector size for transform feedback output");
751 }
752
753 surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
754 BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
755 surface_format << BRW_SURFACE_FORMAT_SHIFT |
756 BRW_SURFACE_RC_READ_WRITE;
757 surf[1] = brw_state_reloc(&brw->batch,
758 *out_offset + 4, bo, offset_bytes, RELOC_WRITE);
759 surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
760 height << BRW_SURFACE_HEIGHT_SHIFT);
761 surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
762 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
763 surf[4] = 0;
764 surf[5] = 0;
765 }
766
767 /* Creates a new WM constant buffer reflecting the current fragment program's
768 * constants, if needed by the fragment program.
769 *
770 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
771 * state atom.
772 */
773 static void
774 brw_upload_wm_pull_constants(struct brw_context *brw)
775 {
776 struct brw_stage_state *stage_state = &brw->wm.base;
777 /* BRW_NEW_FRAGMENT_PROGRAM */
778 struct brw_program *fp =
779 (struct brw_program *) brw->programs[MESA_SHADER_FRAGMENT];
780
781 /* BRW_NEW_FS_PROG_DATA */
782 struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;
783
784 _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
785 /* _NEW_PROGRAM_CONSTANTS */
786 brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
787 stage_state, prog_data);
788 }
789
790 const struct brw_tracked_state brw_wm_pull_constants = {
791 .dirty = {
792 .mesa = _NEW_PROGRAM_CONSTANTS,
793 .brw = BRW_NEW_BATCH |
794 BRW_NEW_FRAGMENT_PROGRAM |
795 BRW_NEW_FS_PROG_DATA,
796 },
797 .emit = brw_upload_wm_pull_constants,
798 };
799
800 /**
801 * Creates a null renderbuffer surface.
802 *
803 * This is used when the shader doesn't write to any color output. An FB
804 * write to target 0 will still be emitted, because that's how the thread is
805 * terminated (and computed depth is returned), so we need to have the
806 * hardware discard the target 0 color output..
807 */
808 static void
809 emit_null_surface_state(struct brw_context *brw,
810 const struct gl_framebuffer *fb,
811 uint32_t *out_offset)
812 {
813 const struct gen_device_info *devinfo = &brw->screen->devinfo;
814 uint32_t *surf = brw_state_batch(brw,
815 brw->isl_dev.ss.size,
816 brw->isl_dev.ss.align,
817 out_offset);
818
819 /* Use the fb dimensions or 1x1x1 */
820 const unsigned width = fb ? _mesa_geometric_width(fb) : 1;
821 const unsigned height = fb ? _mesa_geometric_height(fb) : 1;
822 const unsigned samples = fb ? _mesa_geometric_samples(fb) : 1;
823
824 if (devinfo->gen != 6 || samples <= 1) {
825 isl_null_fill_state(&brw->isl_dev, surf,
826 isl_extent3d(width, height, 1));
827 return;
828 }
829
830 /* On Gen6, null render targets seem to cause GPU hangs when multisampling.
831 * So work around this problem by rendering into a dummy color buffer.
832 *
833 * To decrease the amount of memory needed by the workaround buffer, we
834 * set its pitch to 128 bytes (the width of a Y tile). This means that
835 * the amount of memory needed for the workaround buffer is
836 * (width_in_tiles + height_in_tiles - 1) tiles.
837 *
838 * Note that since the workaround buffer will be interpreted by the
839 * hardware as an interleaved multisampled buffer, we need to compute
840 * width_in_tiles and height_in_tiles by dividing the width and height
841 * by 16 rather than the normal Y-tile size of 32.
842 */
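/* Worked example: for a 1920x1080 multisampled framebuffer this is
 * ALIGN(1920, 16) / 16 = 120 by ALIGN(1080, 16) / 16 = 68 tiles, so
 * (120 + 68 - 1) * 4096 bytes, roughly 748 KiB, instead of a
 * full-sized dummy color buffer.
 */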
843 unsigned width_in_tiles = ALIGN(width, 16) / 16;
844 unsigned height_in_tiles = ALIGN(height, 16) / 16;
845 unsigned pitch_minus_1 = 127;
846 unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
847 brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
848 size_needed);
849
850 surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
851 ISL_FORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
852 surf[1] = brw_state_reloc(&brw->batch, *out_offset + 4,
853 brw->wm.multisampled_null_render_target_bo,
854 0, RELOC_WRITE);
855
856 surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
857 (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
858
859 /* From the Sandy Bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
860 * Notes):
861 *
862 * If Surface Type is SURFTYPE_NULL, this field must be TRUE
863 */
864 surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
865 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
866 surf[4] = BRW_SURFACE_MULTISAMPLECOUNT_4;
867 surf[5] = 0;
868 }
869
870 /**
871 * Sets up a surface state structure to point at the given region.
872 * While it is only used for the front/back buffer currently, it should be
873 * usable for further buffers when doing ARB_draw_buffers support.
874 */
875 static uint32_t
876 gen4_update_renderbuffer_surface(struct brw_context *brw,
877 struct gl_renderbuffer *rb,
878 unsigned unit,
879 uint32_t surf_index)
880 {
881 const struct gen_device_info *devinfo = &brw->screen->devinfo;
882 struct gl_context *ctx = &brw->ctx;
883 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
884 struct intel_mipmap_tree *mt = irb->mt;
885 uint32_t *surf;
886 uint32_t tile_x, tile_y;
887 enum isl_format format;
888 uint32_t offset;
889 /* _NEW_BUFFERS */
890 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
891 /* BRW_NEW_FS_PROG_DATA */
892
893 if (rb->TexImage && !devinfo->has_surface_tile_offset) {
894 intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);
895
896 if (tile_x != 0 || tile_y != 0) {
897 /* Original gen4 hardware couldn't draw to a non-tile-aligned
898 * destination in a miptree unless you actually set up your renderbuffer
899 * as a miptree and used the fragile lod/array_index/etc. controls to
900 * select the image. So, instead, we just make a new single-level
901 * miptree and render into that.
902 */
903 intel_renderbuffer_move_to_temp(brw, irb, false);
904 assert(irb->align_wa_mt);
905 mt = irb->align_wa_mt;
906 }
907 }
908
909 surf = brw_state_batch(brw, 6 * 4, 32, &offset);
910
911 format = brw->mesa_to_isl_render_format[rb_format];
912 if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
913 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
914 __func__, _mesa_get_format_name(rb_format));
915 }
916
917 surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
918 format << BRW_SURFACE_FORMAT_SHIFT);
919
920 /* reloc */
921 assert(mt->offset % mt->cpp == 0);
922 surf[1] = brw_state_reloc(&brw->batch, offset + 4, mt->bo,
923 mt->offset +
924 intel_renderbuffer_get_tile_offsets(irb,
925 &tile_x,
926 &tile_y),
927 RELOC_WRITE);
928
929 surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
930 (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
931
932 surf[3] = (brw_get_surface_tiling_bits(mt->surf.tiling) |
933 (mt->surf.row_pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
934
935 surf[4] = brw_get_surface_num_multisamples(mt->surf.samples);
936
937 assert(devinfo->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
938 /* Note that the low bits of these fields are missing, so tile_x must
939 * be a multiple of 4 and tile_y a multiple of 2, as asserted below.
940 */
941 assert(tile_x % 4 == 0);
942 assert(tile_y % 2 == 0);
943 surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
944 (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
945 (mt->surf.image_alignment_el.height == 4 ?
946 BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));
947
948 if (devinfo->gen < 6) {
949 /* _NEW_COLOR */
950 if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
951 (ctx->Color.BlendEnabled & (1 << unit)))
952 surf[0] |= BRW_SURFACE_BLEND_ENABLED;
953
954 if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 0))
955 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
956 if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 1))
957 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
958 if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 2))
959 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;
960
961 /* Disable writes to the alpha component when the
962 * renderbuffer is XRGB.
963 */
964 if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
965 !GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 3)) {
966 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
967 }
968 }
969
970 return offset;
971 }
972
973 static void
974 update_renderbuffer_surfaces(struct brw_context *brw)
975 {
976 const struct gen_device_info *devinfo = &brw->screen->devinfo;
977 const struct gl_context *ctx = &brw->ctx;
978
979 /* _NEW_BUFFERS | _NEW_COLOR */
980 const struct gl_framebuffer *fb = ctx->DrawBuffer;
981
982 /* Render targets always start at binding table index 0. */
983 const unsigned rt_start = 0;
984
985 uint32_t *surf_offsets = brw->wm.base.surf_offset;
986
987 /* Update surfaces for drawing buffers */
988 if (fb->_NumColorDrawBuffers >= 1) {
989 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
990 struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
991
992 if (intel_renderbuffer(rb)) {
993 surf_offsets[rt_start + i] = devinfo->gen >= 6 ?
994 gen6_update_renderbuffer_surface(brw, rb, i, rt_start + i) :
995 gen4_update_renderbuffer_surface(brw, rb, i, rt_start + i);
996 } else {
997 emit_null_surface_state(brw, fb, &surf_offsets[rt_start + i]);
998 }
999 }
1000 } else {
1001 emit_null_surface_state(brw, fb, &surf_offsets[rt_start]);
1002 }
1003
1004 /* The PIPE_CONTROL command description says:
1005 *
1006 * "Whenever a Binding Table Index (BTI) used by a Render Taget Message
1007 * points to a different RENDER_SURFACE_STATE, SW must issue a Render
1008 * Target Cache Flush by enabling this bit. When render target flush
1009 * is set due to new association of BTI, PS Scoreboard Stall bit must
1010 * be set in this packet."
1011 */
1012 if (devinfo->gen >= 11) {
1013 brw_emit_pipe_control_flush(brw,
1014 PIPE_CONTROL_RENDER_TARGET_FLUSH |
1015 PIPE_CONTROL_STALL_AT_SCOREBOARD);
1016 }
1017
1018 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1019 }
1020
1021 const struct brw_tracked_state brw_renderbuffer_surfaces = {
1022 .dirty = {
1023 .mesa = _NEW_BUFFERS |
1024 _NEW_COLOR,
1025 .brw = BRW_NEW_BATCH,
1026 },
1027 .emit = update_renderbuffer_surfaces,
1028 };
1029
1030 const struct brw_tracked_state gen6_renderbuffer_surfaces = {
1031 .dirty = {
1032 .mesa = _NEW_BUFFERS,
1033 .brw = BRW_NEW_BATCH |
1034 BRW_NEW_AUX_STATE,
1035 },
1036 .emit = update_renderbuffer_surfaces,
1037 };
1038
1039 static void
1040 update_renderbuffer_read_surfaces(struct brw_context *brw)
1041 {
1042 const struct gl_context *ctx = &brw->ctx;
1043
1044 /* BRW_NEW_FS_PROG_DATA */
1045 const struct brw_wm_prog_data *wm_prog_data =
1046 brw_wm_prog_data(brw->wm.base.prog_data);
1047
1048 if (wm_prog_data->has_render_target_reads &&
1049 !ctx->Extensions.EXT_shader_framebuffer_fetch) {
1050 /* _NEW_BUFFERS */
1051 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1052
1053 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
1054 struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
1055 const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
1056 const unsigned surf_index =
1057 wm_prog_data->binding_table.render_target_read_start + i;
1058 uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];
1059
1060 if (irb) {
1061 const enum isl_format format = brw->mesa_to_isl_render_format[
1062 _mesa_get_render_format(ctx, intel_rb_format(irb))];
1063 assert(isl_format_supports_sampling(&brw->screen->devinfo,
1064 format));
1065
1066 /* Override the target of the texture if the render buffer is a
1067 * single slice of a 3D texture (since the minimum array element
1068 * field of the surface state structure is ignored by the sampler
1069 * unit for 3D textures on some hardware), or if the render buffer
1070 * is a 1D array (since shaders always provide the array index
1071 * coordinate at the Z component to avoid state-dependent
1072 * recompiles when changing the texture target of the
1073 * framebuffer).
1074 */
1075 const GLenum target =
1076 (irb->mt->target == GL_TEXTURE_3D &&
1077 irb->layer_count == 1) ? GL_TEXTURE_2D :
1078 irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
1079 irb->mt->target;
1080
1081 const struct isl_view view = {
1082 .format = format,
1083 .base_level = irb->mt_level - irb->mt->first_level,
1084 .levels = 1,
1085 .base_array_layer = irb->mt_layer,
1086 .array_len = irb->layer_count,
1087 .swizzle = ISL_SWIZZLE_IDENTITY,
1088 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
1089 };
1090
1091 enum isl_aux_usage aux_usage =
1092 intel_miptree_texture_aux_usage(brw, irb->mt, format);
1093 if (brw->draw_aux_usage[i] == ISL_AUX_USAGE_NONE)
1094 aux_usage = ISL_AUX_USAGE_NONE;
1095
1096 brw_emit_surface_state(brw, irb->mt, target, view, aux_usage,
1097 surf_offset, surf_index,
1098 0);
1099
1100 } else {
1101 emit_null_surface_state(brw, fb, surf_offset);
1102 }
1103 }
1104
1105 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1106 }
1107 }
1108
1109 const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
1110 .dirty = {
1111 .mesa = _NEW_BUFFERS,
1112 .brw = BRW_NEW_BATCH |
1113 BRW_NEW_AUX_STATE |
1114 BRW_NEW_FS_PROG_DATA,
1115 },
1116 .emit = update_renderbuffer_read_surfaces,
1117 };
1118
1119 static bool
1120 is_depth_texture(struct intel_texture_object *iobj)
1121 {
1122 GLenum base_format = _mesa_get_format_base_format(iobj->_Format);
1123 return base_format == GL_DEPTH_COMPONENT ||
1124 (base_format == GL_DEPTH_STENCIL && !iobj->base.StencilSampling);
1125 }
1126
1127 static void
1128 update_stage_texture_surfaces(struct brw_context *brw,
1129 const struct gl_program *prog,
1130 struct brw_stage_state *stage_state,
1131 bool for_gather, uint32_t plane)
1132 {
1133 if (!prog)
1134 return;
1135
1136 struct gl_context *ctx = &brw->ctx;
1137
1138 uint32_t *surf_offset = stage_state->surf_offset;
1139
1140 /* BRW_NEW_*_PROG_DATA */
1141 if (for_gather)
1142 surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
1143 else
1144 surf_offset += stage_state->prog_data->binding_table.plane_start[plane];
1145
1146 unsigned num_samplers = util_last_bit(prog->SamplersUsed);
1147 for (unsigned s = 0; s < num_samplers; s++) {
1148 surf_offset[s] = 0;
1149
1150 if (prog->SamplersUsed & (1 << s)) {
1151 const unsigned unit = prog->SamplerUnits[s];
1152 const bool used_by_txf = prog->info.textures_used_by_txf & (1 << s);
1153 struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
1154 struct intel_texture_object *iobj = intel_texture_object(obj);
1155
1156 /* _NEW_TEXTURE */
1157 if (!obj)
1158 continue;
1159
1160 if ((prog->ShadowSamplers & (1 << s)) && !is_depth_texture(iobj)) {
1161 /* A programming note for the sample_c message says:
1162 *
1163 * "The Surface Format of the associated surface must be
1164 * indicated as supporting shadow mapping as indicated in the
1165 * surface format table."
1166 *
1167 * Accessing non-depth textures via a sampler*Shadow type is
1168 * undefined. GLSL 4.50 page 162 says:
1169 *
1170 * "If a shadow texture call is made to a sampler that does not
1171 * represent a depth texture, then results are undefined."
1172 *
1173 * We give them a null surface (zeros) for undefined. We've seen
1174 * GPU hangs with color buffers and sample_c, so we try to avoid
1175 * those with this hack.
1176 */
1177 emit_null_surface_state(brw, NULL, surf_offset + s);
1178 } else {
1179 brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather,
1180 used_by_txf, plane);
1181 }
1182 }
1183 }
1184 }
1185
1186
1187 /**
1188 * Construct SURFACE_STATE objects for enabled textures.
1189 */
1190 static void
1191 brw_update_texture_surfaces(struct brw_context *brw)
1192 {
1193 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1194
1195 /* BRW_NEW_VERTEX_PROGRAM */
1196 struct gl_program *vs = brw->programs[MESA_SHADER_VERTEX];
1197
1198 /* BRW_NEW_TESS_PROGRAMS */
1199 struct gl_program *tcs = brw->programs[MESA_SHADER_TESS_CTRL];
1200 struct gl_program *tes = brw->programs[MESA_SHADER_TESS_EVAL];
1201
1202 /* BRW_NEW_GEOMETRY_PROGRAM */
1203 struct gl_program *gs = brw->programs[MESA_SHADER_GEOMETRY];
1204
1205 /* BRW_NEW_FRAGMENT_PROGRAM */
1206 struct gl_program *fs = brw->programs[MESA_SHADER_FRAGMENT];
1207
1208 /* _NEW_TEXTURE */
1209 update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
1210 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
1211 update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
1212 update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
1213 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);
1214
1215 /* Emit an alternate set of surface state for gather. This
1216 * allows the surface format to be overridden for only the
1217 * gather4 messages. */
1218 if (devinfo->gen < 8) {
1219 if (vs && vs->info.uses_texture_gather)
1220 update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
1221 if (tcs && tcs->info.uses_texture_gather)
1222 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
1223 if (tes && tes->info.uses_texture_gather)
1224 update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
1225 if (gs && gs->info.uses_texture_gather)
1226 update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
1227 if (fs && fs->info.uses_texture_gather)
1228 update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
1229 }
1230
1231 if (fs) {
1232 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
1233 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
1234 }
1235
1236 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1237 }
1238
1239 const struct brw_tracked_state brw_texture_surfaces = {
1240 .dirty = {
1241 .mesa = _NEW_TEXTURE,
1242 .brw = BRW_NEW_BATCH |
1243 BRW_NEW_AUX_STATE |
1244 BRW_NEW_FRAGMENT_PROGRAM |
1245 BRW_NEW_FS_PROG_DATA |
1246 BRW_NEW_GEOMETRY_PROGRAM |
1247 BRW_NEW_GS_PROG_DATA |
1248 BRW_NEW_TESS_PROGRAMS |
1249 BRW_NEW_TCS_PROG_DATA |
1250 BRW_NEW_TES_PROG_DATA |
1251 BRW_NEW_TEXTURE_BUFFER |
1252 BRW_NEW_VERTEX_PROGRAM |
1253 BRW_NEW_VS_PROG_DATA,
1254 },
1255 .emit = brw_update_texture_surfaces,
1256 };
1257
1258 static void
1259 brw_update_cs_texture_surfaces(struct brw_context *brw)
1260 {
1261 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1262
1263 /* BRW_NEW_COMPUTE_PROGRAM */
1264 struct gl_program *cs = brw->programs[MESA_SHADER_COMPUTE];
1265
1266 /* _NEW_TEXTURE */
1267 update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);
1268
1269 /* Emit an alternate set of surface state for gather. This
1270 * allows the surface format to be overridden for only the
1271 * gather4 messages.
1272 */
1273 if (devinfo->gen < 8) {
1274 if (cs && cs->info.uses_texture_gather)
1275 update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
1276 }
1277
1278 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1279 }
1280
1281 const struct brw_tracked_state brw_cs_texture_surfaces = {
1282 .dirty = {
1283 .mesa = _NEW_TEXTURE,
1284 .brw = BRW_NEW_BATCH |
1285 BRW_NEW_COMPUTE_PROGRAM |
1286 BRW_NEW_AUX_STATE,
1287 },
1288 .emit = brw_update_cs_texture_surfaces,
1289 };
1290
1291 static void
1292 upload_buffer_surface(struct brw_context *brw,
1293 struct gl_buffer_binding *binding,
1294 uint32_t *out_offset,
1295 enum isl_format format,
1296 unsigned reloc_flags)
1297 {
1298 struct gl_context *ctx = &brw->ctx;
1299
1300 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1301 emit_null_surface_state(brw, NULL, out_offset);
1302 } else {
1303 ptrdiff_t size = binding->BufferObject->Size - binding->Offset;
1304 if (!binding->AutomaticSize)
1305 size = MIN2(size, binding->Size);
1306
1307 struct intel_buffer_object *iobj =
1308 intel_buffer_object(binding->BufferObject);
1309 struct brw_bo *bo =
1310 intel_bufferobj_buffer(brw, iobj, binding->Offset, size,
1311 (reloc_flags & RELOC_WRITE) != 0);
1312
1313 brw_emit_buffer_surface_state(brw, out_offset, bo, binding->Offset,
1314 format, size, 1, reloc_flags);
1315 }
1316 }
1317
1318 void
1319 brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
1320 struct brw_stage_state *stage_state,
1321 struct brw_stage_prog_data *prog_data)
1322 {
1323 struct gl_context *ctx = &brw->ctx;
1324
1325 if (!prog || (prog->info.num_ubos == 0 &&
1326 prog->info.num_ssbos == 0 &&
1327 prog->info.num_abos == 0))
1328 return;
1329
1330 uint32_t *ubo_surf_offsets =
1331 &stage_state->surf_offset[prog_data->binding_table.ubo_start];
1332
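/* UBO surfaces use an RGBA32F format so constant loads fetch whole
 * vec4s, while the atomic counter and shader storage buffers below use
 * ISL_FORMAT_RAW, as required by the untyped read/write and atomic
 * messages that access them.
 */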
1333 for (int i = 0; i < prog->info.num_ubos; i++) {
1334 struct gl_buffer_binding *binding =
1335 &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];
1336 upload_buffer_surface(brw, binding, &ubo_surf_offsets[i],
1337 ISL_FORMAT_R32G32B32A32_FLOAT, 0);
1338 }
1339
1340 uint32_t *abo_surf_offsets =
1341 &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
1342 uint32_t *ssbo_surf_offsets = abo_surf_offsets + prog->info.num_abos;
1343
1344 for (int i = 0; i < prog->info.num_abos; i++) {
1345 struct gl_buffer_binding *binding =
1346 &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
1347 upload_buffer_surface(brw, binding, &abo_surf_offsets[i],
1348 ISL_FORMAT_RAW, RELOC_WRITE);
1349 }
1350
1351 for (int i = 0; i < prog->info.num_ssbos; i++) {
1352 struct gl_buffer_binding *binding =
1353 &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];
1354
1355 upload_buffer_surface(brw, binding, &ssbo_surf_offsets[i],
1356 ISL_FORMAT_RAW, RELOC_WRITE);
1357 }
1358
1359 stage_state->push_constants_dirty = true;
1360 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1361 }
1362
1363 static void
1364 brw_upload_wm_ubo_surfaces(struct brw_context *brw)
1365 {
1366 struct gl_context *ctx = &brw->ctx;
1367 /* _NEW_PROGRAM */
1368 struct gl_program *prog = ctx->FragmentProgram._Current;
1369
1370 /* BRW_NEW_FS_PROG_DATA */
1371 brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
1372 }
1373
1374 const struct brw_tracked_state brw_wm_ubo_surfaces = {
1375 .dirty = {
1376 .mesa = _NEW_PROGRAM,
1377 .brw = BRW_NEW_BATCH |
1378 BRW_NEW_FS_PROG_DATA |
1379 BRW_NEW_UNIFORM_BUFFER,
1380 },
1381 .emit = brw_upload_wm_ubo_surfaces,
1382 };
1383
1384 static void
1385 brw_upload_cs_ubo_surfaces(struct brw_context *brw)
1386 {
1387 struct gl_context *ctx = &brw->ctx;
1388 /* _NEW_PROGRAM */
1389 struct gl_program *prog =
1390 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1391
1392 /* BRW_NEW_CS_PROG_DATA */
1393 brw_upload_ubo_surfaces(brw, prog, &brw->cs.base, brw->cs.base.prog_data);
1394 }
1395
1396 const struct brw_tracked_state brw_cs_ubo_surfaces = {
1397 .dirty = {
1398 .mesa = _NEW_PROGRAM,
1399 .brw = BRW_NEW_BATCH |
1400 BRW_NEW_CS_PROG_DATA |
1401 BRW_NEW_UNIFORM_BUFFER,
1402 },
1403 .emit = brw_upload_cs_ubo_surfaces,
1404 };
1405
1406 static void
1407 brw_upload_cs_image_surfaces(struct brw_context *brw)
1408 {
1409 /* _NEW_PROGRAM */
1410 const struct gl_program *cp = brw->programs[MESA_SHADER_COMPUTE];
1411
1412 if (cp) {
1413 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1414 brw_upload_image_surfaces(brw, cp, &brw->cs.base,
1415 brw->cs.base.prog_data);
1416 }
1417 }
1418
1419 const struct brw_tracked_state brw_cs_image_surfaces = {
1420 .dirty = {
1421 .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
1422 .brw = BRW_NEW_BATCH |
1423 BRW_NEW_CS_PROG_DATA |
1424 BRW_NEW_AUX_STATE |
1425 BRW_NEW_IMAGE_UNITS
1426 },
1427 .emit = brw_upload_cs_image_surfaces,
1428 };
1429
1430 static uint32_t
1431 get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
1432 {
1433 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1434 enum isl_format hw_format = brw_isl_format_for_mesa_format(format);
1435 if (access == GL_WRITE_ONLY) {
1436 return hw_format;
1437 } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
1438 /* Typed surface reads support a very limited subset of the shader
1439 * image formats. Translate it into the closest format the
1440 * hardware supports.
1441 */
1442 return isl_lower_storage_image_format(devinfo, hw_format);
1443 } else {
1444 /* The hardware doesn't actually support a typed format that we can use
1445 * so we have to fall back to untyped read/write messages.
1446 */
1447 return ISL_FORMAT_RAW;
1448 }
1449 }
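/* Illustrative example (the exact lowering is gen-dependent): a
 * read/write GL_RGBA8 image may be lowered by
 * isl_lower_storage_image_format() to a single-channel format of the
 * same size such as R32_UINT, with the shader doing the pack/unpack.
 */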
1450
1451 static void
1452 update_default_image_param(struct brw_context *brw,
1453 struct gl_image_unit *u,
1454 unsigned surface_idx,
1455 struct brw_image_param *param)
1456 {
1457 memset(param, 0, sizeof(*param));
1458 param->surface_idx = surface_idx;
1459 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
1460 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
1461 * detailed explanation of these parameters.
1462 */
1463 param->swizzling[0] = 0xff;
1464 param->swizzling[1] = 0xff;
1465 }
1466
1467 static void
1468 update_buffer_image_param(struct brw_context *brw,
1469 struct gl_image_unit *u,
1470 unsigned surface_idx,
1471 struct brw_image_param *param)
1472 {
1473 struct gl_buffer_object *obj = u->TexObj->BufferObject;
1474 const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
1475 update_default_image_param(brw, u, surface_idx, param);
1476
1477 param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
1478 param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
1479 }
1480
1481 static unsigned
1482 get_image_num_layers(const struct intel_mipmap_tree *mt, GLenum target,
1483 unsigned level)
1484 {
1485 if (target == GL_TEXTURE_CUBE_MAP)
1486 return 6;
1487
1488 return target == GL_TEXTURE_3D ?
1489 minify(mt->surf.logical_level0_px.depth, level) :
1490 mt->surf.logical_level0_px.array_len;
1491 }
1492
1493 static void
1494 update_image_surface(struct brw_context *brw,
1495 struct gl_image_unit *u,
1496 GLenum access,
1497 unsigned surface_idx,
1498 uint32_t *surf_offset,
1499 struct brw_image_param *param)
1500 {
1501 if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
1502 struct gl_texture_object *obj = u->TexObj;
1503 const unsigned format = get_image_format(brw, u->_ActualFormat, access);
1504
1505 if (obj->Target == GL_TEXTURE_BUFFER) {
1506 struct intel_buffer_object *intel_obj =
1507 intel_buffer_object(obj->BufferObject);
1508 const unsigned texel_size = (format == ISL_FORMAT_RAW ? 1 :
1509 _mesa_get_format_bytes(u->_ActualFormat));
1510
1511 brw_emit_buffer_surface_state(
1512 brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
1513 format, intel_obj->Base.Size, texel_size,
1514 access != GL_READ_ONLY ? RELOC_WRITE : 0);
1515
1516 update_buffer_image_param(brw, u, surface_idx, param);
1517
1518 } else {
1519 struct intel_texture_object *intel_obj = intel_texture_object(obj);
1520 struct intel_mipmap_tree *mt = intel_obj->mt;
1521 const unsigned num_layers = u->Layered ?
1522 get_image_num_layers(mt, obj->Target, u->Level) : 1;
1523
1524 struct isl_view view = {
1525 .format = format,
1526 .base_level = obj->MinLevel + u->Level,
1527 .levels = 1,
1528 .base_array_layer = obj->MinLayer + u->_Layer,
1529 .array_len = num_layers,
1530 .swizzle = ISL_SWIZZLE_IDENTITY,
1531 .usage = ISL_SURF_USAGE_STORAGE_BIT,
1532 };
1533
1534 if (format == ISL_FORMAT_RAW) {
1535 brw_emit_buffer_surface_state(
1536 brw, surf_offset, mt->bo, mt->offset,
1537 format, mt->bo->size - mt->offset, 1 /* pitch */,
1538 access != GL_READ_ONLY ? RELOC_WRITE : 0);
1539
1540 } else {
1541 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
1542 assert(!intel_miptree_has_color_unresolved(mt,
1543 view.base_level, 1,
1544 view.base_array_layer,
1545 view.array_len));
1546 brw_emit_surface_state(brw, mt, mt->target, view,
1547 ISL_AUX_USAGE_NONE,
1548 surf_offset, surf_index,
1549 access == GL_READ_ONLY ? 0 : RELOC_WRITE);
1550 }
1551
1552 isl_surf_fill_image_param(&brw->isl_dev, param, &mt->surf, &view);
1553 param->surface_idx = surface_idx;
1554 }
1555
1556 } else {
1557 emit_null_surface_state(brw, NULL, surf_offset);
1558 update_default_image_param(brw, u, surface_idx, param);
1559 }
1560 }
1561
1562 void
1563 brw_upload_image_surfaces(struct brw_context *brw,
1564 const struct gl_program *prog,
1565 struct brw_stage_state *stage_state,
1566 struct brw_stage_prog_data *prog_data)
1567 {
1568 assert(prog);
1569 struct gl_context *ctx = &brw->ctx;
1570
1571 if (prog->info.num_images) {
1572 for (unsigned i = 0; i < prog->info.num_images; i++) {
1573 struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
1574 const unsigned surf_idx = prog_data->binding_table.image_start + i;
1575
1576 update_image_surface(brw, u, prog->sh.ImageAccess[i],
1577 surf_idx,
1578 &stage_state->surf_offset[surf_idx],
1579 &stage_state->image_param[i]);
1580 }
1581
1582 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1583 /* This may have changed the image metadata that depends on the context
1584 * image unit state and is passed to the program as uniforms; make sure
1585 * that push and pull constants are reuploaded.
1586 */
1587 brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
1588 }
1589 }
1590
1591 static void
1592 brw_upload_wm_image_surfaces(struct brw_context *brw)
1593 {
1594 /* BRW_NEW_FRAGMENT_PROGRAM */
1595 const struct gl_program *wm = brw->programs[MESA_SHADER_FRAGMENT];
1596
1597 if (wm) {
1598 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1599 brw_upload_image_surfaces(brw, wm, &brw->wm.base,
1600 brw->wm.base.prog_data);
1601 }
1602 }
1603
1604 const struct brw_tracked_state brw_wm_image_surfaces = {
1605 .dirty = {
1606 .mesa = _NEW_TEXTURE,
1607 .brw = BRW_NEW_BATCH |
1608 BRW_NEW_AUX_STATE |
1609 BRW_NEW_FRAGMENT_PROGRAM |
1610 BRW_NEW_FS_PROG_DATA |
1611 BRW_NEW_IMAGE_UNITS
1612 },
1613 .emit = brw_upload_wm_image_surfaces,
1614 };
1615
1616 static void
1617 brw_upload_cs_work_groups_surface(struct brw_context *brw)
1618 {
1619 struct gl_context *ctx = &brw->ctx;
1620 /* _NEW_PROGRAM */
1621 struct gl_program *prog =
1622 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1623 /* BRW_NEW_CS_PROG_DATA */
1624 const struct brw_cs_prog_data *cs_prog_data =
1625 brw_cs_prog_data(brw->cs.base.prog_data);
1626
1627 if (prog && cs_prog_data->uses_num_work_groups) {
1628 const unsigned surf_idx =
1629 cs_prog_data->binding_table.work_groups_start;
1630 uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
1631 struct brw_bo *bo;
1632 uint32_t bo_offset;
1633
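/* For glDispatchCompute the work group count is only known on the CPU
 * side, so upload it here; for glDispatchComputeIndirect it already
 * lives in a GPU buffer and the surface can point at it directly.
 */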
1634 if (brw->compute.num_work_groups_bo == NULL) {
1635 bo = NULL;
1636 brw_upload_data(&brw->upload,
1637 (void *)brw->compute.num_work_groups,
1638 3 * sizeof(GLuint),
1639 sizeof(GLuint),
1640 &bo,
1641 &bo_offset);
1642 } else {
1643 bo = brw->compute.num_work_groups_bo;
1644 bo_offset = brw->compute.num_work_groups_offset;
1645 }
1646
1647 brw_emit_buffer_surface_state(brw, surf_offset,
1648 bo, bo_offset,
1649 ISL_FORMAT_RAW,
1650 3 * sizeof(GLuint), 1,
1651 RELOC_WRITE);
1652 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1653 }
1654 }
1655
1656 const struct brw_tracked_state brw_cs_work_groups_surface = {
1657 .dirty = {
1658 .brw = BRW_NEW_CS_PROG_DATA |
1659 BRW_NEW_CS_WORK_GROUPS
1660 },
1661 .emit = brw_upload_cs_work_groups_surface,
1662 };