i965: Move buffer texture size calculation into a common helper function.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_wm_surface_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "compiler/nir/nir.h"
34 #include "main/context.h"
35 #include "main/blend.h"
36 #include "main/mtypes.h"
37 #include "main/samplerobj.h"
38 #include "main/shaderimage.h"
39 #include "main/teximage.h"
40 #include "program/prog_parameter.h"
41 #include "program/prog_instruction.h"
42 #include "main/framebuffer.h"
43 #include "main/shaderapi.h"
44
45 #include "isl/isl.h"
46
47 #include "intel_mipmap_tree.h"
48 #include "intel_batchbuffer.h"
49 #include "intel_tex.h"
50 #include "intel_fbo.h"
51 #include "intel_buffer_objects.h"
52
53 #include "brw_context.h"
54 #include "brw_state.h"
55 #include "brw_defines.h"
56 #include "brw_wm.h"
57
58 uint32_t wb_mocs[] = {
59 [7] = GEN7_MOCS_L3,
60 [8] = BDW_MOCS_WB,
61 [9] = SKL_MOCS_WB,
62 [10] = CNL_MOCS_WB,
63 [11] = ICL_MOCS_WB,
64 };
65
66 uint32_t pte_mocs[] = {
67 [7] = GEN7_MOCS_L3,
68 [8] = BDW_MOCS_PTE,
69 [9] = SKL_MOCS_PTE,
70 [10] = CNL_MOCS_PTE,
71 [11] = ICL_MOCS_PTE,
72 };
73
74 uint32_t
75 brw_get_bo_mocs(const struct gen_device_info *devinfo, struct brw_bo *bo)
76 {
77 return (bo && bo->external ? pte_mocs : wb_mocs)[devinfo->gen];
78 }
79
/**
 * Fill *surf with an isl_surf describing \p mt as viewed through texture
 * target \p target.
 *
 * Normally this is a straight copy of mt->surf with the dimensionality
 * overridden. If the miptree's dim_layout is incompatible with the layout
 * implied by the target, the function instead rewrites *view, *tile_x,
 * *tile_y and *offset so that the hardware is pointed directly at the one
 * level/layer being accessed (see the long comment below).
 */
static void
get_isl_surf(struct brw_context *brw, struct intel_mipmap_tree *mt,
             GLenum target, struct isl_view *view,
             uint32_t *tile_x, uint32_t *tile_y,
             uint32_t *offset, struct isl_surf *surf)
{
   *surf = mt->surf;

   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const enum isl_dim_layout dim_layout =
      get_isl_dim_layout(devinfo, mt->surf.tiling, target);

   surf->dim = get_isl_surf_dim(target);

   /* Common case: the miptree layout already matches the target. */
   if (surf->dim_layout == dim_layout)
      return;

   /* The layout of the specified texture target is not compatible with the
    * actual layout of the miptree structure in memory -- You're entering
    * dangerous territory, this can only possibly work if you only intended
    * to access a single level and slice of the texture, and the hardware
    * supports the tile offset feature in order to allow non-tile-aligned
    * base offsets, since we'll have to point the hardware to the first
    * texel of the level instead of relying on the usual base level/layer
    * controls.
    */
   assert(devinfo->has_surface_tile_offset);
   assert(view->levels == 1 && view->array_len == 1);
   assert(*tile_x == 0 && *tile_y == 0);

   /* Point the base address at the requested level/layer; the intra-tile
    * remainder comes back through tile_x/tile_y.
    */
   *offset += intel_miptree_get_tile_offsets(mt, view->base_level,
                                             view->base_array_layer,
                                             tile_x, tile_y);

   /* Minify the logical dimensions of the texture. */
   const unsigned l = view->base_level - mt->first_level;
   surf->logical_level0_px.width = minify(surf->logical_level0_px.width, l);
   surf->logical_level0_px.height = surf->dim <= ISL_SURF_DIM_1D ? 1 :
      minify(surf->logical_level0_px.height, l);
   surf->logical_level0_px.depth = surf->dim <= ISL_SURF_DIM_2D ? 1 :
      minify(surf->logical_level0_px.depth, l);

   /* Only the base level and layer can be addressed with the overridden
    * layout.
    */
   surf->logical_level0_px.array_len = 1;
   surf->levels = 1;
   surf->dim_layout = dim_layout;

   /* The requested slice of the texture is now at the base level and
    * layer.
    */
   view->base_level = 0;
   view->base_array_layer = 0;
}
135
/**
 * Allocate and fill a SURFACE_STATE for \p mt viewed as \p target through
 * \p view, storing its batch offset in *surf_offset.
 *
 * Handles auxiliary (CCS/MCS/HiZ) surfaces, indirect clear colors, and the
 * batch relocations for the main, aux, and clear-color addresses.
 * \p reloc_flags should include RELOC_WRITE for render targets.
 */
static void
brw_emit_surface_state(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       GLenum target, struct isl_view view,
                       enum isl_aux_usage aux_usage,
                       uint32_t *surf_offset, int surf_index,
                       unsigned reloc_flags)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t tile_x = mt->level[0].level_x;
   uint32_t tile_y = mt->level[0].level_y;
   uint32_t offset = mt->offset;

   struct isl_surf surf;

   /* May rewrite view/tile_x/tile_y/offset if the miptree layout doesn't
    * match the target (see get_isl_surf).
    */
   get_isl_surf(brw, mt, target, &view, &tile_x, &tile_y, &offset, &surf);

   union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };

   struct brw_bo *aux_bo = NULL;
   struct isl_surf *aux_surf = NULL;
   uint64_t aux_offset = 0;
   struct brw_bo *clear_bo = NULL;
   uint32_t clear_offset = 0;

   if (aux_usage != ISL_AUX_USAGE_NONE) {
      aux_surf = &mt->aux_buf->surf;
      aux_bo = mt->aux_buf->bo;
      aux_offset = mt->aux_buf->offset;

      /* We only really need a clear color if we also have an auxiliary
       * surface. Without one, it does nothing.
       */
      clear_color =
         intel_miptree_get_clear_color(devinfo, mt, view.format,
                                       view.usage & ISL_SURF_USAGE_TEXTURE_BIT,
                                       &clear_bo, &clear_offset);
   }

   void *state = brw_state_batch(brw,
                                 brw->isl_dev.ss.size,
                                 brw->isl_dev.ss.align,
                                 surf_offset);

   isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
                       .address = brw_state_reloc(&brw->batch,
                                                  *surf_offset + brw->isl_dev.ss.addr_offset,
                                                  mt->bo, offset, reloc_flags),
                       .aux_surf = aux_surf, .aux_usage = aux_usage,
                       .aux_address = aux_offset,
                       .mocs = brw_get_bo_mocs(devinfo, mt->bo),
                       .clear_color = clear_color,
                       .use_clear_address = clear_bo != NULL,
                       .clear_address = clear_offset,
                       .x_offset_sa = tile_x, .y_offset_sa = tile_y);
   if (aux_surf) {
      /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
       * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
       * contain other control information. Since buffer addresses are always
       * on 4k boundaries (and thus have their lower 12 bits zero), we can use
       * an ordinary reloc to do the necessary address translation.
       *
       * FIXME: move to the point of assignment.
       */
      assert((aux_offset & 0xfff) == 0);

      /* Gen8+ uses a 64-bit aux address field; earlier gens a 32-bit one.
       * In both cases the existing field value (written by
       * isl_surf_fill_state) carries the offset/control bits, which the
       * reloc preserves.
       */
      if (devinfo->gen >= 8) {
         uint64_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
         *aux_addr = brw_state_reloc(&brw->batch,
                                     *surf_offset +
                                     brw->isl_dev.ss.aux_addr_offset,
                                     aux_bo, *aux_addr,
                                     reloc_flags);
      } else {
         uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
         *aux_addr = brw_state_reloc(&brw->batch,
                                     *surf_offset +
                                     brw->isl_dev.ss.aux_addr_offset,
                                     aux_bo, *aux_addr,
                                     reloc_flags);
      }
   }

   if (clear_bo != NULL) {
      /* Make sure the offset is aligned with a cacheline. */
      assert((clear_offset & 0x3f) == 0);
      uint32_t *clear_address =
            state + brw->isl_dev.ss.clear_color_state_offset;
      *clear_address = brw_state_reloc(&brw->batch,
                                       *surf_offset +
                                       brw->isl_dev.ss.clear_color_state_offset,
                                       clear_bo, *clear_address, reloc_flags);
   }
}
231
/**
 * Emit a render-target SURFACE_STATE for renderbuffer \p rb on Gen6+ and
 * return its batch offset. \p unit indexes brw->draw_aux_usage to pick the
 * aux usage for this draw.
 */
static uint32_t
gen6_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 unsigned unit,
                                 uint32_t surf_index)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;

   assert(brw_render_target_supported(brw, rb));

   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }
   enum isl_format isl_format = brw->mesa_to_isl_render_format[rb_format];

   /* View exactly one level, at the attached layer range. */
   struct isl_view view = {
      .format = isl_format,
      .base_level = irb->mt_level - irb->mt->first_level,
      .levels = 1,
      .base_array_layer = irb->mt_layer,
      .array_len = MAX2(irb->layer_count, 1),
      .swizzle = ISL_SWIZZLE_IDENTITY,
      .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
   };

   uint32_t offset;
   brw_emit_surface_state(brw, mt, mt->target, view,
                          brw->draw_aux_usage[unit],
                          &offset, surf_index,
                          RELOC_WRITE);
   return offset;
}
268
269 GLuint
270 translate_tex_target(GLenum target)
271 {
272 switch (target) {
273 case GL_TEXTURE_1D:
274 case GL_TEXTURE_1D_ARRAY_EXT:
275 return BRW_SURFACE_1D;
276
277 case GL_TEXTURE_RECTANGLE_NV:
278 return BRW_SURFACE_2D;
279
280 case GL_TEXTURE_2D:
281 case GL_TEXTURE_2D_ARRAY_EXT:
282 case GL_TEXTURE_EXTERNAL_OES:
283 case GL_TEXTURE_2D_MULTISAMPLE:
284 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
285 return BRW_SURFACE_2D;
286
287 case GL_TEXTURE_3D:
288 return BRW_SURFACE_3D;
289
290 case GL_TEXTURE_CUBE_MAP:
291 case GL_TEXTURE_CUBE_MAP_ARRAY:
292 return BRW_SURFACE_CUBE;
293
294 default:
295 unreachable("not reached");
296 }
297 }
298
299 uint32_t
300 brw_get_surface_tiling_bits(enum isl_tiling tiling)
301 {
302 switch (tiling) {
303 case ISL_TILING_X:
304 return BRW_SURFACE_TILED;
305 case ISL_TILING_Y0:
306 return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
307 default:
308 return 0;
309 }
310 }
311
312
313 uint32_t
314 brw_get_surface_num_multisamples(unsigned num_samples)
315 {
316 if (num_samples > 1)
317 return BRW_SURFACE_MULTISAMPLECOUNT_4;
318 else
319 return BRW_SURFACE_MULTISAMPLECOUNT_1;
320 }
321
/**
 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
 * swizzling.
 *
 * Builds a base swizzle table from the texture's depth mode and base
 * format, then composes it with the application-requested swizzle
 * (t->_Swizzle). Returns a MAKE_SWIZZLE4 value.
 */
int
brw_get_texture_swizzle(const struct gl_context *ctx,
                        const struct gl_texture_object *t)
{
   const struct gl_texture_image *img = t->Image[0][t->BaseLevel];

   /* Identity mapping; entries are overwritten below as needed. */
   int swizzles[SWIZZLE_NIL + 1] = {
      SWIZZLE_X,
      SWIZZLE_Y,
      SWIZZLE_Z,
      SWIZZLE_W,
      SWIZZLE_ZERO,
      SWIZZLE_ONE,
      SWIZZLE_NIL
   };

   if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
       img->_BaseFormat == GL_DEPTH_STENCIL) {
      GLenum depth_mode = t->DepthMode;

      /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
       * with depth component data specified with a sized internal format.
       * Otherwise, it's left at the old default, GL_LUMINANCE.
       */
      if (_mesa_is_gles3(ctx) &&
          img->InternalFormat != GL_DEPTH_COMPONENT &&
          img->InternalFormat != GL_DEPTH_STENCIL) {
         depth_mode = GL_RED;
      }

      /* Spread the single depth value into channels per the depth mode. */
      switch (depth_mode) {
      case GL_ALPHA:
         swizzles[0] = SWIZZLE_ZERO;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_LUMINANCE:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
         break;
      case GL_INTENSITY:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_RED:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_ONE;
         break;
      }
   }

   GLenum datatype = _mesa_get_format_datatype(img->TexFormat);

   /* If the texture's format is alpha-only, force R, G, and B to
    * 0.0. Similarly, if the texture's format has no alpha channel,
    * force the alpha value read to 1.0. This allows for the
    * implementation to use an RGBA texture for any of these formats
    * without leaking any unexpected values.
    */
   switch (img->_BaseFormat) {
   case GL_ALPHA:
      swizzles[0] = SWIZZLE_ZERO;
      swizzles[1] = SWIZZLE_ZERO;
      swizzles[2] = SWIZZLE_ZERO;
      break;
   case GL_LUMINANCE:
      if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
      }
      break;
   case GL_LUMINANCE_ALPHA:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_W;
      }
      break;
   case GL_INTENSITY:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
      }
      break;
   case GL_RED:
   case GL_RG:
   case GL_RGB:
      /* NOTE(review): the DXT1 cases look like they force opaque alpha for
       * formats whose compressed blocks can carry alpha bits -- confirm
       * against the S3TC spec handling elsewhere in the driver.
       */
      if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0 ||
          img->TexFormat == MESA_FORMAT_RGB_DXT1 ||
          img->TexFormat == MESA_FORMAT_SRGB_DXT1)
         swizzles[3] = SWIZZLE_ONE;
      break;
   }

   /* Compose the app-requested swizzle with the table built above. */
   return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
                        swizzles[GET_SWZ(t->_Swizzle, 1)],
                        swizzles[GET_SWZ(t->_Swizzle, 2)],
                        swizzles[GET_SWZ(t->_Swizzle, 3)]);
}
437
438 /**
439 * Convert an swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
440 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED). The mappings are
441 *
442 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
443 * 0 1 2 3 4 5
444 * 4 5 6 7 0 1
445 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
446 *
447 * which is simply adding 4 then modding by 8 (or anding with 7).
448 *
449 * We then may need to apply workarounds for textureGather hardware bugs.
450 */
451 static unsigned
452 swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
453 {
454 unsigned scs = (swizzle + 4) & 7;
455
456 return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
457 }
458
/**
 * Emit the SURFACE_STATE for texture unit \p unit into the batch, storing
 * its offset in *surf_offset.
 *
 * Buffer textures are delegated to brw_update_buffer_texture_surface().
 * For regular textures this selects the miptree (including YUV planes and
 * stencil shadows), picks the isl format (with sRGB-decode, gather and
 * stencil-sampling adjustments), builds the isl_view, and emits the state.
 *
 * \param for_gather  apply gen6/gen7 textureGather format workarounds
 * \param for_txf     force GL_DECODE_EXT (texelFetch ignores sRGB skip-decode)
 * \param plane       YUV plane index; 0 selects the base miptree
 */
static void brw_update_texture_surface(struct gl_context *ctx,
                                       unsigned unit,
                                       uint32_t *surf_offset,
                                       bool for_gather,
                                       bool for_txf,
                                       uint32_t plane)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;

   if (obj->Target == GL_TEXTURE_BUFFER) {
      brw_update_buffer_texture_surface(ctx, unit, surf_offset);

   } else {
      struct intel_texture_object *intel_obj = intel_texture_object(obj);
      struct intel_mipmap_tree *mt = intel_obj->mt;

      /* For multi-planar formats, switch to the requested plane's miptree.
       * Silently skip if that plane doesn't exist.
       */
      if (plane > 0) {
         if (mt->plane[plane - 1] == NULL)
            return;
         mt = mt->plane[plane - 1];
      }

      struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
      /* If this is a view with restricted NumLayers, then our effective depth
       * is not just the miptree depth.
       */
      unsigned view_num_layers;
      if (obj->Immutable && obj->Target != GL_TEXTURE_3D) {
         view_num_layers = obj->NumLayers;
      } else {
         view_num_layers = mt->surf.dim == ISL_SURF_DIM_3D ?
                              mt->surf.logical_level0_px.depth :
                              mt->surf.logical_level0_px.array_len;
      }

      /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
       * texturing functions that return a float, as our code generation always
       * selects the .x channel (which would always be 0).
       */
      struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
      const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
         (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
          firstImage->_BaseFormat == GL_DEPTH_STENCIL);
      const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
                                brw_get_texture_swizzle(&brw->ctx, obj));

      /* Choose the mesa format to translate: the miptree format for
       * depth/stencil, ETC-decompressed and planar cases; otherwise the
       * texture object's (possibly view-overridden) format.
       */
      mesa_format mesa_fmt;
      if (firstImage->_BaseFormat == GL_DEPTH_STENCIL ||
          firstImage->_BaseFormat == GL_DEPTH_COMPONENT) {
         /* The format from intel_obj may be a combined depth stencil format
          * when we just want depth. Pull it from the miptree instead. This
          * is safe because texture views aren't allowed on depth/stencil.
          */
         mesa_fmt = mt->format;
      } else if (mt->etc_format != MESA_FORMAT_NONE) {
         mesa_fmt = mt->format;
      } else if (plane > 0) {
         mesa_fmt = mt->format;
      } else {
         mesa_fmt = intel_obj->_Format;
      }
      enum isl_format format = translate_tex_format(brw, mesa_fmt,
                                                    for_txf ? GL_DECODE_EXT :
                                                    sampler->sRGBDecode);

      /* Implement gen6 and gen7 gather work-around */
      bool need_green_to_blue = false;
      if (for_gather) {
         if (devinfo->gen == 7 && (format == ISL_FORMAT_R32G32_FLOAT ||
                                   format == ISL_FORMAT_R32G32_SINT ||
                                   format == ISL_FORMAT_R32G32_UINT)) {
            format = ISL_FORMAT_R32G32_FLOAT_LD;
            need_green_to_blue = devinfo->is_haswell;
         } else if (devinfo->gen == 6) {
            /* Sandybridge's gather4 message is broken for integer formats.
             * To work around this, we pretend the surface is UNORM for
             * 8 or 16-bit formats, and emit shader instructions to recover
             * the real INT/UINT value. For 32-bit formats, we pretend
             * the surface is FLOAT, and simply reinterpret the resulting
             * bits.
             */
            switch (format) {
            case ISL_FORMAT_R8_SINT:
            case ISL_FORMAT_R8_UINT:
               format = ISL_FORMAT_R8_UNORM;
               break;

            case ISL_FORMAT_R16_SINT:
            case ISL_FORMAT_R16_UINT:
               format = ISL_FORMAT_R16_UNORM;
               break;

            case ISL_FORMAT_R32_SINT:
            case ISL_FORMAT_R32_UINT:
               format = ISL_FORMAT_R32_FLOAT;
               break;

            default:
               break;
            }
         }
      }

      /* Stencil sampling: gen7 and earlier can't sample W-tiled stencil
       * directly, so use the R8 shadow copy instead.
       */
      if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
         if (devinfo->gen <= 7) {
            assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
            mt = mt->r8stencil_mt;
         } else {
            mt = mt->stencil_mt;
         }
         format = ISL_FORMAT_R8_UINT;
      } else if (devinfo->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
         assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
         mt = mt->r8stencil_mt;
         format = ISL_FORMAT_R8_UINT;
      }

      /* Recover the binding-table index from the slot's position in the
       * WM surf_offset array.
       */
      const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];

      struct isl_view view = {
         .format = format,
         .base_level = obj->MinLevel + obj->BaseLevel,
         .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
         .base_array_layer = obj->MinLayer,
         .array_len = view_num_layers,
         .swizzle = {
            .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
            .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
            .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
            .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
         },
         .usage = ISL_SURF_USAGE_TEXTURE_BIT,
      };

      /* On Ivy Bridge and earlier, we handle texture swizzle with shader
       * code. The actual surface swizzle should be identity.
       */
      if (devinfo->gen <= 7 && !devinfo->is_haswell)
         view.swizzle = ISL_SWIZZLE_IDENTITY;

      if (obj->Target == GL_TEXTURE_CUBE_MAP ||
          obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
         view.usage |= ISL_SURF_USAGE_CUBE_BIT;

      enum isl_aux_usage aux_usage =
         intel_miptree_texture_aux_usage(brw, mt, format);

      brw_emit_surface_state(brw, mt, mt->target, view, aux_usage,
                             surf_offset, surf_index,
                             0);
   }
}
613
/**
 * Allocate and fill a buffer-type SURFACE_STATE and store its batch offset
 * in *out_offset.
 *
 * \param bo             backing buffer; NULL emits a surface whose address
 *                       is the raw \p buffer_offset (no relocation)
 * \param buffer_offset  byte offset of the surface within \p bo
 * \param surface_format ISL format for the surface
 * \param buffer_size    size of the addressable range, in bytes
 * \param pitch          element stride in bytes
 * \param reloc_flags    e.g. RELOC_WRITE for writable surfaces
 */
void
brw_emit_buffer_surface_state(struct brw_context *brw,
                              uint32_t *out_offset,
                              struct brw_bo *bo,
                              unsigned buffer_offset,
                              unsigned surface_format,
                              unsigned buffer_size,
                              unsigned pitch,
                              unsigned reloc_flags)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *dw = brw_state_batch(brw,
                                  brw->isl_dev.ss.size,
                                  brw->isl_dev.ss.align,
                                  out_offset);

   isl_buffer_fill_state(&brw->isl_dev, dw,
                         .address = !bo ? buffer_offset :
                                    brw_state_reloc(&brw->batch,
                                                    *out_offset + brw->isl_dev.ss.addr_offset,
                                                    bo, buffer_offset,
                                                    reloc_flags),
                         .size = buffer_size,
                         .format = surface_format,
                         .stride = pitch,
                         .mocs = brw_get_bo_mocs(devinfo, bo));
}
641
/**
 * Compute the size in bytes of the range of a buffer texture that the
 * surface state should cover: the minimum of the user-requested range, the
 * actual buffer object size, and the implementation limit.
 *
 * Note the (unsigned) cast of obj->BufferSize below: a BufferSize of -1
 * (whole buffer, from glTexBuffer without a range) becomes a huge unsigned
 * value, so MIN3 then picks the real buffer size instead —
 * NOTE(review): presumably intentional; confirm against the glTexBufferRange
 * handling in core Mesa.
 */
static unsigned
buffer_texture_range_size(struct brw_context *brw,
                          struct gl_texture_object *obj)
{
   assert(obj->Target == GL_TEXTURE_BUFFER);
   const unsigned texel_size = _mesa_get_format_bytes(obj->_BufferObjectFormat);
   const unsigned buffer_size = (!obj->BufferObject ? 0 :
                                 obj->BufferObject->Size);

   /* The ARB_texture_buffer_specification says:
    *
    *    "The number of texels in the buffer texture's texel array is given by
    *
    *       floor(<buffer_size> / (<components> * sizeof(<base_type>)),
    *
    *    where <buffer_size> is the size of the buffer object, in basic
    *    machine units and <components> and <base_type> are the element count
    *    and base data type for elements, as specified in Table X.1.  The
    *    number of texels in the texel array is then clamped to the
    *    implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
    *
    * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
    * so that when ISL divides by stride to obtain the number of texels, that
    * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
    */
   return MIN3((unsigned)obj->BufferSize, buffer_size,
               brw->ctx.Const.MaxTextureBufferSize * texel_size);
}
670
/**
 * Emit the SURFACE_STATE for the buffer texture bound to texture unit
 * \p unit, storing its batch offset in *surf_offset.
 *
 * Unsupported formats are reported via _mesa_problem but the surface is
 * still emitted (best-effort, matching the pre-existing behavior).
 */
void
brw_update_buffer_texture_surface(struct gl_context *ctx,
                                  unsigned unit,
                                  uint32_t *surf_offset)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_buffer_object *intel_obj =
      intel_buffer_object(tObj->BufferObject);
   const unsigned size = buffer_texture_range_size(brw, tObj);
   struct brw_bo *bo = NULL;
   mesa_format format = tObj->_BufferObjectFormat;
   const enum isl_format isl_format = brw_isl_format_for_mesa_format(format);
   int texel_size = _mesa_get_format_bytes(format);

   /* Resolve the backing BO for the range being sampled (read-only). */
   if (intel_obj)
      bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size,
                                  false);

   if (isl_format == ISL_FORMAT_UNSUPPORTED) {
      _mesa_problem(NULL, "bad format %s for texture buffer\n",
                    _mesa_get_format_name(format));
   }

   brw_emit_buffer_surface_state(brw, surf_offset, bo,
                                 tObj->BufferOffset,
                                 isl_format,
                                 size,
                                 texel_size,
                                 0);
}
702
/**
 * Set up a binding table entry for use by stream output logic (transform
 * feedback).
 *
 * The surface is a hand-packed Gen6-style BUFFER SURFACE_STATE: the
 * writable entry count is encoded across the width/height/depth fields.
 *
 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
 */
void
brw_update_sol_surface(struct brw_context *brw,
                       struct gl_buffer_object *buffer_obj,
                       uint32_t *out_offset, unsigned num_vector_components,
                       unsigned stride_dwords, unsigned offset_dwords)
{
   struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
   uint32_t offset_bytes = 4 * offset_dwords;
   struct brw_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
                                              offset_bytes,
                                              buffer_obj->Size - offset_bytes,
                                              true);
   uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
   uint32_t pitch_minus_1 = 4*stride_dwords - 1;
   size_t size_dwords = buffer_obj->Size / 4;
   uint32_t buffer_size_minus_1, width, height, depth, surface_format;

   /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
    * too big to map using a single binding table entry?
    */
   assert((size_dwords - offset_dwords) / stride_dwords
          <= BRW_MAX_NUM_BUFFER_ENTRIES);

   if (size_dwords > offset_dwords + num_vector_components) {
      /* There is room for at least 1 transform feedback output in the buffer.
       * Compute the number of additional transform feedback outputs the
       * buffer has room for.
       */
      buffer_size_minus_1 =
         (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
   } else {
      /* There isn't even room for a single transform feedback output in the
       * buffer.  We can't configure the binding table entry to prevent output
       * entirely; we'll have to rely on the geometry shader to detect
       * overflow.  But to minimize the damage in case of a bug, set up the
       * binding table entry to just allow a single output.
       */
      buffer_size_minus_1 = 0;
   }
   /* Split the entry count across the 7-bit width, 13-bit height and 7-bit
    * depth fields of SURFACE_STATE.
    */
   width = buffer_size_minus_1 & 0x7f;
   height = (buffer_size_minus_1 & 0xfff80) >> 7;
   depth = (buffer_size_minus_1 & 0x7f00000) >> 20;

   switch (num_vector_components) {
   case 1:
      surface_format = ISL_FORMAT_R32_FLOAT;
      break;
   case 2:
      surface_format = ISL_FORMAT_R32G32_FLOAT;
      break;
   case 3:
      surface_format = ISL_FORMAT_R32G32B32_FLOAT;
      break;
   case 4:
      surface_format = ISL_FORMAT_R32G32B32A32_FLOAT;
      break;
   default:
      unreachable("Invalid vector size for transform feedback output");
   }

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
             BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
             surface_format << BRW_SURFACE_FORMAT_SHIFT |
             BRW_SURFACE_RC_READ_WRITE;
   surf[1] = brw_state_reloc(&brw->batch,
                             *out_offset + 4, bo, offset_bytes, RELOC_WRITE);
   surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
              height << BRW_SURFACE_HEIGHT_SHIFT);
   surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = 0;
   surf[5] = 0;
}
782
/* Creates a new WM constant buffer reflecting the current fragment program's
 * constants, if needed by the fragment program.
 *
 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
 * state atom.
 */
static void
brw_upload_wm_pull_constants(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->wm.base;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct brw_program *fp =
      (struct brw_program *) brw->programs[MESA_SHADER_FRAGMENT];

   /* BRW_NEW_FS_PROG_DATA */
   struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;

   /* Refresh subroutine uniform indices before uploading constants. */
   _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
   /* _NEW_PROGRAM_CONSTANTS */
   brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
                             stage_state, prog_data);
}
805
/* State atom: re-emit WM pull constants whenever program constants, the
 * batch, the fragment program, or its compiled prog data change.
 */
const struct brw_tracked_state brw_wm_pull_constants = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_pull_constants,
};
815
/**
 * Creates a null renderbuffer surface.
 *
 * This is used when the shader doesn't write to any color output.  An FB
 * write to target 0 will still be emitted, because that's how the thread is
 * terminated (and computed depth is returned), so we need to have the
 * hardware discard the target 0 color output..
 *
 * On Gen6 with multisampling, a real (dummy) render target is substituted
 * to work around GPU hangs with null MSAA targets — see below.
 */
static void
emit_null_surface_state(struct brw_context *brw,
                        const struct gl_framebuffer *fb,
                        uint32_t *out_offset)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *surf = brw_state_batch(brw,
                                    brw->isl_dev.ss.size,
                                    brw->isl_dev.ss.align,
                                    out_offset);

   /* Use the fb dimensions or 1x1x1 */
   const unsigned width = fb ? _mesa_geometric_width(fb) : 1;
   const unsigned height = fb ? _mesa_geometric_height(fb) : 1;
   const unsigned samples = fb ? _mesa_geometric_samples(fb) : 1;

   /* Everything except Gen6 MSAA can use a true null surface. */
   if (devinfo->gen != 6 || samples <= 1) {
      isl_null_fill_state(&brw->isl_dev, surf,
                          isl_extent3d(width, height, 1));
      return;
   }

   /* On Gen6, null render targets seem to cause GPU hangs when multisampling.
    * So work around this problem by rendering into dummy color buffer.
    *
    * To decrease the amount of memory needed by the workaround buffer, we
    * set its pitch to 128 bytes (the width of a Y tile).  This means that
    * the amount of memory needed for the workaround buffer is
    * (width_in_tiles + height_in_tiles - 1) tiles.
    *
    * Note that since the workaround buffer will be interpreted by the
    * hardware as an interleaved multisampled buffer, we need to compute
    * width_in_tiles and height_in_tiles by dividing the width and height
    * by 16 rather than the normal Y-tile size of 32.
    */
   unsigned width_in_tiles = ALIGN(width, 16) / 16;
   unsigned height_in_tiles = ALIGN(height, 16) / 16;
   unsigned pitch_minus_1 = 127;
   unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
   brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
                      size_needed);

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              ISL_FORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
   surf[1] = brw_state_reloc(&brw->batch, *out_offset + 4,
                             brw->wm.multisampled_null_render_target_bo,
                             0, RELOC_WRITE);

   surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   /* From Sandy bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
    * Notes):
    *
    *     If Surface Type is SURFTYPE_NULL, this field must be TRUE
    */
   surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = BRW_SURFACE_MULTISAMPLECOUNT_4;
   surf[5] = 0;
}
885
/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffer support.
 *
 * Gen4/5 path: the SURFACE_STATE dwords are packed by hand, and per-RT
 * blending / write masks live in the surface state rather than in blend
 * state. Returns the batch offset of the emitted surface.
 */
static uint32_t
gen4_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 unsigned unit,
                                 uint32_t surf_index)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;
   uint32_t *surf;
   uint32_t tile_x, tile_y;
   enum isl_format format;
   uint32_t offset;
   /* _NEW_BUFFERS */
   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   /* BRW_NEW_FS_PROG_DATA */

   if (rb->TexImage && !devinfo->has_surface_tile_offset) {
      intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);

      if (tile_x != 0 || tile_y != 0) {
         /* Original gen4 hardware couldn't draw to a non-tile-aligned
          * destination in a miptree unless you actually setup your renderbuffer
          * as a miptree and used the fragile lod/array_index/etc. controls to
          * select the image.  So, instead, we just make a new single-level
          * miptree and render into that.
          */
         intel_renderbuffer_move_to_temp(brw, irb, false);
         assert(irb->align_wa_mt);
         mt = irb->align_wa_mt;
      }
   }

   surf = brw_state_batch(brw, 6 * 4, 32, &offset);

   format = brw->mesa_to_isl_render_format[rb_format];
   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              format << BRW_SURFACE_FORMAT_SHIFT);

   /* reloc */
   assert(mt->offset % mt->cpp == 0);
   surf[1] = brw_state_reloc(&brw->batch, offset + 4, mt->bo,
                             mt->offset +
                             intel_renderbuffer_get_tile_offsets(irb,
                                                                 &tile_x,
                                                                 &tile_y),
                             RELOC_WRITE);

   surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   surf[3] = (brw_get_surface_tiling_bits(mt->surf.tiling) |
              (mt->surf.row_pitch - 1) << BRW_SURFACE_PITCH_SHIFT);

   surf[4] = brw_get_surface_num_multisamples(mt->surf.samples);

   assert(devinfo->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
   /* Note that the low bits of these fields are missing, so
    * there's the possibility of getting in trouble.
    */
   assert(tile_x % 4 == 0);
   assert(tile_y % 2 == 0);
   surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
              (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
              (mt->surf.image_alignment_el.height == 4 ?
                  BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));

   if (devinfo->gen < 6) {
      /* _NEW_COLOR */
      if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
          (ctx->Color.BlendEnabled & (1 << unit)))
         surf[0] |= BRW_SURFACE_BLEND_ENABLED;

      /* Per-channel write disables live in surface dword 0 on gen4/5. */
      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 0))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 1))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 2))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;

      /* As mentioned above, disable writes to the alpha component when the
       * renderbuffer is XRGB.
       */
      if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
          !GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 3)) {
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
      }
   }

   return offset;
}
988
989 static void
990 update_renderbuffer_surfaces(struct brw_context *brw)
991 {
992 const struct gen_device_info *devinfo = &brw->screen->devinfo;
993 const struct gl_context *ctx = &brw->ctx;
994
995 /* _NEW_BUFFERS | _NEW_COLOR */
996 const struct gl_framebuffer *fb = ctx->DrawBuffer;
997
998 /* Render targets always start at binding table index 0. */
999 const unsigned rt_start = 0;
1000
1001 uint32_t *surf_offsets = brw->wm.base.surf_offset;
1002
1003 /* Update surfaces for drawing buffers */
1004 if (fb->_NumColorDrawBuffers >= 1) {
1005 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
1006 struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
1007
1008 if (intel_renderbuffer(rb)) {
1009 surf_offsets[rt_start + i] = devinfo->gen >= 6 ?
1010 gen6_update_renderbuffer_surface(brw, rb, i, rt_start + i) :
1011 gen4_update_renderbuffer_surface(brw, rb, i, rt_start + i);
1012 } else {
1013 emit_null_surface_state(brw, fb, &surf_offsets[rt_start + i]);
1014 }
1015 }
1016 } else {
1017 emit_null_surface_state(brw, fb, &surf_offsets[rt_start]);
1018 }
1019
1020 /* The PIPE_CONTROL command description says:
1021 *
1022 * "Whenever a Binding Table Index (BTI) used by a Render Taget Message
1023 * points to a different RENDER_SURFACE_STATE, SW must issue a Render
1024 * Target Cache Flush by enabling this bit. When render target flush
1025 * is set due to new association of BTI, PS Scoreboard Stall bit must
1026 * be set in this packet."
1027 */
1028 if (devinfo->gen >= 11) {
1029 brw_emit_pipe_control_flush(brw,
1030 PIPE_CONTROL_RENDER_TARGET_FLUSH |
1031 PIPE_CONTROL_STALL_AT_SCOREBOARD);
1032 }
1033
1034 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1035 }
1036
/* Render-target surface atom that also listens to _NEW_COLOR: on gen4/5 the
 * SURFACE_STATE itself encodes blend enable and per-channel color write masks
 * (see gen4_update_renderbuffer_surface above).
 */
const struct brw_tracked_state brw_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR,
      .brw = BRW_NEW_BATCH,
   },
   .emit = update_renderbuffer_surfaces,
};
1045
/* Gen6+ variant of the render-target surface atom: no _NEW_COLOR dependency,
 * but re-emits when auxiliary (aux) surface state changes.
 */
const struct brw_tracked_state gen6_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE,
   },
   .emit = update_renderbuffer_surfaces,
};
1054
1055 static void
1056 update_renderbuffer_read_surfaces(struct brw_context *brw)
1057 {
1058 const struct gl_context *ctx = &brw->ctx;
1059
1060 /* BRW_NEW_FS_PROG_DATA */
1061 const struct brw_wm_prog_data *wm_prog_data =
1062 brw_wm_prog_data(brw->wm.base.prog_data);
1063
1064 if (wm_prog_data->has_render_target_reads &&
1065 !ctx->Extensions.EXT_shader_framebuffer_fetch) {
1066 /* _NEW_BUFFERS */
1067 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1068
1069 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
1070 struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
1071 const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
1072 const unsigned surf_index =
1073 wm_prog_data->binding_table.render_target_read_start + i;
1074 uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];
1075
1076 if (irb) {
1077 const enum isl_format format = brw->mesa_to_isl_render_format[
1078 _mesa_get_render_format(ctx, intel_rb_format(irb))];
1079 assert(isl_format_supports_sampling(&brw->screen->devinfo,
1080 format));
1081
1082 /* Override the target of the texture if the render buffer is a
1083 * single slice of a 3D texture (since the minimum array element
1084 * field of the surface state structure is ignored by the sampler
1085 * unit for 3D textures on some hardware), or if the render buffer
1086 * is a 1D array (since shaders always provide the array index
1087 * coordinate at the Z component to avoid state-dependent
1088 * recompiles when changing the texture target of the
1089 * framebuffer).
1090 */
1091 const GLenum target =
1092 (irb->mt->target == GL_TEXTURE_3D &&
1093 irb->layer_count == 1) ? GL_TEXTURE_2D :
1094 irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
1095 irb->mt->target;
1096
1097 const struct isl_view view = {
1098 .format = format,
1099 .base_level = irb->mt_level - irb->mt->first_level,
1100 .levels = 1,
1101 .base_array_layer = irb->mt_layer,
1102 .array_len = irb->layer_count,
1103 .swizzle = ISL_SWIZZLE_IDENTITY,
1104 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
1105 };
1106
1107 enum isl_aux_usage aux_usage =
1108 intel_miptree_texture_aux_usage(brw, irb->mt, format);
1109 if (brw->draw_aux_usage[i] == ISL_AUX_USAGE_NONE)
1110 aux_usage = ISL_AUX_USAGE_NONE;
1111
1112 brw_emit_surface_state(brw, irb->mt, target, view, aux_usage,
1113 surf_offset, surf_index,
1114 0);
1115
1116 } else {
1117 emit_null_surface_state(brw, fb, surf_offset);
1118 }
1119 }
1120
1121 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1122 }
1123 }
1124
/* Atom for the sampler surfaces used to read back the current render targets
 * (software framebuffer-fetch path; see update_renderbuffer_read_surfaces).
 */
const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = update_renderbuffer_read_surfaces,
};
1134
1135 static bool
1136 is_depth_texture(struct intel_texture_object *iobj)
1137 {
1138 GLenum base_format = _mesa_get_format_base_format(iobj->_Format);
1139 return base_format == GL_DEPTH_COMPONENT ||
1140 (base_format == GL_DEPTH_STENCIL && !iobj->base.StencilSampling);
1141 }
1142
1143 static void
1144 update_stage_texture_surfaces(struct brw_context *brw,
1145 const struct gl_program *prog,
1146 struct brw_stage_state *stage_state,
1147 bool for_gather, uint32_t plane)
1148 {
1149 if (!prog)
1150 return;
1151
1152 struct gl_context *ctx = &brw->ctx;
1153
1154 uint32_t *surf_offset = stage_state->surf_offset;
1155
1156 /* BRW_NEW_*_PROG_DATA */
1157 if (for_gather)
1158 surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
1159 else
1160 surf_offset += stage_state->prog_data->binding_table.plane_start[plane];
1161
1162 unsigned num_samplers = util_last_bit(prog->SamplersUsed);
1163 for (unsigned s = 0; s < num_samplers; s++) {
1164 surf_offset[s] = 0;
1165
1166 if (prog->SamplersUsed & (1 << s)) {
1167 const unsigned unit = prog->SamplerUnits[s];
1168 const bool used_by_txf = prog->info.textures_used_by_txf & (1 << s);
1169 struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
1170 struct intel_texture_object *iobj = intel_texture_object(obj);
1171
1172 /* _NEW_TEXTURE */
1173 if (!obj)
1174 continue;
1175
1176 if ((prog->ShadowSamplers & (1 << s)) && !is_depth_texture(iobj)) {
1177 /* A programming note for the sample_c message says:
1178 *
1179 * "The Surface Format of the associated surface must be
1180 * indicated as supporting shadow mapping as indicated in the
1181 * surface format table."
1182 *
1183 * Accessing non-depth textures via a sampler*Shadow type is
1184 * undefined. GLSL 4.50 page 162 says:
1185 *
1186 * "If a shadow texture call is made to a sampler that does not
1187 * represent a depth texture, then results are undefined."
1188 *
1189 * We give them a null surface (zeros) for undefined. We've seen
1190 * GPU hangs with color buffers and sample_c, so we try and avoid
1191 * those with this hack.
1192 */
1193 emit_null_surface_state(brw, NULL, surf_offset + s);
1194 } else {
1195 brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather,
1196 used_by_txf, plane);
1197 }
1198 }
1199 }
1200 }
1201
1202
1203 /**
1204 * Construct SURFACE_STATE objects for enabled textures.
1205 */
1206 static void
1207 brw_update_texture_surfaces(struct brw_context *brw)
1208 {
1209 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1210
1211 /* BRW_NEW_VERTEX_PROGRAM */
1212 struct gl_program *vs = brw->programs[MESA_SHADER_VERTEX];
1213
1214 /* BRW_NEW_TESS_PROGRAMS */
1215 struct gl_program *tcs = brw->programs[MESA_SHADER_TESS_CTRL];
1216 struct gl_program *tes = brw->programs[MESA_SHADER_TESS_EVAL];
1217
1218 /* BRW_NEW_GEOMETRY_PROGRAM */
1219 struct gl_program *gs = brw->programs[MESA_SHADER_GEOMETRY];
1220
1221 /* BRW_NEW_FRAGMENT_PROGRAM */
1222 struct gl_program *fs = brw->programs[MESA_SHADER_FRAGMENT];
1223
1224 /* _NEW_TEXTURE */
1225 update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
1226 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
1227 update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
1228 update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
1229 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);
1230
1231 /* emit alternate set of surface state for gather. this
1232 * allows the surface format to be overriden for only the
1233 * gather4 messages. */
1234 if (devinfo->gen < 8) {
1235 if (vs && vs->info.uses_texture_gather)
1236 update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
1237 if (tcs && tcs->info.uses_texture_gather)
1238 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
1239 if (tes && tes->info.uses_texture_gather)
1240 update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
1241 if (gs && gs->info.uses_texture_gather)
1242 update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
1243 if (fs && fs->info.uses_texture_gather)
1244 update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
1245 }
1246
1247 if (fs) {
1248 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
1249 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
1250 }
1251
1252 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1253 }
1254
/* Texture surface atom for all render-pipeline stages (VS/TCS/TES/GS/FS). */
const struct brw_tracked_state brw_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_GEOMETRY_PROGRAM |
             BRW_NEW_GS_PROG_DATA |
             BRW_NEW_TESS_PROGRAMS |
             BRW_NEW_TCS_PROG_DATA |
             BRW_NEW_TES_PROG_DATA |
             BRW_NEW_TEXTURE_BUFFER |
             BRW_NEW_VERTEX_PROGRAM |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = brw_update_texture_surfaces,
};
1273
1274 static void
1275 brw_update_cs_texture_surfaces(struct brw_context *brw)
1276 {
1277 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1278
1279 /* BRW_NEW_COMPUTE_PROGRAM */
1280 struct gl_program *cs = brw->programs[MESA_SHADER_COMPUTE];
1281
1282 /* _NEW_TEXTURE */
1283 update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);
1284
1285 /* emit alternate set of surface state for gather. this
1286 * allows the surface format to be overriden for only the
1287 * gather4 messages.
1288 */
1289 if (devinfo->gen < 8) {
1290 if (cs && cs->info.uses_texture_gather)
1291 update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
1292 }
1293
1294 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1295 }
1296
/* Texture surface atom for the compute stage. */
const struct brw_tracked_state brw_cs_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_COMPUTE_PROGRAM |
             BRW_NEW_AUX_STATE,
   },
   .emit = brw_update_cs_texture_surfaces,
};
1306
1307 static void
1308 upload_buffer_surface(struct brw_context *brw,
1309 struct gl_buffer_binding *binding,
1310 uint32_t *out_offset,
1311 enum isl_format format,
1312 unsigned reloc_flags)
1313 {
1314 struct gl_context *ctx = &brw->ctx;
1315
1316 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1317 emit_null_surface_state(brw, NULL, out_offset);
1318 } else {
1319 ptrdiff_t size = binding->BufferObject->Size - binding->Offset;
1320 if (!binding->AutomaticSize)
1321 size = MIN2(size, binding->Size);
1322
1323 struct intel_buffer_object *iobj =
1324 intel_buffer_object(binding->BufferObject);
1325 struct brw_bo *bo =
1326 intel_bufferobj_buffer(brw, iobj, binding->Offset, size,
1327 (reloc_flags & RELOC_WRITE) != 0);
1328
1329 brw_emit_buffer_surface_state(brw, out_offset, bo, binding->Offset,
1330 format, size, 1, reloc_flags);
1331 }
1332 }
1333
1334 void
1335 brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
1336 struct brw_stage_state *stage_state,
1337 struct brw_stage_prog_data *prog_data)
1338 {
1339 struct gl_context *ctx = &brw->ctx;
1340
1341 if (!prog || (prog->info.num_ubos == 0 &&
1342 prog->info.num_ssbos == 0 &&
1343 prog->info.num_abos == 0))
1344 return;
1345
1346 uint32_t *ubo_surf_offsets =
1347 &stage_state->surf_offset[prog_data->binding_table.ubo_start];
1348
1349 for (int i = 0; i < prog->info.num_ubos; i++) {
1350 struct gl_buffer_binding *binding =
1351 &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];
1352 upload_buffer_surface(brw, binding, &ubo_surf_offsets[i],
1353 ISL_FORMAT_R32G32B32A32_FLOAT, 0);
1354 }
1355
1356 uint32_t *abo_surf_offsets =
1357 &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
1358 uint32_t *ssbo_surf_offsets = abo_surf_offsets + prog->info.num_abos;
1359
1360 for (int i = 0; i < prog->info.num_abos; i++) {
1361 struct gl_buffer_binding *binding =
1362 &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
1363 upload_buffer_surface(brw, binding, &abo_surf_offsets[i],
1364 ISL_FORMAT_RAW, RELOC_WRITE);
1365 }
1366
1367 for (int i = 0; i < prog->info.num_ssbos; i++) {
1368 struct gl_buffer_binding *binding =
1369 &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];
1370
1371 upload_buffer_surface(brw, binding, &ssbo_surf_offsets[i],
1372 ISL_FORMAT_RAW, RELOC_WRITE);
1373 }
1374
1375 stage_state->push_constants_dirty = true;
1376 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1377 }
1378
1379 static void
1380 brw_upload_wm_ubo_surfaces(struct brw_context *brw)
1381 {
1382 struct gl_context *ctx = &brw->ctx;
1383 /* _NEW_PROGRAM */
1384 struct gl_program *prog = ctx->FragmentProgram._Current;
1385
1386 /* BRW_NEW_FS_PROG_DATA */
1387 brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
1388 }
1389
/* Fragment-stage buffer (UBO/SSBO/ABO) surface atom. */
const struct brw_tracked_state brw_wm_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_wm_ubo_surfaces,
};
1399
1400 static void
1401 brw_upload_cs_ubo_surfaces(struct brw_context *brw)
1402 {
1403 struct gl_context *ctx = &brw->ctx;
1404 /* _NEW_PROGRAM */
1405 struct gl_program *prog =
1406 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1407
1408 /* BRW_NEW_CS_PROG_DATA */
1409 brw_upload_ubo_surfaces(brw, prog, &brw->cs.base, brw->cs.base.prog_data);
1410 }
1411
/* Compute-stage buffer (UBO/SSBO/ABO) surface atom. */
const struct brw_tracked_state brw_cs_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_cs_ubo_surfaces,
};
1421
1422 static void
1423 brw_upload_cs_image_surfaces(struct brw_context *brw)
1424 {
1425 /* _NEW_PROGRAM */
1426 const struct gl_program *cp = brw->programs[MESA_SHADER_COMPUTE];
1427
1428 if (cp) {
1429 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1430 brw_upload_image_surfaces(brw, cp, &brw->cs.base,
1431 brw->cs.base.prog_data);
1432 }
1433 }
1434
/* Compute-stage image surface atom. */
const struct brw_tracked_state brw_cs_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_AUX_STATE |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_cs_image_surfaces,
};
1445
1446 static uint32_t
1447 get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
1448 {
1449 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1450 enum isl_format hw_format = brw_isl_format_for_mesa_format(format);
1451 if (access == GL_WRITE_ONLY) {
1452 return hw_format;
1453 } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
1454 /* Typed surface reads support a very limited subset of the shader
1455 * image formats. Translate it into the closest format the
1456 * hardware supports.
1457 */
1458 return isl_lower_storage_image_format(devinfo, hw_format);
1459 } else {
1460 /* The hardware doesn't actually support a typed format that we can use
1461 * so we have to fall back to untyped read/write messages.
1462 */
1463 return ISL_FORMAT_RAW;
1464 }
1465 }
1466
1467 static void
1468 update_default_image_param(struct brw_context *brw,
1469 struct gl_image_unit *u,
1470 unsigned surface_idx,
1471 struct brw_image_param *param)
1472 {
1473 memset(param, 0, sizeof(*param));
1474 param->surface_idx = surface_idx;
1475 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
1476 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
1477 * detailed explanation of these parameters.
1478 */
1479 param->swizzling[0] = 0xff;
1480 param->swizzling[1] = 0xff;
1481 }
1482
1483 static void
1484 update_buffer_image_param(struct brw_context *brw,
1485 struct gl_image_unit *u,
1486 unsigned surface_idx,
1487 struct brw_image_param *param)
1488 {
1489 const unsigned size = buffer_texture_range_size(brw, u->TexObj);
1490 update_default_image_param(brw, u, surface_idx, param);
1491
1492 param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
1493 param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
1494 }
1495
1496 static unsigned
1497 get_image_num_layers(const struct intel_mipmap_tree *mt, GLenum target,
1498 unsigned level)
1499 {
1500 if (target == GL_TEXTURE_CUBE_MAP)
1501 return 6;
1502
1503 return target == GL_TEXTURE_3D ?
1504 minify(mt->surf.logical_level0_px.depth, level) :
1505 mt->surf.logical_level0_px.array_len;
1506 }
1507
/* Emit the SURFACE_STATE and fill the brw_image_param metadata for a single
 * image unit.  Invalid/unbound units get a null surface and default params.
 */
static void
update_image_surface(struct brw_context *brw,
                     struct gl_image_unit *u,
                     GLenum access,
                     unsigned surface_idx,
                     uint32_t *surf_offset,
                     struct brw_image_param *param)
{
   if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
      struct gl_texture_object *obj = u->TexObj;
      /* May come back as ISL_FORMAT_RAW when no typed format supports the
       * requested (non-write-only) access -- see get_image_format().
       */
      const unsigned format = get_image_format(brw, u->_ActualFormat, access);

      if (obj->Target == GL_TEXTURE_BUFFER) {
         struct intel_buffer_object *intel_obj =
            intel_buffer_object(obj->BufferObject);
         /* RAW surfaces are byte-addressed, so use a 1-byte texel. */
         const unsigned texel_size = (format == ISL_FORMAT_RAW ? 1 :
                                      _mesa_get_format_bytes(u->_ActualFormat));
         const unsigned buffer_size = buffer_texture_range_size(brw, obj);

         brw_emit_buffer_surface_state(
            brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
            format, buffer_size, texel_size,
            access != GL_READ_ONLY ? RELOC_WRITE : 0);

         update_buffer_image_param(brw, u, surface_idx, param);

      } else {
         struct intel_texture_object *intel_obj = intel_texture_object(obj);
         struct intel_mipmap_tree *mt = intel_obj->mt;
         /* Layered bindings expose every layer of the level; non-layered
          * bindings expose exactly one.
          */
         const unsigned num_layers = u->Layered ?
            get_image_num_layers(mt, obj->Target, u->Level) : 1;

         struct isl_view view = {
            .format = format,
            .base_level = obj->MinLevel + u->Level,
            .levels = 1,
            .base_array_layer = obj->MinLayer + u->_Layer,
            .array_len = num_layers,
            .swizzle = ISL_SWIZZLE_IDENTITY,
            .usage = ISL_SURF_USAGE_STORAGE_BIT,
         };

         if (format == ISL_FORMAT_RAW) {
            /* Untyped fallback: bind the whole miptree BO (from its offset
             * to the end) as a raw buffer surface.
             */
            brw_emit_buffer_surface_state(
               brw, surf_offset, mt->bo, mt->offset,
               format, mt->bo->size - mt->offset, 1 /* pitch */,
               access != GL_READ_ONLY ? RELOC_WRITE : 0);

         } else {
            const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
            /* The surface is emitted with ISL_AUX_USAGE_NONE, so any
             * unresolved fast-clear/compressed color data must already have
             * been resolved.
             */
            assert(!intel_miptree_has_color_unresolved(mt,
                                                       view.base_level, 1,
                                                       view.base_array_layer,
                                                       view.array_len));
            brw_emit_surface_state(brw, mt, mt->target, view,
                                   ISL_AUX_USAGE_NONE,
                                   surf_offset, surf_index,
                                   access == GL_READ_ONLY ? 0 : RELOC_WRITE);
         }

         isl_surf_fill_image_param(&brw->isl_dev, param, &mt->surf, &view);
         param->surface_idx = surface_idx;
      }

   } else {
      emit_null_surface_state(brw, NULL, surf_offset);
      update_default_image_param(brw, u, surface_idx, param);
   }
}
1577
1578 void
1579 brw_upload_image_surfaces(struct brw_context *brw,
1580 const struct gl_program *prog,
1581 struct brw_stage_state *stage_state,
1582 struct brw_stage_prog_data *prog_data)
1583 {
1584 assert(prog);
1585 struct gl_context *ctx = &brw->ctx;
1586
1587 if (prog->info.num_images) {
1588 for (unsigned i = 0; i < prog->info.num_images; i++) {
1589 struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
1590 const unsigned surf_idx = prog_data->binding_table.image_start + i;
1591
1592 update_image_surface(brw, u, prog->sh.ImageAccess[i],
1593 surf_idx,
1594 &stage_state->surf_offset[surf_idx],
1595 &stage_state->image_param[i]);
1596 }
1597
1598 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1599 /* This may have changed the image metadata dependent on the context
1600 * image unit state and passed to the program as uniforms, make sure
1601 * that push and pull constants are reuploaded.
1602 */
1603 brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
1604 }
1605 }
1606
1607 static void
1608 brw_upload_wm_image_surfaces(struct brw_context *brw)
1609 {
1610 /* BRW_NEW_FRAGMENT_PROGRAM */
1611 const struct gl_program *wm = brw->programs[MESA_SHADER_FRAGMENT];
1612
1613 if (wm) {
1614 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1615 brw_upload_image_surfaces(brw, wm, &brw->wm.base,
1616 brw->wm.base.prog_data);
1617 }
1618 }
1619
/* Fragment-stage image surface atom. */
const struct brw_tracked_state brw_wm_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_wm_image_surfaces,
};
1631
1632 static void
1633 brw_upload_cs_work_groups_surface(struct brw_context *brw)
1634 {
1635 struct gl_context *ctx = &brw->ctx;
1636 /* _NEW_PROGRAM */
1637 struct gl_program *prog =
1638 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1639 /* BRW_NEW_CS_PROG_DATA */
1640 const struct brw_cs_prog_data *cs_prog_data =
1641 brw_cs_prog_data(brw->cs.base.prog_data);
1642
1643 if (prog && cs_prog_data->uses_num_work_groups) {
1644 const unsigned surf_idx =
1645 cs_prog_data->binding_table.work_groups_start;
1646 uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
1647 struct brw_bo *bo;
1648 uint32_t bo_offset;
1649
1650 if (brw->compute.num_work_groups_bo == NULL) {
1651 bo = NULL;
1652 brw_upload_data(&brw->upload,
1653 (void *)brw->compute.num_work_groups,
1654 3 * sizeof(GLuint),
1655 sizeof(GLuint),
1656 &bo,
1657 &bo_offset);
1658 } else {
1659 bo = brw->compute.num_work_groups_bo;
1660 bo_offset = brw->compute.num_work_groups_offset;
1661 }
1662
1663 brw_emit_buffer_surface_state(brw, surf_offset,
1664 bo, bo_offset,
1665 ISL_FORMAT_RAW,
1666 3 * sizeof(GLuint), 1,
1667 RELOC_WRITE);
1668 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1669 }
1670 }
1671
/* Atom for the gl_NumWorkGroups buffer surface. */
const struct brw_tracked_state brw_cs_work_groups_surface = {
   .dirty = {
      .brw = BRW_NEW_CS_PROG_DATA |
             BRW_NEW_CS_WORK_GROUPS
   },
   .emit = brw_upload_cs_work_groups_surface,
};