i965: Generalize intel_upload.c to support multiple uploaders.
[mesa.git] src/mesa/drivers/dri/i965/brw_wm_surface_state.c
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
 /*
  * Authors:
  *   Keith Whitwell <keithw@vmware.com>
  */


#include "compiler/nir/nir.h"
#include "main/context.h"
#include "main/blend.h"
#include "main/mtypes.h"
#include "main/samplerobj.h"
#include "main/shaderimage.h"
#include "main/teximage.h"
#include "program/prog_parameter.h"
#include "program/prog_instruction.h"
#include "main/framebuffer.h"
#include "main/shaderapi.h"

#include "isl/isl.h"

#include "intel_mipmap_tree.h"
#include "intel_batchbuffer.h"
#include "intel_tex.h"
#include "intel_fbo.h"
#include "intel_buffer_objects.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "brw_wm.h"

static const uint32_t wb_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_WB,
   [9] = SKL_MOCS_WB,
   [10] = CNL_MOCS_WB,
   [11] = ICL_MOCS_WB,
};

static const uint32_t pte_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_PTE,
   [9] = SKL_MOCS_PTE,
   [10] = CNL_MOCS_PTE,
   [11] = ICL_MOCS_PTE,
};

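/* Pick the MOCS (Memory Object Control State) entry for a BO from the
 * per-gen tables above, indexed by devinfo->gen: externally visible BOs
 * (e.g. shared scanout buffers) get the PTE entry so the kernel's caching
 * choice is respected, everything else gets regular write-back caching.
 * A usage sketch:
 *
 *    .mocs = brw_get_bo_mocs(devinfo, mt->bo)
 *
 * as done in brw_emit_surface_state() below.
 */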
uint32_t
brw_get_bo_mocs(const struct gen_device_info *devinfo, struct brw_bo *bo)
{
   return (bo && bo->external ? pte_mocs : wb_mocs)[devinfo->gen];
}

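/* Fetch the isl_surf describing 'mt' and, when the layout implied by
 * 'target' does not match the miptree's actual layout, rewrite the surface
 * (plus the view, tile offsets, and base offset) so the single requested
 * level/layer can be accessed as the base of a one-level, one-layer surface.
 */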
static void
get_isl_surf(struct brw_context *brw, struct intel_mipmap_tree *mt,
             GLenum target, struct isl_view *view,
             uint32_t *tile_x, uint32_t *tile_y,
             uint32_t *offset, struct isl_surf *surf)
{
   *surf = mt->surf;

   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const enum isl_dim_layout dim_layout =
      get_isl_dim_layout(devinfo, mt->surf.tiling, target);

   surf->dim = get_isl_surf_dim(target);

   if (surf->dim_layout == dim_layout)
      return;

   /* The layout of the specified texture target is not compatible with the
    * actual layout of the miptree structure in memory -- You're entering
    * dangerous territory, this can only possibly work if you only intended
    * to access a single level and slice of the texture, and the hardware
    * supports the tile offset feature in order to allow non-tile-aligned
    * base offsets, since we'll have to point the hardware to the first
    * texel of the level instead of relying on the usual base level/layer
    * controls.
    */
   assert(devinfo->has_surface_tile_offset);
   assert(view->levels == 1 && view->array_len == 1);
   assert(*tile_x == 0 && *tile_y == 0);

   *offset += intel_miptree_get_tile_offsets(mt, view->base_level,
                                             view->base_array_layer,
                                             tile_x, tile_y);

   /* Minify the logical dimensions of the texture. */
   const unsigned l = view->base_level - mt->first_level;
   surf->logical_level0_px.width = minify(surf->logical_level0_px.width, l);
   surf->logical_level0_px.height = surf->dim <= ISL_SURF_DIM_1D ? 1 :
      minify(surf->logical_level0_px.height, l);
   surf->logical_level0_px.depth = surf->dim <= ISL_SURF_DIM_2D ? 1 :
      minify(surf->logical_level0_px.depth, l);
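   /* For illustration: exposing level 2 of a 64x64 miptree (first_level 0)
    * gives l = 2, so logical_level0_px becomes 16x16 -- the size of the
    * requested level, which is about to be re-described as the base level
    * of a single-level surface.
    */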

   /* Only the base level and layer can be addressed with the overridden
    * layout.
    */
   surf->logical_level0_px.array_len = 1;
   surf->levels = 1;
   surf->dim_layout = dim_layout;

   /* The requested slice of the texture is now at the base level and
    * layer.
    */
   view->base_level = 0;
   view->base_array_layer = 0;
}

static void
brw_emit_surface_state(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       GLenum target, struct isl_view view,
                       enum isl_aux_usage aux_usage,
                       uint32_t *surf_offset, int surf_index,
                       unsigned reloc_flags)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t tile_x = mt->level[0].level_x;
   uint32_t tile_y = mt->level[0].level_y;
   uint32_t offset = mt->offset;

   struct isl_surf surf;

   get_isl_surf(brw, mt, target, &view, &tile_x, &tile_y, &offset, &surf);

   union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };

   struct brw_bo *aux_bo = NULL;
   struct isl_surf *aux_surf = NULL;
   uint64_t aux_offset = 0;
   switch (aux_usage) {
   case ISL_AUX_USAGE_MCS:
   case ISL_AUX_USAGE_CCS_D:
   case ISL_AUX_USAGE_CCS_E:
      aux_surf = &mt->mcs_buf->surf;
      aux_bo = mt->mcs_buf->bo;
      aux_offset = mt->mcs_buf->offset;
      break;

   case ISL_AUX_USAGE_HIZ:
      aux_surf = &mt->hiz_buf->surf;
      aux_bo = mt->hiz_buf->bo;
      aux_offset = 0;
      break;

   case ISL_AUX_USAGE_NONE:
      break;
   }

   if (aux_usage != ISL_AUX_USAGE_NONE) {
      /* We only really need a clear color if we also have an auxiliary
       * surface. Without one, it does nothing.
       */
      clear_color = mt->fast_clear_color;
   }

   void *state = brw_state_batch(brw,
                                 brw->isl_dev.ss.size,
                                 brw->isl_dev.ss.align,
                                 surf_offset);

   isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
                       .address = brw_state_reloc(&brw->batch,
                                                  *surf_offset + brw->isl_dev.ss.addr_offset,
                                                  mt->bo, offset, reloc_flags),
                       .aux_surf = aux_surf, .aux_usage = aux_usage,
                       .aux_address = aux_offset,
                       .mocs = brw_get_bo_mocs(devinfo, mt->bo),
                       .clear_color = clear_color,
                       .x_offset_sa = tile_x, .y_offset_sa = tile_y);
   if (aux_surf) {
      /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
       * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
       * contain other control information. Since buffer addresses are always
       * on 4k boundaries (and thus have their lower 12 bits zero), we can use
       * an ordinary reloc to do the necessary address translation.
       *
       * FIXME: move to the point of assignment.
       */
      assert((aux_offset & 0xfff) == 0);

      if (devinfo->gen >= 8) {
         uint64_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
         *aux_addr = brw_state_reloc(&brw->batch,
                                     *surf_offset +
                                     brw->isl_dev.ss.aux_addr_offset,
                                     aux_bo, *aux_addr,
                                     reloc_flags);
      } else {
         uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
         *aux_addr = brw_state_reloc(&brw->batch,
                                     *surf_offset +
                                     brw->isl_dev.ss.aux_addr_offset,
                                     aux_bo, *aux_addr,
                                     reloc_flags);
      }
   }
}

static uint32_t
gen6_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 unsigned unit,
                                 uint32_t surf_index)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;

   assert(brw_render_target_supported(brw, rb));

   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }
   enum isl_format isl_format = brw->mesa_to_isl_render_format[rb_format];

   struct isl_view view = {
      .format = isl_format,
      .base_level = irb->mt_level - irb->mt->first_level,
      .levels = 1,
      .base_array_layer = irb->mt_layer,
      .array_len = MAX2(irb->layer_count, 1),
      .swizzle = ISL_SWIZZLE_IDENTITY,
      .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
   };

   uint32_t offset;
   brw_emit_surface_state(brw, mt, mt->target, view,
                          brw->draw_aux_usage[unit],
                          &offset, surf_index,
                          RELOC_WRITE);
   return offset;
}

GLuint
translate_tex_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_1D:
   case GL_TEXTURE_1D_ARRAY_EXT:
      return BRW_SURFACE_1D;

   case GL_TEXTURE_RECTANGLE_NV:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_2D:
   case GL_TEXTURE_2D_ARRAY_EXT:
   case GL_TEXTURE_EXTERNAL_OES:
   case GL_TEXTURE_2D_MULTISAMPLE:
   case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_3D:
      return BRW_SURFACE_3D;

   case GL_TEXTURE_CUBE_MAP:
   case GL_TEXTURE_CUBE_MAP_ARRAY:
      return BRW_SURFACE_CUBE;

   default:
      unreachable("not reached");
   }
}

uint32_t
brw_get_surface_tiling_bits(enum isl_tiling tiling)
{
   switch (tiling) {
   case ISL_TILING_X:
      return BRW_SURFACE_TILED;
   case ISL_TILING_Y0:
      return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
   default:
      return 0;
   }
}


uint32_t
brw_get_surface_num_multisamples(unsigned num_samples)
{
   if (num_samples > 1)
      return BRW_SURFACE_MULTISAMPLECOUNT_4;
   else
      return BRW_SURFACE_MULTISAMPLECOUNT_1;
}

/**
 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
 * swizzling.
 */
int
brw_get_texture_swizzle(const struct gl_context *ctx,
                        const struct gl_texture_object *t)
{
   const struct gl_texture_image *img = t->Image[0][t->BaseLevel];

   int swizzles[SWIZZLE_NIL + 1] = {
      SWIZZLE_X,
      SWIZZLE_Y,
      SWIZZLE_Z,
      SWIZZLE_W,
      SWIZZLE_ZERO,
      SWIZZLE_ONE,
      SWIZZLE_NIL
   };

   if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
       img->_BaseFormat == GL_DEPTH_STENCIL) {
      GLenum depth_mode = t->DepthMode;

      /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
       * with depth component data specified with a sized internal format.
       * Otherwise, it's left at the old default, GL_LUMINANCE.
       */
      if (_mesa_is_gles3(ctx) &&
          img->InternalFormat != GL_DEPTH_COMPONENT &&
          img->InternalFormat != GL_DEPTH_STENCIL) {
         depth_mode = GL_RED;
      }

      switch (depth_mode) {
      case GL_ALPHA:
         swizzles[0] = SWIZZLE_ZERO;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_LUMINANCE:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
         break;
      case GL_INTENSITY:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_RED:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_ONE;
         break;
      }
   }

   GLenum datatype = _mesa_get_format_datatype(img->TexFormat);

   /* If the texture's format is alpha-only, force R, G, and B to
    * 0.0. Similarly, if the texture's format has no alpha channel,
    * force the alpha value read to 1.0. This allows for the
    * implementation to use an RGBA texture for any of these formats
    * without leaking any unexpected values.
    */
   switch (img->_BaseFormat) {
   case GL_ALPHA:
      swizzles[0] = SWIZZLE_ZERO;
      swizzles[1] = SWIZZLE_ZERO;
      swizzles[2] = SWIZZLE_ZERO;
      break;
   case GL_LUMINANCE:
      if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
      }
      break;
   case GL_LUMINANCE_ALPHA:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_W;
      }
      break;
   case GL_INTENSITY:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
      }
      break;
   case GL_RED:
   case GL_RG:
   case GL_RGB:
      if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0 ||
          img->TexFormat == MESA_FORMAT_RGB_DXT1 ||
          img->TexFormat == MESA_FORMAT_SRGB_DXT1)
         swizzles[3] = SWIZZLE_ONE;
      break;
   }

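   /* Compose the format-based swizzles computed above with the user's
    * texture swizzle: each requested channel looks up what it should
    * actually read. For example, with depth mode GL_RED (X, 0, 0, 1),
    * a user swizzle requesting ALPHA in the red channel resolves to
    * swizzles[SWIZZLE_W] == SWIZZLE_ONE, i.e. constant 1.0.
    */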
   return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
                        swizzles[GET_SWZ(t->_Swizzle, 1)],
                        swizzles[GET_SWZ(t->_Swizzle, 2)],
                        swizzles[GET_SWZ(t->_Swizzle, 3)]);
}

/**
 * Convert a swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
436 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED). The mappings are
437 *
438 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
439 * 0 1 2 3 4 5
440 * 4 5 6 7 0 1
441 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
442 *
443 * which is simply adding 4 then modding by 8 (or anding with 7).
444 *
445 * We then may need to apply workarounds for textureGather hardware bugs.
446 */
447 static unsigned
448 swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
449 {
450 unsigned scs = (swizzle + 4) & 7;
451
452 return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
453 }
454
455 static void brw_update_texture_surface(struct gl_context *ctx,
456 unsigned unit,
457 uint32_t *surf_offset,
458 bool for_gather,
459 bool for_txf,
460 uint32_t plane)
461 {
462 struct brw_context *brw = brw_context(ctx);
463 const struct gen_device_info *devinfo = &brw->screen->devinfo;
464 struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
465
466 if (obj->Target == GL_TEXTURE_BUFFER) {
467 brw_update_buffer_texture_surface(ctx, unit, surf_offset);
468
469 } else {
470 struct intel_texture_object *intel_obj = intel_texture_object(obj);
471 struct intel_mipmap_tree *mt = intel_obj->mt;
472
473 if (plane > 0) {
474 if (mt->plane[plane - 1] == NULL)
475 return;
476 mt = mt->plane[plane - 1];
477 }
478
479 struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
480 /* If this is a view with restricted NumLayers, then our effective depth
481 * is not just the miptree depth.
482 */
483 unsigned view_num_layers;
484 if (obj->Immutable && obj->Target != GL_TEXTURE_3D) {
485 view_num_layers = obj->NumLayers;
486 } else {
487 view_num_layers = mt->surf.dim == ISL_SURF_DIM_3D ?
488 mt->surf.logical_level0_px.depth :
489 mt->surf.logical_level0_px.array_len;
490 }
491
492 /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
493 * texturing functions that return a float, as our code generation always
494 * selects the .x channel (which would always be 0).
495 */
496 struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
497 const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
498 (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
499 firstImage->_BaseFormat == GL_DEPTH_STENCIL);
500 const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
501 brw_get_texture_swizzle(&brw->ctx, obj));
502
503 mesa_format mesa_fmt;
504 if (firstImage->_BaseFormat == GL_DEPTH_STENCIL ||
505 firstImage->_BaseFormat == GL_DEPTH_COMPONENT) {
506 /* The format from intel_obj may be a combined depth stencil format
507 * when we just want depth. Pull it from the miptree instead. This
508 * is safe because texture views aren't allowed on depth/stencil.
509 */
510 mesa_fmt = mt->format;
511 } else if (mt->etc_format != MESA_FORMAT_NONE) {
512 mesa_fmt = mt->format;
513 } else if (plane > 0) {
514 mesa_fmt = mt->format;
515 } else {
516 mesa_fmt = intel_obj->_Format;
517 }
518 enum isl_format format = translate_tex_format(brw, mesa_fmt,
519 for_txf ? GL_DECODE_EXT :
520 sampler->sRGBDecode);
521
522 /* Implement gen6 and gen7 gather work-around */
523 bool need_green_to_blue = false;
524 if (for_gather) {
525 if (devinfo->gen == 7 && (format == ISL_FORMAT_R32G32_FLOAT ||
526 format == ISL_FORMAT_R32G32_SINT ||
527 format == ISL_FORMAT_R32G32_UINT)) {
528 format = ISL_FORMAT_R32G32_FLOAT_LD;
529 need_green_to_blue = devinfo->is_haswell;
530 } else if (devinfo->gen == 6) {
531 /* Sandybridge's gather4 message is broken for integer formats.
532 * To work around this, we pretend the surface is UNORM for
533 * 8 or 16-bit formats, and emit shader instructions to recover
534 * the real INT/UINT value. For 32-bit formats, we pretend
535 * the surface is FLOAT, and simply reinterpret the resulting
536 * bits.
537 */
538 switch (format) {
539 case ISL_FORMAT_R8_SINT:
540 case ISL_FORMAT_R8_UINT:
541 format = ISL_FORMAT_R8_UNORM;
542 break;
543
544 case ISL_FORMAT_R16_SINT:
545 case ISL_FORMAT_R16_UINT:
546 format = ISL_FORMAT_R16_UNORM;
547 break;
548
549 case ISL_FORMAT_R32_SINT:
550 case ISL_FORMAT_R32_UINT:
551 format = ISL_FORMAT_R32_FLOAT;
552 break;
553
554 default:
555 break;
556 }
557 }
558 }
559
560 if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
561 if (devinfo->gen <= 7) {
562 assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
563 mt = mt->r8stencil_mt;
564 } else {
565 mt = mt->stencil_mt;
566 }
567 format = ISL_FORMAT_R8_UINT;
568 } else if (devinfo->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
569 assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
570 mt = mt->r8stencil_mt;
571 format = ISL_FORMAT_R8_UINT;
572 }
573
574 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
575
576 struct isl_view view = {
577 .format = format,
578 .base_level = obj->MinLevel + obj->BaseLevel,
579 .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
580 .base_array_layer = obj->MinLayer,
581 .array_len = view_num_layers,
582 .swizzle = {
583 .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
584 .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
585 .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
586 .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
587 },
588 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
589 };
590
591 if (obj->Target == GL_TEXTURE_CUBE_MAP ||
592 obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
593 view.usage |= ISL_SURF_USAGE_CUBE_BIT;
594
595 enum isl_aux_usage aux_usage =
596 intel_miptree_texture_aux_usage(brw, mt, format);
597
598 brw_emit_surface_state(brw, mt, mt->target, view, aux_usage,
599 surf_offset, surf_index,
600 0);
601 }
602 }
603
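/* Fill out a SURFACE_STATE for a buffer (texel buffer, UBO/SSBO, or raw
 * buffer). If 'bo' is NULL the address field is just 'buffer_offset' with
 * no relocation, which callers use for unbound/dummy buffers. 'pitch' is
 * the per-element stride in bytes.
 */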
void
brw_emit_buffer_surface_state(struct brw_context *brw,
                              uint32_t *out_offset,
                              struct brw_bo *bo,
                              unsigned buffer_offset,
                              unsigned surface_format,
                              unsigned buffer_size,
                              unsigned pitch,
                              unsigned reloc_flags)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *dw = brw_state_batch(brw,
                                  brw->isl_dev.ss.size,
                                  brw->isl_dev.ss.align,
                                  out_offset);

   isl_buffer_fill_state(&brw->isl_dev, dw,
                         .address = !bo ? buffer_offset :
                                    brw_state_reloc(&brw->batch,
                                                    *out_offset + brw->isl_dev.ss.addr_offset,
                                                    bo, buffer_offset,
                                                    reloc_flags),
                         .size = buffer_size,
                         .format = surface_format,
                         .stride = pitch,
                         .mocs = brw_get_bo_mocs(devinfo, bo));
}

void
brw_update_buffer_texture_surface(struct gl_context *ctx,
                                  unsigned unit,
                                  uint32_t *surf_offset)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_buffer_object *intel_obj =
      intel_buffer_object(tObj->BufferObject);
   uint32_t size = tObj->BufferSize;
   struct brw_bo *bo = NULL;
   mesa_format format = tObj->_BufferObjectFormat;
   const enum isl_format isl_format = brw_isl_format_for_mesa_format(format);
   int texel_size = _mesa_get_format_bytes(format);

   if (intel_obj) {
      size = MIN2(size, intel_obj->Base.Size);
      bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size,
                                  false);
   }

   /* The ARB_texture_buffer_specification says:
    *
    *    "The number of texels in the buffer texture's texel array is given by
    *
    *       floor(<buffer_size> / (<components> * sizeof(<base_type>)),
    *
    *     where <buffer_size> is the size of the buffer object, in basic
    *     machine units and <components> and <base_type> are the element count
    *     and base data type for elements, as specified in Table X.1. The
    *     number of texels in the texel array is then clamped to the
    *     implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
    *
    * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
    * so that when ISL divides by stride to obtain the number of texels, that
    * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
    */
   size = MIN2(size, ctx->Const.MaxTextureBufferSize * (unsigned) texel_size);
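   /* Illustrative example (the actual limit is driver-dependent): with a
    * MaxTextureBufferSize of 2^27 texels and an RGBA32F buffer texture
    * (16-byte texels), the byte size is clamped to 2^31, so ISL's
    * size / stride division yields at most 2^27 texels.
    */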

   if (isl_format == ISL_FORMAT_UNSUPPORTED) {
      _mesa_problem(NULL, "bad format %s for texture buffer\n",
                    _mesa_get_format_name(format));
   }

   brw_emit_buffer_surface_state(brw, surf_offset, bo,
                                 tObj->BufferOffset,
                                 isl_format,
                                 size,
                                 texel_size,
                                 0);
}

/**
 * Set up a binding table entry for use by stream output logic (transform
 * feedback).
 *
 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
 */
void
brw_update_sol_surface(struct brw_context *brw,
                       struct gl_buffer_object *buffer_obj,
                       uint32_t *out_offset, unsigned num_vector_components,
                       unsigned stride_dwords, unsigned offset_dwords)
{
   struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
   uint32_t offset_bytes = 4 * offset_dwords;
   struct brw_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
                                              offset_bytes,
                                              buffer_obj->Size - offset_bytes,
                                              true);
   uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
   uint32_t pitch_minus_1 = 4 * stride_dwords - 1;
   size_t size_dwords = buffer_obj->Size / 4;
   uint32_t buffer_size_minus_1, width, height, depth, surface_format;

   /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
    * too big to map using a single binding table entry?
    */
   assert((size_dwords - offset_dwords) / stride_dwords
          <= BRW_MAX_NUM_BUFFER_ENTRIES);

   if (size_dwords > offset_dwords + num_vector_components) {
      /* There is room for at least 1 transform feedback output in the buffer.
       * Compute the number of additional transform feedback outputs the
       * buffer has room for.
       */
      buffer_size_minus_1 =
         (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
   } else {
      /* There isn't even room for a single transform feedback output in the
       * buffer. We can't configure the binding table entry to prevent output
       * entirely; we'll have to rely on the geometry shader to detect
       * overflow. But to minimize the damage in case of a bug, set up the
       * binding table entry to just allow a single output.
       */
      buffer_size_minus_1 = 0;
   }
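   /* The gen4-style buffer SURFACE_STATE has no single "number of entries"
    * field; the entry count minus one is split across the width (bits 6:0),
    * height (bits 19:7), and depth (bits 26:20) fields. For illustration,
    * buffer_size_minus_1 = 0x123456 is programmed as width = 0x56,
    * height = 0x468, depth = 0x1.
    */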
   width = buffer_size_minus_1 & 0x7f;
   height = (buffer_size_minus_1 & 0xfff80) >> 7;
   depth = (buffer_size_minus_1 & 0x7f00000) >> 20;

   switch (num_vector_components) {
   case 1:
      surface_format = ISL_FORMAT_R32_FLOAT;
      break;
   case 2:
      surface_format = ISL_FORMAT_R32G32_FLOAT;
      break;
   case 3:
      surface_format = ISL_FORMAT_R32G32B32_FLOAT;
      break;
   case 4:
      surface_format = ISL_FORMAT_R32G32B32A32_FLOAT;
      break;
   default:
      unreachable("Invalid vector size for transform feedback output");
   }

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
             BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
             surface_format << BRW_SURFACE_FORMAT_SHIFT |
             BRW_SURFACE_RC_READ_WRITE;
   surf[1] = brw_state_reloc(&brw->batch,
                             *out_offset + 4, bo, offset_bytes, RELOC_WRITE);
   surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
              height << BRW_SURFACE_HEIGHT_SHIFT);
   surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = 0;
   surf[5] = 0;
}

/* Creates a new WM constant buffer reflecting the current fragment program's
 * constants, if needed by the fragment program.
 *
 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
 * state atom.
 */
static void
brw_upload_wm_pull_constants(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->wm.base;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct brw_program *fp =
      (struct brw_program *) brw->programs[MESA_SHADER_FRAGMENT];

   /* BRW_NEW_FS_PROG_DATA */
   struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;

   _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
   /* _NEW_PROGRAM_CONSTANTS */
   brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
                             stage_state, prog_data);
}

const struct brw_tracked_state brw_wm_pull_constants = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_pull_constants,
};

/**
 * Creates a null renderbuffer surface.
 *
 * This is used when the shader doesn't write to any color output. An FB
 * write to target 0 will still be emitted, because that's how the thread is
 * terminated (and computed depth is returned), so we need to have the
804 */
805 static void
806 emit_null_surface_state(struct brw_context *brw,
807 const struct gl_framebuffer *fb,
808 uint32_t *out_offset)
809 {
810 const struct gen_device_info *devinfo = &brw->screen->devinfo;
811 uint32_t *surf = brw_state_batch(brw,
812 brw->isl_dev.ss.size,
813 brw->isl_dev.ss.align,
814 out_offset);
815
816 /* Use the fb dimensions or 1x1x1 */
817 const unsigned width = fb ? _mesa_geometric_width(fb) : 1;
818 const unsigned height = fb ? _mesa_geometric_height(fb) : 1;
819 const unsigned samples = fb ? _mesa_geometric_samples(fb) : 1;
820
821 if (devinfo->gen != 6 || samples <= 1) {
822 isl_null_fill_state(&brw->isl_dev, surf,
823 isl_extent3d(width, height, 1));
824 return;
825 }
826
827 /* On Gen6, null render targets seem to cause GPU hangs when multisampling.
    * So work around this problem by rendering into a dummy color buffer.
    *
    * To decrease the amount of memory needed by the workaround buffer, we
    * set its pitch to 128 bytes (the width of a Y tile). This means that
    * the amount of memory needed for the workaround buffer is
    * (width_in_tiles + height_in_tiles - 1) tiles.
    *
    * Note that since the workaround buffer will be interpreted by the
    * hardware as an interleaved multisampled buffer, we need to compute
    * width_in_tiles and height_in_tiles by dividing the width and height
    * by 16 rather than the normal Y-tile size of 32.
    */
   unsigned width_in_tiles = ALIGN(width, 16) / 16;
   unsigned height_in_tiles = ALIGN(height, 16) / 16;
   unsigned pitch_minus_1 = 127;
   unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
   brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
                      size_needed);

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              ISL_FORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
   surf[1] = brw_state_reloc(&brw->batch, *out_offset + 4,
                             brw->wm.multisampled_null_render_target_bo,
                             0, RELOC_WRITE);

   surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   /* From the Sandy Bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
    * Notes):
    *
    *   If Surface Type is SURFTYPE_NULL, this field must be TRUE
    */
   surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = BRW_SURFACE_MULTISAMPLECOUNT_4;
   surf[5] = 0;
}

/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffers support.
 */
static uint32_t
gen4_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 unsigned unit,
                                 uint32_t surf_index)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;
   uint32_t *surf;
   uint32_t tile_x, tile_y;
   enum isl_format format;
   uint32_t offset;
   /* _NEW_BUFFERS */
   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   /* BRW_NEW_FS_PROG_DATA */

   if (rb->TexImage && !devinfo->has_surface_tile_offset) {
      intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);

      if (tile_x != 0 || tile_y != 0) {
         /* Original gen4 hardware couldn't draw to a non-tile-aligned
          * destination in a miptree unless you actually set up your
          * renderbuffer
          * as a miptree and used the fragile lod/array_index/etc. controls to
          * select the image. So, instead, we just make a new single-level
          * miptree and render into that.
          */
         intel_renderbuffer_move_to_temp(brw, irb, false);
         assert(irb->align_wa_mt);
         mt = irb->align_wa_mt;
      }
   }

   surf = brw_state_batch(brw, 6 * 4, 32, &offset);

   format = brw->mesa_to_isl_render_format[rb_format];
   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              format << BRW_SURFACE_FORMAT_SHIFT);

   /* reloc */
   assert(mt->offset % mt->cpp == 0);
   surf[1] = brw_state_reloc(&brw->batch, offset + 4, mt->bo,
                             mt->offset +
                             intel_renderbuffer_get_tile_offsets(irb,
                                                                 &tile_x,
                                                                 &tile_y),
                             RELOC_WRITE);

   surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   surf[3] = (brw_get_surface_tiling_bits(mt->surf.tiling) |
              (mt->surf.row_pitch - 1) << BRW_SURFACE_PITCH_SHIFT);

   surf[4] = brw_get_surface_num_multisamples(mt->surf.samples);

   assert(devinfo->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
   /* Note that the low bits of these fields are missing, so
    * there's the possibility of getting in trouble.
    */
   assert(tile_x % 4 == 0);
   assert(tile_y % 2 == 0);
   surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
              (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
              (mt->surf.image_alignment_el.height == 4 ?
               BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));

   if (devinfo->gen < 6) {
      /* _NEW_COLOR */
      if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
          (ctx->Color.BlendEnabled & (1 << unit)))
         surf[0] |= BRW_SURFACE_BLEND_ENABLED;

      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 0))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 1))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
      if (!GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 2))
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;

      /* As mentioned above, disable writes to the alpha component when the
       * renderbuffer is XRGB.
       */
      if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
          !GET_COLORMASK_BIT(ctx->Color.ColorMask, unit, 3)) {
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
      }
   }

   return offset;
}

static void
update_renderbuffer_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS | _NEW_COLOR */
   const struct gl_framebuffer *fb = ctx->DrawBuffer;

   /* Render targets always start at binding table index 0. */
   const unsigned rt_start = 0;

   uint32_t *surf_offsets = brw->wm.base.surf_offset;

   /* Update surfaces for drawing buffers */
   if (fb->_NumColorDrawBuffers >= 1) {
      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];

         if (intel_renderbuffer(rb)) {
            surf_offsets[rt_start + i] = devinfo->gen >= 6 ?
               gen6_update_renderbuffer_surface(brw, rb, i, rt_start + i) :
               gen4_update_renderbuffer_surface(brw, rb, i, rt_start + i);
         } else {
            emit_null_surface_state(brw, fb, &surf_offsets[rt_start + i]);
         }
      }
   } else {
      emit_null_surface_state(brw, fb, &surf_offsets[rt_start]);
   }

   /* The PIPE_CONTROL command description says:
    *
1003 * "Whenever a Binding Table Index (BTI) used by a Render Taget Message
    *  points to a different RENDER_SURFACE_STATE, SW must issue a Render
    *  Target Cache Flush by enabling this bit. When render target flush
    *  is set due to new association of BTI, PS Scoreboard Stall bit must
    *  be set in this packet."
    */
   if (devinfo->gen >= 11) {
      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                  PIPE_CONTROL_STALL_AT_SCOREBOARD);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR,
      .brw = BRW_NEW_BATCH,
   },
   .emit = update_renderbuffer_surfaces,
};

const struct brw_tracked_state gen6_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE,
   },
   .emit = update_renderbuffer_surfaces,
};

static void
update_renderbuffer_read_surfaces(struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;

   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);

   if (wm_prog_data->has_render_target_reads &&
       !ctx->Extensions.EXT_shader_framebuffer_fetch) {
      /* _NEW_BUFFERS */
      const struct gl_framebuffer *fb = ctx->DrawBuffer;

      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
         const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
         const unsigned surf_index =
            wm_prog_data->binding_table.render_target_read_start + i;
         uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];

         if (irb) {
            const enum isl_format format = brw->mesa_to_isl_render_format[
               _mesa_get_render_format(ctx, intel_rb_format(irb))];
            assert(isl_format_supports_sampling(&brw->screen->devinfo,
                                                format));

            /* Override the target of the texture if the render buffer is a
             * single slice of a 3D texture (since the minimum array element
             * field of the surface state structure is ignored by the sampler
             * unit for 3D textures on some hardware), or if the render buffer
             * is a 1D array (since shaders always provide the array index
             * coordinate at the Z component to avoid state-dependent
             * recompiles when changing the texture target of the
             * framebuffer).
             */
            const GLenum target =
               (irb->mt->target == GL_TEXTURE_3D &&
                irb->layer_count == 1) ? GL_TEXTURE_2D :
               irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
               irb->mt->target;

            const struct isl_view view = {
               .format = format,
               .base_level = irb->mt_level - irb->mt->first_level,
               .levels = 1,
               .base_array_layer = irb->mt_layer,
               .array_len = irb->layer_count,
               .swizzle = ISL_SWIZZLE_IDENTITY,
               .usage = ISL_SURF_USAGE_TEXTURE_BIT,
            };

            enum isl_aux_usage aux_usage =
               intel_miptree_texture_aux_usage(brw, irb->mt, format);
            if (brw->draw_aux_usage[i] == ISL_AUX_USAGE_NONE)
               aux_usage = ISL_AUX_USAGE_NONE;

            brw_emit_surface_state(brw, irb->mt, target, view, aux_usage,
                                   surf_offset, surf_index,
                                   0);

         } else {
            emit_null_surface_state(brw, fb, surf_offset);
         }
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = update_renderbuffer_read_surfaces,
};

static bool
is_depth_texture(struct intel_texture_object *iobj)
{
   GLenum base_format = _mesa_get_format_base_format(iobj->_Format);
   return base_format == GL_DEPTH_COMPONENT ||
          (base_format == GL_DEPTH_STENCIL && !iobj->base.StencilSampling);
}

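/* Emit SURFACE_STATE for every sampler a stage uses. 'plane' selects the
 * per-plane binding table section used for multi-planar (e.g. YUV)
 * textures, and 'for_gather' selects the alternate gather4 section (see
 * brw_update_texture_surfaces below).
 */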
static void
update_stage_texture_surfaces(struct brw_context *brw,
                              const struct gl_program *prog,
                              struct brw_stage_state *stage_state,
                              bool for_gather, uint32_t plane)
{
   if (!prog)
      return;

   struct gl_context *ctx = &brw->ctx;

   uint32_t *surf_offset = stage_state->surf_offset;

   /* BRW_NEW_*_PROG_DATA */
   if (for_gather)
      surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
   else
      surf_offset += stage_state->prog_data->binding_table.plane_start[plane];

   unsigned num_samplers = util_last_bit(prog->SamplersUsed);
   for (unsigned s = 0; s < num_samplers; s++) {
      surf_offset[s] = 0;

      if (prog->SamplersUsed & (1 << s)) {
         const unsigned unit = prog->SamplerUnits[s];
         const bool used_by_txf = prog->info.textures_used_by_txf & (1 << s);
         struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
         struct intel_texture_object *iobj = intel_texture_object(obj);

         /* _NEW_TEXTURE */
         if (!obj)
            continue;

         if ((prog->ShadowSamplers & (1 << s)) && !is_depth_texture(iobj)) {
            /* A programming note for the sample_c message says:
             *
             *    "The Surface Format of the associated surface must be
             *     indicated as supporting shadow mapping as indicated in the
             *     surface format table."
             *
             * Accessing non-depth textures via a sampler*Shadow type is
             * undefined. GLSL 4.50 page 162 says:
             *
             *    "If a shadow texture call is made to a sampler that does not
             *     represent a depth texture, then results are undefined."
             *
             * We give them a null surface (zeros) for undefined. We've seen
             * GPU hangs with color buffers and sample_c, so we try and avoid
             * those with this hack.
             */
            emit_null_surface_state(brw, NULL, surf_offset + s);
         } else {
            brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather,
                                       used_by_txf, plane);
         }
      }
   }
}


/**
 * Construct SURFACE_STATE objects for enabled textures.
 */
static void
brw_update_texture_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* BRW_NEW_VERTEX_PROGRAM */
   struct gl_program *vs = brw->programs[MESA_SHADER_VERTEX];

   /* BRW_NEW_TESS_PROGRAMS */
   struct gl_program *tcs = brw->programs[MESA_SHADER_TESS_CTRL];
   struct gl_program *tes = brw->programs[MESA_SHADER_TESS_EVAL];

   /* BRW_NEW_GEOMETRY_PROGRAM */
   struct gl_program *gs = brw->programs[MESA_SHADER_GEOMETRY];

   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct gl_program *fs = brw->programs[MESA_SHADER_FRAGMENT];

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
   update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
   update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
   update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
   update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);

   /* Emit an alternate set of surface states for gather. This allows the
    * surface format to be overridden for only the gather4 messages.
    */
   if (devinfo->gen < 8) {
      if (vs && vs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
      if (tcs && tcs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
      if (tes && tes->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
      if (gs && gs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
      if (fs && fs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
   }

   if (fs) {
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_GEOMETRY_PROGRAM |
             BRW_NEW_GS_PROG_DATA |
             BRW_NEW_TESS_PROGRAMS |
             BRW_NEW_TCS_PROG_DATA |
             BRW_NEW_TES_PROG_DATA |
             BRW_NEW_TEXTURE_BUFFER |
             BRW_NEW_VERTEX_PROGRAM |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = brw_update_texture_surfaces,
};

static void
brw_update_cs_texture_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* BRW_NEW_COMPUTE_PROGRAM */
   struct gl_program *cs = brw->programs[MESA_SHADER_COMPUTE];

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);

   /* Emit an alternate set of surface states for gather. This allows the
    * surface format to be overridden for only the gather4 messages.
    */
   if (devinfo->gen < 8) {
      if (cs && cs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_cs_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_COMPUTE_PROGRAM |
             BRW_NEW_AUX_STATE,
   },
   .emit = brw_update_cs_texture_surfaces,
};

static void
upload_buffer_surface(struct brw_context *brw,
                      struct gl_buffer_binding *binding,
                      uint32_t *out_offset,
                      enum isl_format format,
                      unsigned reloc_flags)
{
   struct gl_context *ctx = &brw->ctx;

   if (binding->BufferObject == ctx->Shared->NullBufferObj) {
      emit_null_surface_state(brw, NULL, out_offset);
   } else {
      ptrdiff_t size = binding->BufferObject->Size - binding->Offset;
      if (!binding->AutomaticSize)
         size = MIN2(size, binding->Size);

      struct intel_buffer_object *iobj =
         intel_buffer_object(binding->BufferObject);
      struct brw_bo *bo =
         intel_bufferobj_buffer(brw, iobj, binding->Offset, size,
                                (reloc_flags & RELOC_WRITE) != 0);

      brw_emit_buffer_surface_state(brw, out_offset, bo, binding->Offset,
                                    format, size, 1, reloc_flags);
   }
}

void
brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;

   if (!prog || (prog->info.num_ubos == 0 &&
                 prog->info.num_ssbos == 0 &&
                 prog->info.num_abos == 0))
      return;

   uint32_t *ubo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ubo_start];

   for (int i = 0; i < prog->info.num_ubos; i++) {
      struct gl_buffer_binding *binding =
         &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];
      upload_buffer_surface(brw, binding, &ubo_surf_offsets[i],
                            ISL_FORMAT_R32G32B32A32_FLOAT, 0);
   }

   uint32_t *abo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
   uint32_t *ssbo_surf_offsets = abo_surf_offsets + prog->info.num_abos;

   for (int i = 0; i < prog->info.num_abos; i++) {
      struct gl_buffer_binding *binding =
         &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
      upload_buffer_surface(brw, binding, &abo_surf_offsets[i],
                            ISL_FORMAT_RAW, RELOC_WRITE);
   }

   for (int i = 0; i < prog->info.num_ssbos; i++) {
      struct gl_buffer_binding *binding =
         &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];

      upload_buffer_surface(brw, binding, &ssbo_surf_offsets[i],
                            ISL_FORMAT_RAW, RELOC_WRITE);
   }

   stage_state->push_constants_dirty = true;
   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

static void
brw_upload_wm_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog = ctx->FragmentProgram._Current;

   /* BRW_NEW_FS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
}

const struct brw_tracked_state brw_wm_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_wm_ubo_surfaces,
};

static void
brw_upload_cs_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   /* BRW_NEW_CS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog, &brw->cs.base, brw->cs.base.prog_data);
}

const struct brw_tracked_state brw_cs_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_cs_ubo_surfaces,
};

static void
brw_upload_cs_image_surfaces(struct brw_context *brw)
{
   /* _NEW_PROGRAM */
   const struct gl_program *cp = brw->programs[MESA_SHADER_COMPUTE];

   if (cp) {
      /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, cp, &brw->cs.base,
                                brw->cs.base.prog_data);
   }
}

const struct brw_tracked_state brw_cs_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_AUX_STATE |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_cs_image_surfaces,
};

static uint32_t
get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   enum isl_format hw_format = brw_isl_format_for_mesa_format(format);
   if (access == GL_WRITE_ONLY) {
      return hw_format;
   } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
      /* Typed surface reads support a very limited subset of the shader
       * image formats. Translate it into the closest format the
       * hardware supports.
       */
      return isl_lower_storage_image_format(devinfo, hw_format);
   } else {
      /* The hardware doesn't actually support a typed format that we can use
       * so we have to fall back to untyped read/write messages.
       */
      return ISL_FORMAT_RAW;
   }
}

static void
update_default_image_param(struct brw_context *brw,
                           struct gl_image_unit *u,
                           unsigned surface_idx,
                           struct brw_image_param *param)
{
   memset(param, 0, sizeof(*param));
   param->surface_idx = surface_idx;
   /* Set the swizzling shifts to all-ones to effectively disable swizzling --
    * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
    * detailed explanation of these parameters.
    */
   param->swizzling[0] = 0xff;
   param->swizzling[1] = 0xff;
}

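/* For buffer images only the first size/stride entries matter: size[0] is
 * the image size in texels and stride[0] the bytes per texel, so e.g. a
 * 4096-byte buffer bound as rgba8 (4 bytes/texel) yields size[0] = 1024.
 */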
static void
update_buffer_image_param(struct brw_context *brw,
                          struct gl_image_unit *u,
                          unsigned surface_idx,
                          struct brw_image_param *param)
{
   struct gl_buffer_object *obj = u->TexObj->BufferObject;
   const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
   update_default_image_param(brw, u, surface_idx, param);

   param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
   param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
}

static unsigned
get_image_num_layers(const struct intel_mipmap_tree *mt, GLenum target,
                     unsigned level)
{
   if (target == GL_TEXTURE_CUBE_MAP)
      return 6;

   return target == GL_TEXTURE_3D ?
      minify(mt->surf.logical_level0_px.depth, level) :
      mt->surf.logical_level0_px.array_len;
}

static void
update_image_surface(struct brw_context *brw,
                     struct gl_image_unit *u,
                     GLenum access,
                     unsigned surface_idx,
                     uint32_t *surf_offset,
                     struct brw_image_param *param)
{
   if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
      struct gl_texture_object *obj = u->TexObj;
      const unsigned format = get_image_format(brw, u->_ActualFormat, access);

      if (obj->Target == GL_TEXTURE_BUFFER) {
         struct intel_buffer_object *intel_obj =
            intel_buffer_object(obj->BufferObject);
         const unsigned texel_size = (format == ISL_FORMAT_RAW ? 1 :
                                      _mesa_get_format_bytes(u->_ActualFormat));

         brw_emit_buffer_surface_state(
            brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
            format, intel_obj->Base.Size, texel_size,
            access != GL_READ_ONLY ? RELOC_WRITE : 0);

         update_buffer_image_param(brw, u, surface_idx, param);

      } else {
         struct intel_texture_object *intel_obj = intel_texture_object(obj);
         struct intel_mipmap_tree *mt = intel_obj->mt;
         const unsigned num_layers = u->Layered ?
            get_image_num_layers(mt, obj->Target, u->Level) : 1;

         struct isl_view view = {
            .format = format,
            .base_level = obj->MinLevel + u->Level,
            .levels = 1,
            .base_array_layer = obj->MinLayer + u->_Layer,
            .array_len = num_layers,
            .swizzle = ISL_SWIZZLE_IDENTITY,
            .usage = ISL_SURF_USAGE_STORAGE_BIT,
         };

         if (format == ISL_FORMAT_RAW) {
            brw_emit_buffer_surface_state(
               brw, surf_offset, mt->bo, mt->offset,
               format, mt->bo->size - mt->offset, 1 /* pitch */,
               access != GL_READ_ONLY ? RELOC_WRITE : 0);

         } else {
            const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
            assert(!intel_miptree_has_color_unresolved(mt,
                                                       view.base_level, 1,
                                                       view.base_array_layer,
                                                       view.array_len));
            brw_emit_surface_state(brw, mt, mt->target, view,
                                   ISL_AUX_USAGE_NONE,
                                   surf_offset, surf_index,
                                   access == GL_READ_ONLY ? 0 : RELOC_WRITE);
         }

         isl_surf_fill_image_param(&brw->isl_dev, param, &mt->surf, &view);
         param->surface_idx = surface_idx;
      }

   } else {
      emit_null_surface_state(brw, NULL, surf_offset);
      update_default_image_param(brw, u, surface_idx, param);
   }
}

void
brw_upload_image_surfaces(struct brw_context *brw,
                          const struct gl_program *prog,
                          struct brw_stage_state *stage_state,
                          struct brw_stage_prog_data *prog_data)
{
   assert(prog);
   struct gl_context *ctx = &brw->ctx;

   if (prog->info.num_images) {
      for (unsigned i = 0; i < prog->info.num_images; i++) {
         struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
         const unsigned surf_idx = prog_data->binding_table.image_start + i;

         update_image_surface(brw, u, prog->sh.ImageAccess[i],
                              surf_idx,
                              &stage_state->surf_offset[surf_idx],
                              &stage_state->image_param[i]);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
      /* This may have changed the image metadata that depends on the context
       * image unit state and is passed to the program as uniforms; make sure
       * that push and pull constants are reuploaded.
       */
      brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
   }
}

static void
brw_upload_wm_image_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct gl_program *wm = brw->programs[MESA_SHADER_FRAGMENT];

   if (wm) {
      /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, wm, &brw->wm.base,
                                brw->wm.base.prog_data);
   }
}

const struct brw_tracked_state brw_wm_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_wm_image_surfaces,
};

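/* Set up the binding table surface through which a compute shader reads
 * gl_NumWorkGroups. For direct dispatches the three GLuints are copied
 * into the streaming upload buffer (one of the generalized intel_upload.c
 * uploaders); for indirect dispatches the application's parameter BO is
 * pointed at directly, so the GPU reads the latest values.
 */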
static void
brw_upload_cs_work_groups_surface(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
   /* BRW_NEW_CS_PROG_DATA */
   const struct brw_cs_prog_data *cs_prog_data =
      brw_cs_prog_data(brw->cs.base.prog_data);

   if (prog && cs_prog_data->uses_num_work_groups) {
      const unsigned surf_idx =
         cs_prog_data->binding_table.work_groups_start;
      uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
      struct brw_bo *bo;
      uint32_t bo_offset;

      if (brw->compute.num_work_groups_bo == NULL) {
         bo = NULL;
         brw_upload_data(&brw->upload,
                         (void *)brw->compute.num_work_groups,
                         3 * sizeof(GLuint),
                         sizeof(GLuint),
                         &bo,
                         &bo_offset);
      } else {
         bo = brw->compute.num_work_groups_bo;
         bo_offset = brw->compute.num_work_groups_offset;
      }

      brw_emit_buffer_surface_state(brw, surf_offset,
                                    bo, bo_offset,
                                    ISL_FORMAT_RAW,
                                    3 * sizeof(GLuint), 1,
                                    RELOC_WRITE);
      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_cs_work_groups_surface = {
   .dirty = {
      .brw = BRW_NEW_CS_PROG_DATA |
             BRW_NEW_CS_WORK_GROUPS
   },
   .emit = brw_upload_cs_work_groups_surface,
};