i965: Use ISL for emitting null surface states.
src/mesa/drivers/dri/i965/brw_wm_surface_state.c

/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "compiler/nir/nir.h"
#include "main/context.h"
#include "main/blend.h"
#include "main/mtypes.h"
#include "main/samplerobj.h"
#include "main/shaderimage.h"
#include "main/teximage.h"
#include "program/prog_parameter.h"
#include "program/prog_instruction.h"
#include "main/framebuffer.h"
#include "main/shaderapi.h"

#include "isl/isl.h"

#include "intel_mipmap_tree.h"
#include "intel_batchbuffer.h"
#include "intel_tex.h"
#include "intel_fbo.h"
#include "intel_buffer_objects.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "brw_wm.h"

enum {
   INTEL_RENDERBUFFER_LAYERED = 1 << 0,
   INTEL_AUX_BUFFER_DISABLED = 1 << 1,
};

uint32_t tex_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_WB,
   [9] = SKL_MOCS_WB,
   [10] = CNL_MOCS_WB,
};

uint32_t rb_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_PTE,
   [9] = SKL_MOCS_PTE,
   [10] = CNL_MOCS_PTE,
};
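
/* Note: tex_mocs[] and rb_mocs[] above are indexed by brw->gen at the call
 * sites (e.g. tex_mocs[brw->gen]); the designated initializers leave the
 * entries for earlier gens at zero.
 */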

static void
get_isl_surf(struct brw_context *brw, struct intel_mipmap_tree *mt,
             GLenum target, struct isl_view *view,
             uint32_t *tile_x, uint32_t *tile_y,
             uint32_t *offset, struct isl_surf *surf)
{
   *surf = mt->surf;

   const enum isl_dim_layout dim_layout =
      get_isl_dim_layout(&brw->screen->devinfo, mt->surf.tiling, target);

   if (surf->dim_layout == dim_layout)
      return;

   /* The layout of the specified texture target is not compatible with the
    * actual layout of the miptree structure in memory -- you're entering
    * dangerous territory.  This can only possibly work if you only intended
    * to access a single level and slice of the texture, and if the hardware
    * supports the tile offset feature, which allows non-tile-aligned base
    * offsets: we'll have to point the hardware at the first texel of the
    * level instead of relying on the usual base level/layer controls.
    */
   assert(brw->has_surface_tile_offset);
   assert(view->levels == 1 && view->array_len == 1);
   assert(*tile_x == 0 && *tile_y == 0);

   *offset += intel_miptree_get_tile_offsets(mt, view->base_level,
                                             view->base_array_layer,
                                             tile_x, tile_y);

   /* Minify the logical dimensions of the texture. */
   const unsigned l = view->base_level - mt->first_level;
   surf->logical_level0_px.width = minify(surf->logical_level0_px.width, l);
   surf->logical_level0_px.height = surf->dim <= ISL_SURF_DIM_1D ? 1 :
      minify(surf->logical_level0_px.height, l);
   surf->logical_level0_px.depth = surf->dim <= ISL_SURF_DIM_2D ? 1 :
      minify(surf->logical_level0_px.depth, l);

   /* Only the base level and layer can be addressed with the overridden
    * layout.
    */
   surf->logical_level0_px.array_len = 1;
   surf->levels = 1;
   surf->dim_layout = dim_layout;

   /* The requested slice of the texture is now at the base level and
    * layer.
    */
   view->base_level = 0;
   view->base_array_layer = 0;
}
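
/* Worked example (hypothetical numbers): for a view of level 2 of a 64x64
 * 2D miptree whose layout doesn't match the target's dim_layout, the code
 * above adds the tile offsets of that level/layer to *offset, shrinks
 * logical_level0_px to 16x16 (minify(64, 2)), and rewrites the view so
 * base_level = 0 and base_array_layer = 0 address that single slice.
 */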

static void
brw_emit_surface_state(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       GLenum target, struct isl_view view,
                       enum isl_aux_usage aux_usage,
                       uint32_t mocs, uint32_t *surf_offset, int surf_index,
                       unsigned reloc_flags)
{
   uint32_t tile_x = mt->level[0].level_x;
   uint32_t tile_y = mt->level[0].level_y;
   uint32_t offset = mt->offset;

   struct isl_surf surf;

   get_isl_surf(brw, mt, target, &view, &tile_x, &tile_y, &offset, &surf);

   union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };

   struct brw_bo *aux_bo = NULL;
   struct isl_surf *aux_surf = NULL;
   uint64_t aux_offset = 0;
   switch (aux_usage) {
   case ISL_AUX_USAGE_MCS:
   case ISL_AUX_USAGE_CCS_D:
   case ISL_AUX_USAGE_CCS_E:
      aux_surf = &mt->mcs_buf->surf;
      aux_bo = mt->mcs_buf->bo;
      aux_offset = mt->mcs_buf->offset;
      break;

   case ISL_AUX_USAGE_HIZ:
      aux_surf = &mt->hiz_buf->surf;
      aux_bo = mt->hiz_buf->bo;
      aux_offset = 0;
      break;

   case ISL_AUX_USAGE_NONE:
      break;
   }

   if (aux_usage != ISL_AUX_USAGE_NONE) {
      /* We only really need a clear color if we also have an auxiliary
       * surface.  Without one, it does nothing.
       */
      clear_color = mt->fast_clear_color;
   }

   void *state = brw_state_batch(brw,
                                 brw->isl_dev.ss.size,
                                 brw->isl_dev.ss.align,
                                 surf_offset);

   /* Use the surface filled out by get_isl_surf(), which carries the
    * minified dimensions and dim_layout override when the target's layout
    * doesn't match the miptree's.
    */
   isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
                       .address = brw_emit_reloc(&brw->batch,
                                                 *surf_offset +
                                                 brw->isl_dev.ss.addr_offset,
                                                 mt->bo, offset, reloc_flags),
                       .aux_surf = aux_surf, .aux_usage = aux_usage,
                       .aux_address = aux_offset,
                       .mocs = mocs, .clear_color = clear_color,
                       .x_offset_sa = tile_x, .y_offset_sa = tile_y);
   if (aux_surf) {
      /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
       * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
       * contain other control information.  Since buffer addresses are always
       * on 4k boundaries (and thus have their lower 12 bits zero), we can use
       * an ordinary reloc to do the necessary address translation.
       *
       * FIXME: move to the point of assignment.
       */
      assert((aux_offset & 0xfff) == 0);
      uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
      *aux_addr = brw_emit_reloc(&brw->batch,
                                 *surf_offset +
                                 brw->isl_dev.ss.aux_addr_offset,
                                 aux_bo, *aux_addr,
                                 reloc_flags);
   }
}

uint32_t
brw_update_renderbuffer_surface(struct brw_context *brw,
                                struct gl_renderbuffer *rb,
                                uint32_t flags, unsigned unit,
                                uint32_t surf_index)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;

   enum isl_aux_usage aux_usage =
      intel_miptree_render_aux_usage(brw, mt, ctx->Color.sRGBEnabled,
                                     ctx->Color.BlendEnabled & (1 << unit));

   if (flags & INTEL_AUX_BUFFER_DISABLED) {
      assert(brw->gen >= 9);
      aux_usage = ISL_AUX_USAGE_NONE;
   }

   assert(brw_render_target_supported(brw, rb));

   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }

   struct isl_view view = {
      .format = brw->mesa_to_isl_render_format[rb_format],
      .base_level = irb->mt_level - irb->mt->first_level,
      .levels = 1,
      .base_array_layer = irb->mt_layer,
      .array_len = MAX2(irb->layer_count, 1),
      .swizzle = ISL_SWIZZLE_IDENTITY,
      .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
   };

   uint32_t offset;
   brw_emit_surface_state(brw, mt, mt->target, view, aux_usage,
                          rb_mocs[brw->gen],
                          &offset, surf_index,
                          RELOC_WRITE);
   return offset;
}

GLuint
translate_tex_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_1D:
   case GL_TEXTURE_1D_ARRAY_EXT:
      return BRW_SURFACE_1D;

   case GL_TEXTURE_RECTANGLE_NV:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_2D:
   case GL_TEXTURE_2D_ARRAY_EXT:
   case GL_TEXTURE_EXTERNAL_OES:
   case GL_TEXTURE_2D_MULTISAMPLE:
   case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_3D:
      return BRW_SURFACE_3D;

   case GL_TEXTURE_CUBE_MAP:
   case GL_TEXTURE_CUBE_MAP_ARRAY:
      return BRW_SURFACE_CUBE;

   default:
      unreachable("not reached");
   }
}

uint32_t
brw_get_surface_tiling_bits(enum isl_tiling tiling)
{
   switch (tiling) {
   case ISL_TILING_X:
      return BRW_SURFACE_TILED;
   case ISL_TILING_Y0:
      return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
   default:
      return 0;
   }
}


uint32_t
brw_get_surface_num_multisamples(unsigned num_samples)
{
   if (num_samples > 1)
      return BRW_SURFACE_MULTISAMPLECOUNT_4;
   else
      return BRW_SURFACE_MULTISAMPLECOUNT_1;
}

/**
 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
 * swizzling.
 */
int
brw_get_texture_swizzle(const struct gl_context *ctx,
                        const struct gl_texture_object *t)
{
   const struct gl_texture_image *img = t->Image[0][t->BaseLevel];

   int swizzles[SWIZZLE_NIL + 1] = {
      SWIZZLE_X,
      SWIZZLE_Y,
      SWIZZLE_Z,
      SWIZZLE_W,
      SWIZZLE_ZERO,
      SWIZZLE_ONE,
      SWIZZLE_NIL
   };

   if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
       img->_BaseFormat == GL_DEPTH_STENCIL) {
      GLenum depth_mode = t->DepthMode;

      /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
       * with depth component data specified with a sized internal format.
       * Otherwise, it's left at the old default, GL_LUMINANCE.
       */
      if (_mesa_is_gles3(ctx) &&
          img->InternalFormat != GL_DEPTH_COMPONENT &&
          img->InternalFormat != GL_DEPTH_STENCIL) {
         depth_mode = GL_RED;
      }

      switch (depth_mode) {
      case GL_ALPHA:
         swizzles[0] = SWIZZLE_ZERO;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_LUMINANCE:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
         break;
      case GL_INTENSITY:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_RED:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_ONE;
         break;
      }
   }

   GLenum datatype = _mesa_get_format_datatype(img->TexFormat);

   /* If the texture's format is alpha-only, force R, G, and B to
    * 0.0.  Similarly, if the texture's format has no alpha channel,
    * force the alpha value read to 1.0.  This allows for the
    * implementation to use an RGBA texture for any of these formats
    * without leaking any unexpected values.
    */
   switch (img->_BaseFormat) {
   case GL_ALPHA:
      swizzles[0] = SWIZZLE_ZERO;
      swizzles[1] = SWIZZLE_ZERO;
      swizzles[2] = SWIZZLE_ZERO;
      break;
   case GL_LUMINANCE:
      if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
      }
      break;
   case GL_LUMINANCE_ALPHA:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_W;
      }
      break;
   case GL_INTENSITY:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
      }
      break;
   case GL_RED:
   case GL_RG:
   case GL_RGB:
      if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0 ||
          img->TexFormat == MESA_FORMAT_RGB_DXT1 ||
          img->TexFormat == MESA_FORMAT_SRGB_DXT1)
         swizzles[3] = SWIZZLE_ONE;
      break;
   }

   return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
                        swizzles[GET_SWZ(t->_Swizzle, 1)],
                        swizzles[GET_SWZ(t->_Swizzle, 2)],
                        swizzles[GET_SWZ(t->_Swizzle, 3)]);
}
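
/* Example: sampling a GL_DEPTH_COMPONENT texture with DepthMode GL_RED and
 * an identity texture swizzle resolves to MAKE_SWIZZLE4(SWIZZLE_X,
 * SWIZZLE_ZERO, SWIZZLE_ZERO, SWIZZLE_ONE): depth lands in .x, with
 * constant 0/0/1 in the remaining channels.
 */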

/**
 * Convert a swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED).  The mappings are
 *
 *   SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
 *           0          1          2          3             4            5
 *           4          5          6          7             0            1
 *     SCS_RED, SCS_GREEN,  SCS_BLUE, SCS_ALPHA,    SCS_ZERO,     SCS_ONE
 *
 * which is simply adding 4 then modding by 8 (or anding with 7).
 *
 * We then may need to apply workarounds for textureGather hardware bugs.
 */
static unsigned
swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
{
   unsigned scs = (swizzle + 4) & 7;

   return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
}
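
/* For instance, SWIZZLE_W = 3 maps to (3 + 4) & 7 = 7 = HSW_SCS_ALPHA,
 * while SWIZZLE_ZERO = 4 wraps around to (4 + 4) & 7 = 0 = HSW_SCS_ZERO.
 */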

static bool
brw_aux_surface_disabled(const struct brw_context *brw,
                         const struct intel_mipmap_tree *mt)
{
   const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;

   for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
      const struct intel_renderbuffer *irb =
         intel_renderbuffer(fb->_ColorDrawBuffers[i]);

      if (irb && irb->mt == mt)
         return brw->draw_aux_buffer_disabled[i];
   }

   return false;
}

void
brw_update_texture_surface(struct gl_context *ctx,
                           unsigned unit,
                           uint32_t *surf_offset,
                           bool for_gather,
                           uint32_t plane)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;

   if (obj->Target == GL_TEXTURE_BUFFER) {
      brw_update_buffer_texture_surface(ctx, unit, surf_offset);

   } else {
      struct intel_texture_object *intel_obj = intel_texture_object(obj);
      struct intel_mipmap_tree *mt = intel_obj->mt;

      if (plane > 0) {
         if (mt->plane[plane - 1] == NULL)
            return;
         mt = mt->plane[plane - 1];
      }

      struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
      /* If this is a view with restricted NumLayers, then our effective depth
       * is not just the miptree depth.
       */
      unsigned view_num_layers;
      if (obj->Immutable && obj->Target != GL_TEXTURE_3D) {
         view_num_layers = obj->NumLayers;
      } else {
         view_num_layers = mt->surf.dim == ISL_SURF_DIM_3D ?
                              mt->surf.logical_level0_px.depth :
                              mt->surf.logical_level0_px.array_len;
      }

      /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
       * texturing functions that return a float, as our code generation
       * always selects the .x channel (which would always be 0).
       */
      struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
      const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
         (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
          firstImage->_BaseFormat == GL_DEPTH_STENCIL);
      const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
                                brw_get_texture_swizzle(&brw->ctx, obj));

      mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
      enum isl_format format = translate_tex_format(brw, mesa_fmt,
                                                    sampler->sRGBDecode);

      /* Implement the gen6 and gen7 gather work-arounds. */
      bool need_green_to_blue = false;
      if (for_gather) {
         if (brw->gen == 7 && (format == ISL_FORMAT_R32G32_FLOAT ||
                               format == ISL_FORMAT_R32G32_SINT ||
                               format == ISL_FORMAT_R32G32_UINT)) {
            format = ISL_FORMAT_R32G32_FLOAT_LD;
            need_green_to_blue = brw->is_haswell;
         } else if (brw->gen == 6) {
            /* Sandybridge's gather4 message is broken for integer formats.
             * To work around this, we pretend the surface is UNORM for
             * 8 or 16-bit formats, and emit shader instructions to recover
             * the real INT/UINT value.  For 32-bit formats, we pretend
             * the surface is FLOAT, and simply reinterpret the resulting
             * bits.
             */
            switch (format) {
            case ISL_FORMAT_R8_SINT:
            case ISL_FORMAT_R8_UINT:
               format = ISL_FORMAT_R8_UNORM;
               break;

            case ISL_FORMAT_R16_SINT:
            case ISL_FORMAT_R16_UINT:
               format = ISL_FORMAT_R16_UNORM;
               break;

            case ISL_FORMAT_R32_SINT:
            case ISL_FORMAT_R32_UINT:
               format = ISL_FORMAT_R32_FLOAT;
               break;

            default:
               break;
            }
         }
      }

      if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
         if (brw->gen <= 7) {
            assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
            mt = mt->r8stencil_mt;
         } else {
            mt = mt->stencil_mt;
         }
         format = ISL_FORMAT_R8_UINT;
      } else if (brw->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
         assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
         mt = mt->r8stencil_mt;
         format = ISL_FORMAT_R8_UINT;
      }

      const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];

      struct isl_view view = {
         .format = format,
         .base_level = obj->MinLevel + obj->BaseLevel,
         .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
         .base_array_layer = obj->MinLayer,
         .array_len = view_num_layers,
         .swizzle = {
            .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
            .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
            .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
            .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
         },
         .usage = ISL_SURF_USAGE_TEXTURE_BIT,
      };

      if (obj->Target == GL_TEXTURE_CUBE_MAP ||
          obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
         view.usage |= ISL_SURF_USAGE_CUBE_BIT;

      enum isl_aux_usage aux_usage =
         intel_miptree_texture_aux_usage(brw, mt, format);

      if (brw_aux_surface_disabled(brw, mt))
         aux_usage = ISL_AUX_USAGE_NONE;

      brw_emit_surface_state(brw, mt, mt->target, view, aux_usage,
                             tex_mocs[brw->gen],
                             surf_offset, surf_index,
                             0);
   }
}

void
brw_emit_buffer_surface_state(struct brw_context *brw,
                              uint32_t *out_offset,
                              struct brw_bo *bo,
                              unsigned buffer_offset,
                              unsigned surface_format,
                              unsigned buffer_size,
                              unsigned pitch,
                              unsigned reloc_flags)
{
   uint32_t *dw = brw_state_batch(brw,
                                  brw->isl_dev.ss.size,
                                  brw->isl_dev.ss.align,
                                  out_offset);

   isl_buffer_fill_state(&brw->isl_dev, dw,
                         .address = !bo ? buffer_offset :
                                    brw_emit_reloc(&brw->batch,
                                                   *out_offset +
                                                   brw->isl_dev.ss.addr_offset,
                                                   bo, buffer_offset,
                                                   reloc_flags),
                         .size = buffer_size,
                         .format = surface_format,
                         .stride = pitch,
                         .mocs = tex_mocs[brw->gen]);
}

void
brw_update_buffer_texture_surface(struct gl_context *ctx,
                                  unsigned unit,
                                  uint32_t *surf_offset)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_buffer_object *intel_obj =
      intel_buffer_object(tObj->BufferObject);
   uint32_t size = tObj->BufferSize;
   struct brw_bo *bo = NULL;
   mesa_format format = tObj->_BufferObjectFormat;
   const enum isl_format isl_format = brw_isl_format_for_mesa_format(format);
   int texel_size = _mesa_get_format_bytes(format);

   if (intel_obj) {
      size = MIN2(size, intel_obj->Base.Size);
      bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size,
                                  false);
   }

   /* The ARB_texture_buffer_object spec says:
    *
    *    "The number of texels in the buffer texture's texel array is given by
    *
    *       floor(<buffer_size> / (<components> * sizeof(<base_type>))),
    *
    *    where <buffer_size> is the size of the buffer object, in basic
    *    machine units and <components> and <base_type> are the element count
    *    and base data type for elements, as specified in Table X.1.  The
    *    number of texels in the texel array is then clamped to the
    *    implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
    *
    * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
    * so that when ISL divides by stride to obtain the number of texels, that
    * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
    */
   size = MIN2(size, ctx->Const.MaxTextureBufferSize * (unsigned) texel_size);
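
   /* For example (hypothetical limit): with 16-byte RGBA32F texels and a
    * MaxTextureBufferSize of 1 << 27 texels, the clamp above caps the
    * surface at (1 << 27) * 16 bytes, so the texel count ISL computes
    * never exceeds the limit.
    */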

   if (isl_format == ISL_FORMAT_UNSUPPORTED) {
      _mesa_problem(NULL, "bad format %s for texture buffer\n",
                    _mesa_get_format_name(format));
   }

   brw_emit_buffer_surface_state(brw, surf_offset, bo,
                                 tObj->BufferOffset,
                                 isl_format,
                                 size,
                                 texel_size,
                                 0);
}

/**
 * Create the constant buffer surface.  Vertex/fragment shader constants will
 * be read from this buffer with Data Port Read instructions/messages.
 */
void
brw_create_constant_surface(struct brw_context *brw,
                            struct brw_bo *bo,
                            uint32_t offset,
                            uint32_t size,
                            uint32_t *out_offset)
{
   brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
                                 ISL_FORMAT_R32G32B32A32_FLOAT,
                                 size, 1, 0);
}

/**
 * Create the buffer surface.  Shader buffer variables will be
 * read from / written to this buffer with Data Port Read/Write
 * instructions/messages.
 */
void
brw_create_buffer_surface(struct brw_context *brw,
                          struct brw_bo *bo,
                          uint32_t offset,
                          uint32_t size,
                          uint32_t *out_offset)
{
   /* Use a raw surface so we can reuse existing untyped read/write/atomic
    * messages.  We need these specifically for the fragment shader since they
    * include a pixel mask header that we need to ensure correct behavior
    * with helper invocations, which cannot write to the buffer.
    */
   brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
                                 ISL_FORMAT_RAW,
                                 size, 1, RELOC_WRITE);
}

/**
 * Set up a binding table entry for use by stream output logic (transform
 * feedback).
 *
 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
 */
void
brw_update_sol_surface(struct brw_context *brw,
                       struct gl_buffer_object *buffer_obj,
                       uint32_t *out_offset, unsigned num_vector_components,
                       unsigned stride_dwords, unsigned offset_dwords)
{
   struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
   uint32_t offset_bytes = 4 * offset_dwords;
   struct brw_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
                                              offset_bytes,
                                              buffer_obj->Size - offset_bytes,
                                              true);
   uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
   uint32_t pitch_minus_1 = 4 * stride_dwords - 1;
   size_t size_dwords = buffer_obj->Size / 4;
   uint32_t buffer_size_minus_1, width, height, depth, surface_format;

   /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
    * too big to map using a single binding table entry?
    */
   assert((size_dwords - offset_dwords) / stride_dwords
          <= BRW_MAX_NUM_BUFFER_ENTRIES);

   if (size_dwords > offset_dwords + num_vector_components) {
      /* There is room for at least 1 transform feedback output in the buffer.
       * Compute the number of additional transform feedback outputs the
       * buffer has room for.
       */
      buffer_size_minus_1 =
         (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
   } else {
      /* There isn't even room for a single transform feedback output in the
       * buffer.  We can't configure the binding table entry to prevent output
       * entirely; we'll have to rely on the geometry shader to detect
       * overflow.  But to minimize the damage in case of a bug, set up the
       * binding table entry to just allow a single output.
       */
      buffer_size_minus_1 = 0;
   }
   width = buffer_size_minus_1 & 0x7f;
   height = (buffer_size_minus_1 & 0xfff80) >> 7;
   depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
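
   /* Worked example: buffer_size_minus_1 = 1000 (0x3e8) splits into
    * width = 0x68 (104), height = 0x7, depth = 0; the hardware recombines
    * them as depth << 20 | height << 7 | width = 7 * 128 + 104 = 1000.
    */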

   switch (num_vector_components) {
   case 1:
      surface_format = ISL_FORMAT_R32_FLOAT;
      break;
   case 2:
      surface_format = ISL_FORMAT_R32G32_FLOAT;
      break;
   case 3:
      surface_format = ISL_FORMAT_R32G32B32_FLOAT;
      break;
   case 4:
      surface_format = ISL_FORMAT_R32G32B32A32_FLOAT;
      break;
   default:
      unreachable("Invalid vector size for transform feedback output");
   }

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
             BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
             surface_format << BRW_SURFACE_FORMAT_SHIFT |
             BRW_SURFACE_RC_READ_WRITE;
   surf[1] = brw_emit_reloc(&brw->batch,
                            *out_offset + 4, bo, offset_bytes, RELOC_WRITE);
   surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
              height << BRW_SURFACE_HEIGHT_SHIFT);
   surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = 0;
   surf[5] = 0;
}

/* Creates a new WM constant buffer reflecting the current fragment program's
 * constants, if needed by the fragment program.
 *
 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
 * state atom.
 */
static void
brw_upload_wm_pull_constants(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->wm.base;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct brw_program *fp = (struct brw_program *) brw->fragment_program;
   /* BRW_NEW_FS_PROG_DATA */
   struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;

   _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
   /* _NEW_PROGRAM_CONSTANTS */
   brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
                             stage_state, prog_data);
}

const struct brw_tracked_state brw_wm_pull_constants = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_pull_constants,
};

/**
 * Creates a null renderbuffer surface.
 *
 * This is used when the shader doesn't write to any color output.  An FB
 * write to target 0 will still be emitted, because that's how the thread is
 * terminated (and computed depth is returned), so we need to have the
 * hardware discard the target 0 color output.
 */
static void
emit_null_surface_state(struct brw_context *brw,
                        unsigned width,
                        unsigned height,
                        unsigned samples,
                        uint32_t *out_offset)
{
   uint32_t *surf = brw_state_batch(brw,
                                    brw->isl_dev.ss.size,
                                    brw->isl_dev.ss.align,
                                    out_offset);

   if (brw->gen != 6 || samples <= 1) {
      isl_null_fill_state(&brw->isl_dev, surf,
                          isl_extent3d(width, height, 1));
      return;
   }

   /* On Gen6, null render targets seem to cause GPU hangs when multisampling.
    * Work around this problem by rendering into a dummy color buffer.
    *
    * To decrease the amount of memory needed by the workaround buffer, we
    * set its pitch to 128 bytes (the width of a Y tile).  This means that
    * the amount of memory needed for the workaround buffer is
    * (width_in_tiles + height_in_tiles - 1) tiles.
    *
    * Note that since the workaround buffer will be interpreted by the
    * hardware as an interleaved multisampled buffer, we need to compute
    * width_in_tiles and height_in_tiles by dividing the width and height
    * by 16 rather than the normal Y-tile size of 32.
    */
   unsigned width_in_tiles = ALIGN(width, 16) / 16;
   unsigned height_in_tiles = ALIGN(height, 16) / 16;
   unsigned pitch_minus_1 = 127;
   unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
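
   /* Sizing example: for a 1920x1080 null target this gives width_in_tiles =
    * 120 and height_in_tiles = ALIGN(1080, 16) / 16 = 68, so the workaround
    * buffer needs (120 + 68 - 1) * 4096 = 765952 bytes -- roughly 748 KiB
    * rather than a full-sized multisampled render target.
    */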
   brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
                      size_needed);

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              ISL_FORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
   surf[1] = brw_emit_reloc(&brw->batch, *out_offset + 4,
                            brw->wm.multisampled_null_render_target_bo,
                            0, RELOC_WRITE);

   surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   /* From the Sandy Bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
    * Notes):
    *
    *    If Surface Type is SURFTYPE_NULL, this field must be TRUE
    */
   surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = BRW_SURFACE_MULTISAMPLECOUNT_4;
   surf[5] = 0;
}

/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffers support.
 */
static uint32_t
gen4_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 uint32_t flags, unsigned unit,
                                 uint32_t surf_index)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;
   uint32_t *surf;
   uint32_t tile_x, tile_y;
   enum isl_format format;
   uint32_t offset;
   /* _NEW_BUFFERS */
   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   /* BRW_NEW_FS_PROG_DATA */

   assert(!(flags & INTEL_RENDERBUFFER_LAYERED));
   assert(!(flags & INTEL_AUX_BUFFER_DISABLED));

   if (rb->TexImage && !brw->has_surface_tile_offset) {
      intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);

      if (tile_x != 0 || tile_y != 0) {
         /* Original gen4 hardware couldn't draw to a non-tile-aligned
          * destination in a miptree unless you actually set up your
          * renderbuffer as a miptree and used the fragile
          * lod/array_index/etc. controls to select the image.  So, instead,
          * we just make a new single-level miptree and render into that.
          */
         intel_renderbuffer_move_to_temp(brw, irb, false);
         assert(irb->align_wa_mt);
         mt = irb->align_wa_mt;
      }
   }

   surf = brw_state_batch(brw, 6 * 4, 32, &offset);

   format = brw->mesa_to_isl_render_format[rb_format];
   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              format << BRW_SURFACE_FORMAT_SHIFT);

   /* reloc */
   assert(mt->offset % mt->cpp == 0);
   surf[1] = brw_emit_reloc(&brw->batch, offset + 4, mt->bo,
                            mt->offset +
                            intel_renderbuffer_get_tile_offsets(irb,
                                                                &tile_x,
                                                                &tile_y),
                            RELOC_WRITE);

   surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   surf[3] = (brw_get_surface_tiling_bits(mt->surf.tiling) |
              (mt->surf.row_pitch - 1) << BRW_SURFACE_PITCH_SHIFT);

   surf[4] = brw_get_surface_num_multisamples(mt->surf.samples);

   assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
   /* Note that the low bits of these fields are missing, so offsets that
    * aren't a multiple of 4 in x or 2 in y can't be represented; the
    * asserts below guard against that.
    */
   assert(tile_x % 4 == 0);
   assert(tile_y % 2 == 0);
   surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
              (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
              (mt->surf.image_alignment_el.height == 4 ?
               BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));

   if (brw->gen < 6) {
      /* _NEW_COLOR */
      if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
          (ctx->Color.BlendEnabled & (1 << unit)))
         surf[0] |= BRW_SURFACE_BLEND_ENABLED;

      if (!ctx->Color.ColorMask[unit][0])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
      if (!ctx->Color.ColorMask[unit][1])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
      if (!ctx->Color.ColorMask[unit][2])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;

      /* Disable writes to the alpha component when the renderbuffer is
       * XRGB.
       */
      if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
          !ctx->Color.ColorMask[unit][3]) {
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
      }
   }

   return offset;
}

/**
 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
 */
void
brw_update_renderbuffer_surfaces(struct brw_context *brw,
                                 const struct gl_framebuffer *fb,
                                 uint32_t render_target_start,
                                 uint32_t *surf_offset)
{
   GLuint i;
   const unsigned int w = _mesa_geometric_width(fb);
   const unsigned int h = _mesa_geometric_height(fb);
   const unsigned int s = _mesa_geometric_samples(fb);

   /* Update surfaces for drawing buffers */
   if (fb->_NumColorDrawBuffers >= 1) {
      for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
         const uint32_t surf_index = render_target_start + i;
         const int flags = (_mesa_geometric_layers(fb) > 0 ?
                               INTEL_RENDERBUFFER_LAYERED : 0) |
                           (brw->draw_aux_buffer_disabled[i] ?
                               INTEL_AUX_BUFFER_DISABLED : 0);

         if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
            surf_offset[surf_index] =
               brw->vtbl.update_renderbuffer_surface(
                  brw, fb->_ColorDrawBuffers[i], flags, i, surf_index);
         } else {
            emit_null_surface_state(brw, w, h, s, &surf_offset[surf_index]);
         }
      }
   } else {
      const uint32_t surf_index = render_target_start;
      emit_null_surface_state(brw, w, h, s, &surf_offset[surf_index]);
   }
}

static void
update_renderbuffer_surfaces(struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;

   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);

   /* _NEW_BUFFERS | _NEW_COLOR */
   const struct gl_framebuffer *fb = ctx->DrawBuffer;
   brw_update_renderbuffer_surfaces(
      brw, fb,
      wm_prog_data->binding_table.render_target_start,
      brw->wm.base.surf_offset);
   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = update_renderbuffer_surfaces,
};

const struct brw_tracked_state gen6_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP,
   },
   .emit = update_renderbuffer_surfaces,
};

static void
update_renderbuffer_read_surfaces(struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;

   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);

   /* BRW_NEW_FRAGMENT_PROGRAM */
   if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
       brw->fragment_program && brw->fragment_program->info.outputs_read) {
      /* _NEW_BUFFERS */
      const struct gl_framebuffer *fb = ctx->DrawBuffer;

      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
         const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
         const unsigned surf_index =
            wm_prog_data->binding_table.render_target_read_start + i;
         uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];

         if (irb) {
            const enum isl_format format = brw->mesa_to_isl_render_format[
               _mesa_get_render_format(ctx, intel_rb_format(irb))];
            assert(isl_format_supports_sampling(&brw->screen->devinfo,
                                                format));

            /* Override the target of the texture if the render buffer is a
             * single slice of a 3D texture (since the minimum array element
             * field of the surface state structure is ignored by the sampler
             * unit for 3D textures on some hardware), or if the render buffer
             * is a 1D array (since shaders always provide the array index
             * coordinate at the Z component to avoid state-dependent
             * recompiles when changing the texture target of the
             * framebuffer).
             */
            const GLenum target =
               (irb->mt->target == GL_TEXTURE_3D &&
                irb->layer_count == 1) ? GL_TEXTURE_2D :
               irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
               irb->mt->target;

            const struct isl_view view = {
               .format = format,
               .base_level = irb->mt_level - irb->mt->first_level,
               .levels = 1,
               .base_array_layer = irb->mt_layer,
               .array_len = irb->layer_count,
               .swizzle = ISL_SWIZZLE_IDENTITY,
               .usage = ISL_SURF_USAGE_TEXTURE_BIT,
            };

            enum isl_aux_usage aux_usage =
               intel_miptree_texture_aux_usage(brw, irb->mt, format);
            if (brw->draw_aux_buffer_disabled[i])
               aux_usage = ISL_AUX_USAGE_NONE;

            brw_emit_surface_state(brw, irb->mt, target, view, aux_usage,
                                   tex_mocs[brw->gen],
                                   surf_offset, surf_index,
                                   0);

         } else {
            emit_null_surface_state(brw,
                                    _mesa_geometric_width(fb),
                                    _mesa_geometric_height(fb),
                                    _mesa_geometric_samples(fb),
                                    surf_offset);
         }
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = update_renderbuffer_read_surfaces,
};

static void
update_stage_texture_surfaces(struct brw_context *brw,
                              const struct gl_program *prog,
                              struct brw_stage_state *stage_state,
                              bool for_gather, uint32_t plane)
{
   if (!prog)
      return;

   struct gl_context *ctx = &brw->ctx;

   uint32_t *surf_offset = stage_state->surf_offset;

   /* BRW_NEW_*_PROG_DATA */
   if (for_gather)
      surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
   else
      surf_offset += stage_state->prog_data->binding_table.plane_start[plane];

   unsigned num_samplers = util_last_bit(prog->SamplersUsed);
   for (unsigned s = 0; s < num_samplers; s++) {
      surf_offset[s] = 0;

      if (prog->SamplersUsed & (1 << s)) {
         const unsigned unit = prog->SamplerUnits[s];

         /* _NEW_TEXTURE */
         if (ctx->Texture.Unit[unit]._Current) {
            brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather, plane);
         }
      }
   }
}


/**
 * Construct SURFACE_STATE objects for enabled textures.
 */
static void
brw_update_texture_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_VERTEX_PROGRAM */
   struct gl_program *vs = (struct gl_program *) brw->vertex_program;

   /* BRW_NEW_TESS_PROGRAMS */
   struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
   struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;

   /* BRW_NEW_GEOMETRY_PROGRAM */
   struct gl_program *gs = (struct gl_program *) brw->geometry_program;

   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct gl_program *fs = (struct gl_program *) brw->fragment_program;

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
   update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
   update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
   update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
   update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);

   /* Emit an alternate set of surface states for gather.  This allows the
    * surface format to be overridden for the gather4 messages only.
    */
   if (brw->gen < 8) {
      if (vs && vs->nir->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
      if (tcs && tcs->nir->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
      if (tes && tes->nir->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
      if (gs && gs->nir->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
      if (fs && fs->nir->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
   }

   if (fs) {
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_GEOMETRY_PROGRAM |
             BRW_NEW_GS_PROG_DATA |
             BRW_NEW_TESS_PROGRAMS |
             BRW_NEW_TCS_PROG_DATA |
             BRW_NEW_TES_PROG_DATA |
             BRW_NEW_TEXTURE_BUFFER |
             BRW_NEW_VERTEX_PROGRAM |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = brw_update_texture_surfaces,
};

static void
brw_update_cs_texture_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_COMPUTE_PROGRAM */
   struct gl_program *cs = (struct gl_program *) brw->compute_program;

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);

   /* Emit an alternate set of surface states for gather.  This allows the
    * surface format to be overridden for the gather4 messages only.
    */
   if (brw->gen < 8) {
      if (cs && cs->nir->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_cs_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_COMPUTE_PROGRAM,
   },
   .emit = brw_update_cs_texture_surfaces,
};


void
brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;

   if (!prog)
      return;

   uint32_t *ubo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ubo_start];

   for (int i = 0; i < prog->info.num_ubos; i++) {
      struct gl_uniform_buffer_binding *binding =
         &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];

      if (binding->BufferObject == ctx->Shared->NullBufferObj) {
         emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
      } else {
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
         if (!binding->AutomaticSize)
            size = MIN2(size, binding->Size);
         struct brw_bo *bo =
            intel_bufferobj_buffer(brw, intel_bo,
                                   binding->Offset,
                                   size, false);
         brw_create_constant_surface(brw, bo, binding->Offset,
                                     size,
                                     &ubo_surf_offsets[i]);
      }
   }

   uint32_t *ssbo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ssbo_start];

   for (int i = 0; i < prog->info.num_ssbos; i++) {
      struct gl_shader_storage_buffer_binding *binding =
         &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];

      if (binding->BufferObject == ctx->Shared->NullBufferObj) {
         emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
      } else {
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
         if (!binding->AutomaticSize)
            size = MIN2(size, binding->Size);
         struct brw_bo *bo =
            intel_bufferobj_buffer(brw, intel_bo,
                                   binding->Offset,
                                   size, true);
         brw_create_buffer_surface(brw, bo, binding->Offset,
                                   size,
                                   &ssbo_surf_offsets[i]);
      }
   }

   stage_state->push_constants_dirty = true;

   if (prog->info.num_ubos || prog->info.num_ssbos)
      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

static void
brw_upload_wm_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog = ctx->FragmentProgram._Current;

   /* BRW_NEW_FS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
}

const struct brw_tracked_state brw_wm_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_wm_ubo_surfaces,
};

static void
brw_upload_cs_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   /* BRW_NEW_CS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog, &brw->cs.base, brw->cs.base.prog_data);
}

const struct brw_tracked_state brw_cs_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_cs_ubo_surfaces,
};

void
brw_upload_abo_surfaces(struct brw_context *brw,
                        const struct gl_program *prog,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;
   uint32_t *surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.abo_start];

   if (prog->info.num_abos) {
      for (unsigned i = 0; i < prog->info.num_abos; i++) {
         struct gl_atomic_buffer_binding *binding =
            &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         struct brw_bo *bo =
            intel_bufferobj_buffer(brw, intel_bo, binding->Offset,
                                   intel_bo->Base.Size - binding->Offset,
                                   true);

         brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
                                       binding->Offset, ISL_FORMAT_RAW,
                                       bo->size - binding->Offset, 1,
                                       RELOC_WRITE);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

static void
brw_upload_wm_abo_surfaces(struct brw_context *brw)
{
   /* _NEW_PROGRAM */
   const struct gl_program *wm = brw->fragment_program;

   if (wm) {
      /* BRW_NEW_FS_PROG_DATA */
      brw_upload_abo_surfaces(brw, wm, &brw->wm.base, brw->wm.base.prog_data);
   }
}

const struct brw_tracked_state brw_wm_abo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_ATOMIC_BUFFER |
             BRW_NEW_BLORP |
             BRW_NEW_BATCH |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_abo_surfaces,
};

static void
brw_upload_cs_abo_surfaces(struct brw_context *brw)
{
   /* _NEW_PROGRAM */
   const struct gl_program *cp = brw->compute_program;

   if (cp) {
      /* BRW_NEW_CS_PROG_DATA */
      brw_upload_abo_surfaces(brw, cp, &brw->cs.base, brw->cs.base.prog_data);
   }
}

const struct brw_tracked_state brw_cs_abo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_ATOMIC_BUFFER |
             BRW_NEW_BLORP |
             BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA,
   },
   .emit = brw_upload_cs_abo_surfaces,
};

static void
brw_upload_cs_image_surfaces(struct brw_context *brw)
{
   /* _NEW_PROGRAM */
   const struct gl_program *cp = brw->compute_program;

   if (cp) {
      /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, cp, &brw->cs.base,
                                brw->cs.base.prog_data);
   }
}

const struct brw_tracked_state brw_cs_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_cs_image_surfaces,
};

static uint32_t
get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   enum isl_format hw_format = brw_isl_format_for_mesa_format(format);
   if (access == GL_WRITE_ONLY) {
      return hw_format;
   } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
      /* Typed surface reads support a very limited subset of the shader
       * image formats.  Translate it into the closest format the
       * hardware supports.
       */
      return isl_lower_storage_image_format(devinfo, hw_format);
   } else {
      /* The hardware doesn't actually support a typed format that we can use
       * so we have to fall back to untyped read/write messages.
       */
      return ISL_FORMAT_RAW;
   }
}
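
/* For example, a write-only RGBA8 image keeps its typed format; a read or
 * read-write image whose format has a typed-read equivalent is lowered to
 * that equivalent; anything else falls back to ISL_FORMAT_RAW and untyped
 * messages.
 */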

static void
update_default_image_param(struct brw_context *brw,
                           struct gl_image_unit *u,
                           unsigned surface_idx,
                           struct brw_image_param *param)
{
   memset(param, 0, sizeof(*param));
   param->surface_idx = surface_idx;
   /* Set the swizzling shifts to all-ones to effectively disable swizzling --
    * see emit_address_calculation() in brw_fs_surface_builder.cpp for a more
    * detailed explanation of these parameters.
    */
   param->swizzling[0] = 0xff;
   param->swizzling[1] = 0xff;
}

static void
update_buffer_image_param(struct brw_context *brw,
                          struct gl_image_unit *u,
                          unsigned surface_idx,
                          struct brw_image_param *param)
{
   struct gl_buffer_object *obj = u->TexObj->BufferObject;
   const uint32_t size = MIN2((uint32_t) u->TexObj->BufferSize, obj->Size);
   update_default_image_param(brw, u, surface_idx, param);

   param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
   param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
}
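
/* E.g. a 4096-byte buffer bound with a 4-byte R32F image format yields
 * size[0] = 1024 texels and stride[0] = 4 bytes per texel.
 */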

static unsigned
get_image_num_layers(const struct intel_mipmap_tree *mt, GLenum target,
                     unsigned level)
{
   if (target == GL_TEXTURE_CUBE_MAP)
      return 6;

   return target == GL_TEXTURE_3D ?
      minify(mt->surf.logical_level0_px.depth, level) :
      mt->surf.logical_level0_px.array_len;
}

static void
update_image_surface(struct brw_context *brw,
                     struct gl_image_unit *u,
                     GLenum access,
                     unsigned surface_idx,
                     uint32_t *surf_offset,
                     struct brw_image_param *param)
{
   if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
      struct gl_texture_object *obj = u->TexObj;
      const unsigned format = get_image_format(brw, u->_ActualFormat, access);

      if (obj->Target == GL_TEXTURE_BUFFER) {
         struct intel_buffer_object *intel_obj =
            intel_buffer_object(obj->BufferObject);
         const unsigned texel_size = (format == ISL_FORMAT_RAW ? 1 :
                                      _mesa_get_format_bytes(u->_ActualFormat));

         brw_emit_buffer_surface_state(
            brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
            format, intel_obj->Base.Size, texel_size,
            access != GL_READ_ONLY ? RELOC_WRITE : 0);

         update_buffer_image_param(brw, u, surface_idx, param);

      } else {
         struct intel_texture_object *intel_obj = intel_texture_object(obj);
         struct intel_mipmap_tree *mt = intel_obj->mt;
         const unsigned num_layers = u->Layered ?
            get_image_num_layers(mt, obj->Target, u->Level) : 1;

         struct isl_view view = {
            .format = format,
            .base_level = obj->MinLevel + u->Level,
            .levels = 1,
            .base_array_layer = obj->MinLayer + u->_Layer,
            .array_len = num_layers,
            .swizzle = ISL_SWIZZLE_IDENTITY,
            .usage = ISL_SURF_USAGE_STORAGE_BIT,
         };

         if (format == ISL_FORMAT_RAW) {
            brw_emit_buffer_surface_state(
               brw, surf_offset, mt->bo, mt->offset,
               format, mt->bo->size - mt->offset, 1 /* pitch */,
               access != GL_READ_ONLY ? RELOC_WRITE : 0);

         } else {
            const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
            assert(!intel_miptree_has_color_unresolved(mt,
                                                       view.base_level, 1,
                                                       view.base_array_layer,
                                                       view.array_len));
            brw_emit_surface_state(brw, mt, mt->target, view,
                                   ISL_AUX_USAGE_NONE, tex_mocs[brw->gen],
                                   surf_offset, surf_index,
                                   access == GL_READ_ONLY ? 0 : RELOC_WRITE);
         }

         isl_surf_fill_image_param(&brw->isl_dev, param, &mt->surf, &view);
         param->surface_idx = surface_idx;
      }

   } else {
      emit_null_surface_state(brw, 1, 1, 1, surf_offset);
      update_default_image_param(brw, u, surface_idx, param);
   }
}

void
brw_upload_image_surfaces(struct brw_context *brw,
                          const struct gl_program *prog,
                          struct brw_stage_state *stage_state,
                          struct brw_stage_prog_data *prog_data)
{
   assert(prog);
   struct gl_context *ctx = &brw->ctx;

   if (prog->info.num_images) {
      for (unsigned i = 0; i < prog->info.num_images; i++) {
         struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
         const unsigned surf_idx = prog_data->binding_table.image_start + i;

         update_image_surface(brw, u, prog->sh.ImageAccess[i],
                              surf_idx,
                              &stage_state->surf_offset[surf_idx],
                              &prog_data->image_param[i]);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
      /* This may have changed the image metadata that depends on the context
       * image unit state and is passed to the program as uniforms; make sure
       * that push and pull constants are reuploaded.
       */
      brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
   }
}

static void
brw_upload_wm_image_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct gl_program *wm = brw->fragment_program;

   if (wm) {
      /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, wm, &brw->wm.base,
                                brw->wm.base.prog_data);
   }
}

const struct brw_tracked_state brw_wm_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_wm_image_surfaces,
};

void
gen4_init_vtable_surface_functions(struct brw_context *brw)
{
   brw->vtbl.update_renderbuffer_surface = gen4_update_renderbuffer_surface;
}

void
gen6_init_vtable_surface_functions(struct brw_context *brw)
{
   gen4_init_vtable_surface_functions(brw);
   brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
}

static void
brw_upload_cs_work_groups_surface(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
   /* BRW_NEW_CS_PROG_DATA */
   const struct brw_cs_prog_data *cs_prog_data =
      brw_cs_prog_data(brw->cs.base.prog_data);

   if (prog && cs_prog_data->uses_num_work_groups) {
      const unsigned surf_idx =
         cs_prog_data->binding_table.work_groups_start;
      uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
      struct brw_bo *bo;
      uint32_t bo_offset;

      if (brw->compute.num_work_groups_bo == NULL) {
         bo = NULL;
         intel_upload_data(brw,
                           (void *) brw->compute.num_work_groups,
                           3 * sizeof(GLuint),
                           sizeof(GLuint),
                           &bo,
                           &bo_offset);
      } else {
         bo = brw->compute.num_work_groups_bo;
         bo_offset = brw->compute.num_work_groups_offset;
      }

      brw_emit_buffer_surface_state(brw, surf_offset,
                                    bo, bo_offset,
                                    ISL_FORMAT_RAW,
                                    3 * sizeof(GLuint), 1,
                                    RELOC_WRITE);
      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_cs_work_groups_surface = {
   .dirty = {
      .brw = BRW_NEW_BLORP |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_CS_WORK_GROUPS
   },
   .emit = brw_upload_cs_work_groups_surface,
};