i965: Make a BRW_NEW_FAST_CLEAR_COLOR dirty bit.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_wm_surface_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "compiler/nir/nir.h"
34 #include "main/context.h"
35 #include "main/blend.h"
36 #include "main/mtypes.h"
37 #include "main/samplerobj.h"
38 #include "main/shaderimage.h"
39 #include "main/teximage.h"
40 #include "program/prog_parameter.h"
41 #include "program/prog_instruction.h"
42 #include "main/framebuffer.h"
43 #include "main/shaderapi.h"
44
45 #include "isl/isl.h"
46
47 #include "intel_mipmap_tree.h"
48 #include "intel_batchbuffer.h"
49 #include "intel_tex.h"
50 #include "intel_fbo.h"
51 #include "intel_buffer_objects.h"
52
53 #include "brw_context.h"
54 #include "brw_state.h"
55 #include "brw_defines.h"
56 #include "brw_wm.h"
57
58 enum {
59 INTEL_RENDERBUFFER_LAYERED = 1 << 0,
60 INTEL_AUX_BUFFER_DISABLED = 1 << 1,
61 };
62
63 uint32_t tex_mocs[] = {
64 [7] = GEN7_MOCS_L3,
65 [8] = BDW_MOCS_WB,
66 [9] = SKL_MOCS_WB,
67 [10] = CNL_MOCS_WB,
68 };
69
70 uint32_t rb_mocs[] = {
71 [7] = GEN7_MOCS_L3,
72 [8] = BDW_MOCS_PTE,
73 [9] = SKL_MOCS_PTE,
74 [10] = CNL_MOCS_PTE,
75 };
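/* Note: both MOCS tables above are indexed by hardware generation
 * (tex_mocs[brw->gen] and rb_mocs[brw->gen] below), so only entries for
 * gen7 through gen10 are defined.
 */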
76
77 static void
78 get_isl_surf(struct brw_context *brw, struct intel_mipmap_tree *mt,
79 GLenum target, struct isl_view *view,
80 uint32_t *tile_x, uint32_t *tile_y,
81 uint32_t *offset, struct isl_surf *surf)
82 {
83 *surf = mt->surf;
84
85 const enum isl_dim_layout dim_layout =
86 get_isl_dim_layout(&brw->screen->devinfo, mt->surf.tiling, target);
87
88 if (surf->dim_layout == dim_layout)
89 return;
90
91 /* The layout of the specified texture target is not compatible with the
92 * actual layout of the miptree structure in memory -- you're entering
93 * dangerous territory. This can only work if you intend to access a
94 * single level and slice of the texture, and the hardware supports the
95 * tile offset feature in order to allow non-tile-aligned base offsets,
96 * since we'll have to point the hardware to the first texel of the
97 * level instead of relying on the usual base level/layer
98 * controls.
99 */
100 assert(brw->has_surface_tile_offset);
101 assert(view->levels == 1 && view->array_len == 1);
102 assert(*tile_x == 0 && *tile_y == 0);
103
104 *offset += intel_miptree_get_tile_offsets(mt, view->base_level,
105 view->base_array_layer,
106 tile_x, tile_y);
107
108 /* Minify the logical dimensions of the texture. */
109 const unsigned l = view->base_level - mt->first_level;
110 surf->logical_level0_px.width = minify(surf->logical_level0_px.width, l);
111 surf->logical_level0_px.height = surf->dim <= ISL_SURF_DIM_1D ? 1 :
112 minify(surf->logical_level0_px.height, l);
113 surf->logical_level0_px.depth = surf->dim <= ISL_SURF_DIM_2D ? 1 :
114 minify(surf->logical_level0_px.depth, l);
115
116 /* Only the base level and layer can be addressed with the overridden
117 * layout.
118 */
119 surf->logical_level0_px.array_len = 1;
120 surf->levels = 1;
121 surf->dim_layout = dim_layout;
122
123 /* The requested slice of the texture is now at the base level and
124 * layer.
125 */
126 view->base_level = 0;
127 view->base_array_layer = 0;
128 }
129
130 static void
131 brw_emit_surface_state(struct brw_context *brw,
132 struct intel_mipmap_tree *mt,
133 GLenum target, struct isl_view view,
134 enum isl_aux_usage aux_usage,
135 uint32_t mocs, uint32_t *surf_offset, int surf_index,
136 unsigned reloc_flags)
137 {
138 uint32_t tile_x = mt->level[0].level_x;
139 uint32_t tile_y = mt->level[0].level_y;
140 uint32_t offset = mt->offset;
141
142 struct isl_surf surf;
143
144 get_isl_surf(brw, mt, target, &view, &tile_x, &tile_y, &offset, &surf);
145
146 union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };
147
148 struct brw_bo *aux_bo;
149 struct isl_surf *aux_surf = NULL;
150 uint64_t aux_offset = 0;
151 switch (aux_usage) {
152 case ISL_AUX_USAGE_MCS:
153 case ISL_AUX_USAGE_CCS_D:
154 case ISL_AUX_USAGE_CCS_E:
155 aux_surf = &mt->mcs_buf->surf;
156 aux_bo = mt->mcs_buf->bo;
157 aux_offset = mt->mcs_buf->offset;
158 break;
159
160 case ISL_AUX_USAGE_HIZ:
161 aux_surf = &mt->hiz_buf->surf;
162 aux_bo = mt->hiz_buf->bo;
163 aux_offset = 0;
164 break;
165
166 case ISL_AUX_USAGE_NONE:
167 break;
168 }
169
170 if (aux_usage != ISL_AUX_USAGE_NONE) {
171 /* We only really need a clear color if we also have an auxiliary
172 * surface. Without one, it does nothing.
173 */
174 clear_color = mt->fast_clear_color;
175 }
176
177 void *state = brw_state_batch(brw,
178 brw->isl_dev.ss.size,
179 brw->isl_dev.ss.align,
180 surf_offset);
181
182 isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
183 .address = brw_emit_reloc(&brw->batch,
184 *surf_offset + brw->isl_dev.ss.addr_offset,
185 mt->bo, offset, reloc_flags),
186 .aux_surf = aux_surf, .aux_usage = aux_usage,
187 .aux_address = aux_offset,
188 .mocs = mocs, .clear_color = clear_color,
189 .x_offset_sa = tile_x, .y_offset_sa = tile_y);
190 if (aux_surf) {
191 /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
192 * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
193 * contain other control information. Since buffer addresses are always
194 * on 4k boundaries (and thus have their lower 12 bits zero), we can use
195 * an ordinary reloc to do the necessary address translation.
196 *
197 * FIXME: move to the point of assignment.
198 */
199 assert((aux_offset & 0xfff) == 0);
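/* Put differently (a sketch of the math, assuming brw_emit_reloc() returns
 * the BO's GPU address plus the delta passed in): since both the BO address
 * and aux_offset are 4k-aligned, the addition can never carry into the low
 * 12 bits, so the control bits isl packed into *aux_addr survive:
 *
 *    result = (bo_address + aux_offset) | control_bits_in_low_12_bits
 */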
200 uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
201 *aux_addr = brw_emit_reloc(&brw->batch,
202 *surf_offset +
203 brw->isl_dev.ss.aux_addr_offset,
204 aux_bo, *aux_addr,
205 reloc_flags);
206 }
207 }
208
209 uint32_t
210 brw_update_renderbuffer_surface(struct brw_context *brw,
211 struct gl_renderbuffer *rb,
212 uint32_t flags, unsigned unit,
213 uint32_t surf_index)
214 {
215 struct gl_context *ctx = &brw->ctx;
216 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
217 struct intel_mipmap_tree *mt = irb->mt;
218
219 enum isl_aux_usage aux_usage =
220 intel_miptree_render_aux_usage(brw, mt, ctx->Color.sRGBEnabled,
221 ctx->Color.BlendEnabled & (1 << unit));
222
223 if (flags & INTEL_AUX_BUFFER_DISABLED) {
224 assert(brw->gen >= 9);
225 aux_usage = ISL_AUX_USAGE_NONE;
226 }
227
228 assert(brw_render_target_supported(brw, rb));
229
230 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
231 if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
232 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
233 __func__, _mesa_get_format_name(rb_format));
234 }
235
236 struct isl_view view = {
237 .format = brw->mesa_to_isl_render_format[rb_format],
238 .base_level = irb->mt_level - irb->mt->first_level,
239 .levels = 1,
240 .base_array_layer = irb->mt_layer,
241 .array_len = MAX2(irb->layer_count, 1),
242 .swizzle = ISL_SWIZZLE_IDENTITY,
243 .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
244 };
245
246 uint32_t offset;
247 brw_emit_surface_state(brw, mt, mt->target, view, aux_usage,
248 rb_mocs[brw->gen],
249 &offset, surf_index,
250 RELOC_WRITE);
251 return offset;
252 }
253
254 GLuint
255 translate_tex_target(GLenum target)
256 {
257 switch (target) {
258 case GL_TEXTURE_1D:
259 case GL_TEXTURE_1D_ARRAY_EXT:
260 return BRW_SURFACE_1D;
261
262 case GL_TEXTURE_RECTANGLE_NV:
263 return BRW_SURFACE_2D;
264
265 case GL_TEXTURE_2D:
266 case GL_TEXTURE_2D_ARRAY_EXT:
267 case GL_TEXTURE_EXTERNAL_OES:
268 case GL_TEXTURE_2D_MULTISAMPLE:
269 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
270 return BRW_SURFACE_2D;
271
272 case GL_TEXTURE_3D:
273 return BRW_SURFACE_3D;
274
275 case GL_TEXTURE_CUBE_MAP:
276 case GL_TEXTURE_CUBE_MAP_ARRAY:
277 return BRW_SURFACE_CUBE;
278
279 default:
280 unreachable("not reached");
281 }
282 }
283
284 uint32_t
285 brw_get_surface_tiling_bits(enum isl_tiling tiling)
286 {
287 switch (tiling) {
288 case ISL_TILING_X:
289 return BRW_SURFACE_TILED;
290 case ISL_TILING_Y0:
291 return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
292 default:
293 return 0;
294 }
295 }
296
297
298 uint32_t
299 brw_get_surface_num_multisamples(unsigned num_samples)
300 {
301 if (num_samples > 1)
302 return BRW_SURFACE_MULTISAMPLECOUNT_4;
303 else
304 return BRW_SURFACE_MULTISAMPLECOUNT_1;
305 }
306
307 /**
308 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
309 * swizzling.
310 */
311 int
312 brw_get_texture_swizzle(const struct gl_context *ctx,
313 const struct gl_texture_object *t)
314 {
315 const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
316
317 int swizzles[SWIZZLE_NIL + 1] = {
318 SWIZZLE_X,
319 SWIZZLE_Y,
320 SWIZZLE_Z,
321 SWIZZLE_W,
322 SWIZZLE_ZERO,
323 SWIZZLE_ONE,
324 SWIZZLE_NIL
325 };
326
327 if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
328 img->_BaseFormat == GL_DEPTH_STENCIL) {
329 GLenum depth_mode = t->DepthMode;
330
331 /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
332 * with depth component data specified with a sized internal format.
333 * Otherwise, it's left at the old default, GL_LUMINANCE.
334 */
335 if (_mesa_is_gles3(ctx) &&
336 img->InternalFormat != GL_DEPTH_COMPONENT &&
337 img->InternalFormat != GL_DEPTH_STENCIL) {
338 depth_mode = GL_RED;
339 }
340
341 switch (depth_mode) {
342 case GL_ALPHA:
343 swizzles[0] = SWIZZLE_ZERO;
344 swizzles[1] = SWIZZLE_ZERO;
345 swizzles[2] = SWIZZLE_ZERO;
346 swizzles[3] = SWIZZLE_X;
347 break;
348 case GL_LUMINANCE:
349 swizzles[0] = SWIZZLE_X;
350 swizzles[1] = SWIZZLE_X;
351 swizzles[2] = SWIZZLE_X;
352 swizzles[3] = SWIZZLE_ONE;
353 break;
354 case GL_INTENSITY:
355 swizzles[0] = SWIZZLE_X;
356 swizzles[1] = SWIZZLE_X;
357 swizzles[2] = SWIZZLE_X;
358 swizzles[3] = SWIZZLE_X;
359 break;
360 case GL_RED:
361 swizzles[0] = SWIZZLE_X;
362 swizzles[1] = SWIZZLE_ZERO;
363 swizzles[2] = SWIZZLE_ZERO;
364 swizzles[3] = SWIZZLE_ONE;
365 break;
366 }
367 }
368
369 GLenum datatype = _mesa_get_format_datatype(img->TexFormat);
370
371 /* If the texture's format is alpha-only, force R, G, and B to
372 * 0.0. Similarly, if the texture's format has no alpha channel,
373 * force the alpha value read to 1.0. This allows for the
374 * implementation to use an RGBA texture for any of these formats
375 * without leaking any unexpected values.
376 */
377 switch (img->_BaseFormat) {
378 case GL_ALPHA:
379 swizzles[0] = SWIZZLE_ZERO;
380 swizzles[1] = SWIZZLE_ZERO;
381 swizzles[2] = SWIZZLE_ZERO;
382 break;
383 case GL_LUMINANCE:
384 if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
385 swizzles[0] = SWIZZLE_X;
386 swizzles[1] = SWIZZLE_X;
387 swizzles[2] = SWIZZLE_X;
388 swizzles[3] = SWIZZLE_ONE;
389 }
390 break;
391 case GL_LUMINANCE_ALPHA:
392 if (datatype == GL_SIGNED_NORMALIZED) {
393 swizzles[0] = SWIZZLE_X;
394 swizzles[1] = SWIZZLE_X;
395 swizzles[2] = SWIZZLE_X;
396 swizzles[3] = SWIZZLE_W;
397 }
398 break;
399 case GL_INTENSITY:
400 if (datatype == GL_SIGNED_NORMALIZED) {
401 swizzles[0] = SWIZZLE_X;
402 swizzles[1] = SWIZZLE_X;
403 swizzles[2] = SWIZZLE_X;
404 swizzles[3] = SWIZZLE_X;
405 }
406 break;
407 case GL_RED:
408 case GL_RG:
409 case GL_RGB:
410 if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0 ||
411 img->TexFormat == MESA_FORMAT_RGB_DXT1 ||
412 img->TexFormat == MESA_FORMAT_SRGB_DXT1)
413 swizzles[3] = SWIZZLE_ONE;
414 break;
415 }
416
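   /* A worked example: a GL_DEPTH_COMPONENT texture with DepthMode
    * GL_LUMINANCE takes the (X, X, X, ONE) case above, so a depth value d
    * reaches the shader as (d, d, d, 1.0) once MAKE_SWIZZLE4() below has
    * composed it with the application's EXT_texture_swizzle state.
    */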
417 return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
418 swizzles[GET_SWZ(t->_Swizzle, 1)],
419 swizzles[GET_SWZ(t->_Swizzle, 2)],
420 swizzles[GET_SWZ(t->_Swizzle, 3)]);
421 }
422
423 /**
424 * Convert a swizzle enumeration (e.g. SWIZZLE_X) to one of the Gen7.5+
425 * "Shader Channel Select" enumerations (e.g. HSW_SCS_RED). The mappings are
426 *
427 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
428 * 0 1 2 3 4 5
429 * 4 5 6 7 0 1
430 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
431 *
432 * which is simply adding 4 then modding by 8 (or anding with 7).
433 *
434 * We then may need to apply workarounds for textureGather hardware bugs.
435 */
436 static unsigned
437 swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
438 {
439 unsigned scs = (swizzle + 4) & 7;
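   /* e.g. SWIZZLE_X (0) becomes (0 + 4) & 7 = 4 = HSW_SCS_RED, and
    * SWIZZLE_ZERO (4) becomes (4 + 4) & 7 = 0 = HSW_SCS_ZERO, matching the
    * table in the comment above.
    */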
440
441 return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
442 }
443
444 static bool
445 brw_aux_surface_disabled(const struct brw_context *brw,
446 const struct intel_mipmap_tree *mt)
447 {
448 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
449
450 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
451 const struct intel_renderbuffer *irb =
452 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
453
454 if (irb && irb->mt == mt)
455 return brw->draw_aux_buffer_disabled[i];
456 }
457
458 return false;
459 }
460
461 void
462 brw_update_texture_surface(struct gl_context *ctx,
463 unsigned unit,
464 uint32_t *surf_offset,
465 bool for_gather,
466 uint32_t plane)
467 {
468 struct brw_context *brw = brw_context(ctx);
469 struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
470
471 if (obj->Target == GL_TEXTURE_BUFFER) {
472 brw_update_buffer_texture_surface(ctx, unit, surf_offset);
473
474 } else {
475 struct intel_texture_object *intel_obj = intel_texture_object(obj);
476 struct intel_mipmap_tree *mt = intel_obj->mt;
477
478 if (plane > 0) {
479 if (mt->plane[plane - 1] == NULL)
480 return;
481 mt = mt->plane[plane - 1];
482 }
483
484 struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
485 /* If this is a view with restricted NumLayers, then our effective depth
486 * is not just the miptree depth.
487 */
488 unsigned view_num_layers;
489 if (obj->Immutable && obj->Target != GL_TEXTURE_3D) {
490 view_num_layers = obj->NumLayers;
491 } else {
492 view_num_layers = mt->surf.dim == ISL_SURF_DIM_3D ?
493 mt->surf.logical_level0_px.depth :
494 mt->surf.logical_level0_px.array_len;
495 }
496
497 /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
498 * texturing functions that return a float, as our code generation always
499 * selects the .x channel (which would always be 0).
500 */
501 struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
502 const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
503 (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
504 firstImage->_BaseFormat == GL_DEPTH_STENCIL);
505 const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
506 brw_get_texture_swizzle(&brw->ctx, obj));
507
508 mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
509 enum isl_format format = translate_tex_format(brw, mesa_fmt,
510 sampler->sRGBDecode);
511
512 /* Implement gen6 and gen7 gather work-around */
513 bool need_green_to_blue = false;
514 if (for_gather) {
515 if (brw->gen == 7 && (format == ISL_FORMAT_R32G32_FLOAT ||
516 format == ISL_FORMAT_R32G32_SINT ||
517 format == ISL_FORMAT_R32G32_UINT)) {
518 format = ISL_FORMAT_R32G32_FLOAT_LD;
519 need_green_to_blue = brw->is_haswell;
520 } else if (brw->gen == 6) {
521 /* Sandybridge's gather4 message is broken for integer formats.
522 * To work around this, we pretend the surface is UNORM for
523 * 8 or 16-bit formats, and emit shader instructions to recover
524 * the real INT/UINT value. For 32-bit formats, we pretend
525 * the surface is FLOAT, and simply reinterpret the resulting
526 * bits.
527 */
528 switch (format) {
529 case ISL_FORMAT_R8_SINT:
530 case ISL_FORMAT_R8_UINT:
531 format = ISL_FORMAT_R8_UNORM;
532 break;
533
534 case ISL_FORMAT_R16_SINT:
535 case ISL_FORMAT_R16_UINT:
536 format = ISL_FORMAT_R16_UNORM;
537 break;
538
539 case ISL_FORMAT_R32_SINT:
540 case ISL_FORMAT_R32_UINT:
541 format = ISL_FORMAT_R32_FLOAT;
542 break;
543
544 default:
545 break;
546 }
547 }
548 }
549
550 if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
551 if (brw->gen <= 7) {
552 assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
553 mt = mt->r8stencil_mt;
554 } else {
555 mt = mt->stencil_mt;
556 }
557 format = ISL_FORMAT_R8_UINT;
558 } else if (brw->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
559 assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
560 mt = mt->r8stencil_mt;
561 format = ISL_FORMAT_R8_UINT;
562 }
563
564 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
565
566 struct isl_view view = {
567 .format = format,
568 .base_level = obj->MinLevel + obj->BaseLevel,
569 .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
570 .base_array_layer = obj->MinLayer,
571 .array_len = view_num_layers,
572 .swizzle = {
573 .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
574 .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
575 .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
576 .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
577 },
578 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
579 };
580
581 if (obj->Target == GL_TEXTURE_CUBE_MAP ||
582 obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
583 view.usage |= ISL_SURF_USAGE_CUBE_BIT;
584
585 enum isl_aux_usage aux_usage =
586 intel_miptree_texture_aux_usage(brw, mt, format);
587
588 if (brw_aux_surface_disabled(brw, mt))
589 aux_usage = ISL_AUX_USAGE_NONE;
590
591 brw_emit_surface_state(brw, mt, mt->target, view, aux_usage,
592 tex_mocs[brw->gen],
593 surf_offset, surf_index,
594 0);
595 }
596 }
597
598 void
599 brw_emit_buffer_surface_state(struct brw_context *brw,
600 uint32_t *out_offset,
601 struct brw_bo *bo,
602 unsigned buffer_offset,
603 unsigned surface_format,
604 unsigned buffer_size,
605 unsigned pitch,
606 unsigned reloc_flags)
607 {
608 uint32_t *dw = brw_state_batch(brw,
609 brw->isl_dev.ss.size,
610 brw->isl_dev.ss.align,
611 out_offset);
612
613 isl_buffer_fill_state(&brw->isl_dev, dw,
614 .address = !bo ? buffer_offset :
615 brw_emit_reloc(&brw->batch,
616 *out_offset + brw->isl_dev.ss.addr_offset,
617 bo, buffer_offset,
618 reloc_flags),
619 .size = buffer_size,
620 .format = surface_format,
621 .stride = pitch,
622 .mocs = tex_mocs[brw->gen]);
623 }
624
625 void
626 brw_update_buffer_texture_surface(struct gl_context *ctx,
627 unsigned unit,
628 uint32_t *surf_offset)
629 {
630 struct brw_context *brw = brw_context(ctx);
631 struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
632 struct intel_buffer_object *intel_obj =
633 intel_buffer_object(tObj->BufferObject);
634 uint32_t size = tObj->BufferSize;
635 struct brw_bo *bo = NULL;
636 mesa_format format = tObj->_BufferObjectFormat;
637 const enum isl_format isl_format = brw_isl_format_for_mesa_format(format);
638 int texel_size = _mesa_get_format_bytes(format);
639
640 if (intel_obj) {
641 size = MIN2(size, intel_obj->Base.Size);
642 bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size,
643 false);
644 }
645
646 /* The ARB_texture_buffer_object specification says:
647 *
648 * "The number of texels in the buffer texture's texel array is given by
649 *
650 * floor(<buffer_size> / (<components> * sizeof(<base_type>))),
651 *
652 * where <buffer_size> is the size of the buffer object, in basic
653 * machine units and <components> and <base_type> are the element count
654 * and base data type for elements, as specified in Table X.1. The
655 * number of texels in the texel array is then clamped to the
656 * implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
657 *
658 * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
659 * so that when ISL divides by stride to obtain the number of texels, that
660 * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
661 */
662 size = MIN2(size, ctx->Const.MaxTextureBufferSize * (unsigned) texel_size);
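   /* A worked example (the limit itself is implementation-dependent): for a
    * MESA_FORMAT_RGBA_FLOAT32 buffer texture, texel_size is 16, so a
    * MaxTextureBufferSize of 2^27 texels clamps size to 2^31 bytes, and
    * ISL's division by the 16-byte stride yields at most 2^27 texels.
    */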
663
664 if (isl_format == ISL_FORMAT_UNSUPPORTED) {
665 _mesa_problem(NULL, "bad format %s for texture buffer\n",
666 _mesa_get_format_name(format));
667 }
668
669 brw_emit_buffer_surface_state(brw, surf_offset, bo,
670 tObj->BufferOffset,
671 isl_format,
672 size,
673 texel_size,
674 0);
675 }
676
677 /**
678 * Create the constant buffer surface. Vertex/fragment shader constants will be
679 * read from this buffer with Data Port Read instructions/messages.
680 */
681 void
682 brw_create_constant_surface(struct brw_context *brw,
683 struct brw_bo *bo,
684 uint32_t offset,
685 uint32_t size,
686 uint32_t *out_offset)
687 {
688 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
689 ISL_FORMAT_R32G32B32A32_FLOAT,
690 size, 1, 0);
691 }
692
693 /**
694 * Create the buffer surface. Shader buffer variables will be
695 * read from / written to this buffer with Data Port Read/Write
696 * instructions/messages.
697 */
698 void
699 brw_create_buffer_surface(struct brw_context *brw,
700 struct brw_bo *bo,
701 uint32_t offset,
702 uint32_t size,
703 uint32_t *out_offset)
704 {
705 /* Use a raw surface so we can reuse existing untyped read/write/atomic
706 * messages. We need these specifically for the fragment shader since they
707 * include a pixel mask header that is needed to ensure correct behavior
708 * with helper invocations, which cannot write to the buffer.
709 */
710 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
711 ISL_FORMAT_RAW,
712 size, 1, RELOC_WRITE);
713 }
714
715 /**
716 * Set up a binding table entry for use by stream output logic (transform
717 * feedback).
718 *
719 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
720 */
721 void
722 brw_update_sol_surface(struct brw_context *brw,
723 struct gl_buffer_object *buffer_obj,
724 uint32_t *out_offset, unsigned num_vector_components,
725 unsigned stride_dwords, unsigned offset_dwords)
726 {
727 struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
728 uint32_t offset_bytes = 4 * offset_dwords;
729 struct brw_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
730 offset_bytes,
731 buffer_obj->Size - offset_bytes,
732 true);
733 uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
734 uint32_t pitch_minus_1 = 4*stride_dwords - 1;
735 size_t size_dwords = buffer_obj->Size / 4;
736 uint32_t buffer_size_minus_1, width, height, depth, surface_format;
737
738 /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
739 * too big to map using a single binding table entry?
740 */
741 assert((size_dwords - offset_dwords) / stride_dwords
742 <= BRW_MAX_NUM_BUFFER_ENTRIES);
743
744 if (size_dwords > offset_dwords + num_vector_components) {
745 /* There is room for at least 1 transform feedback output in the buffer.
746 * Compute the number of additional transform feedback outputs the
747 * buffer has room for.
748 */
749 buffer_size_minus_1 =
750 (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
751 } else {
752 /* There isn't even room for a single transform feedback output in the
753 * buffer. We can't configure the binding table entry to prevent output
754 * entirely; we'll have to rely on the geometry shader to detect
755 * overflow. But to minimize the damage in case of a bug, set up the
756 * binding table entry to just allow a single output.
757 */
758 buffer_size_minus_1 = 0;
759 }
760 width = buffer_size_minus_1 & 0x7f;
761 height = (buffer_size_minus_1 & 0xfff80) >> 7;
762 depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
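   /* The 27-bit entry count is thus split 7/13/7 across the width, height
    * and depth fields; e.g. buffer_size_minus_1 = 0x12345 packs as
    * width = 0x45, height = 0x246, depth = 0.
    */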
763
764 switch (num_vector_components) {
765 case 1:
766 surface_format = ISL_FORMAT_R32_FLOAT;
767 break;
768 case 2:
769 surface_format = ISL_FORMAT_R32G32_FLOAT;
770 break;
771 case 3:
772 surface_format = ISL_FORMAT_R32G32B32_FLOAT;
773 break;
774 case 4:
775 surface_format = ISL_FORMAT_R32G32B32A32_FLOAT;
776 break;
777 default:
778 unreachable("Invalid vector size for transform feedback output");
779 }
780
781 surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
782 BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
783 surface_format << BRW_SURFACE_FORMAT_SHIFT |
784 BRW_SURFACE_RC_READ_WRITE;
785 surf[1] = brw_emit_reloc(&brw->batch,
786 *out_offset + 4, bo, offset_bytes, RELOC_WRITE);
787 surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
788 height << BRW_SURFACE_HEIGHT_SHIFT);
789 surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
790 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
791 surf[4] = 0;
792 surf[5] = 0;
793 }
794
795 /* Creates a new WM constant buffer reflecting the current fragment program's
796 * constants, if needed by the fragment program.
797 *
798 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
799 * state atom.
800 */
801 static void
802 brw_upload_wm_pull_constants(struct brw_context *brw)
803 {
804 struct brw_stage_state *stage_state = &brw->wm.base;
805 /* BRW_NEW_FRAGMENT_PROGRAM */
806 struct brw_program *fp = (struct brw_program *) brw->fragment_program;
807 /* BRW_NEW_FS_PROG_DATA */
808 struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;
809
810 _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
811 /* _NEW_PROGRAM_CONSTANTS */
812 brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
813 stage_state, prog_data);
814 }
815
816 const struct brw_tracked_state brw_wm_pull_constants = {
817 .dirty = {
818 .mesa = _NEW_PROGRAM_CONSTANTS,
819 .brw = BRW_NEW_BATCH |
820 BRW_NEW_BLORP |
821 BRW_NEW_FRAGMENT_PROGRAM |
822 BRW_NEW_FS_PROG_DATA,
823 },
824 .emit = brw_upload_wm_pull_constants,
825 };
826
827 /**
828 * Creates a null renderbuffer surface.
829 *
830 * This is used when the shader doesn't write to any color output. An FB
831 * write to target 0 will still be emitted, because that's how the thread is
832 * terminated (and computed depth is returned), so we need to have the
833 * hardware discard the target 0 color output.
834 */
835 static void
836 emit_null_surface_state(struct brw_context *brw,
837 unsigned width,
838 unsigned height,
839 unsigned samples,
840 uint32_t *out_offset)
841 {
842 uint32_t *surf = brw_state_batch(brw,
843 brw->isl_dev.ss.size,
844 brw->isl_dev.ss.align,
845 out_offset);
846
847 if (brw->gen != 6 || samples <= 1) {
848 isl_null_fill_state(&brw->isl_dev, surf,
849 isl_extent3d(width, height, 1));
850 return;
851 }
852
853 /* On Gen6, null render targets seem to cause GPU hangs when multisampling.
854 * So work around this problem by rendering into a dummy color buffer.
855 *
856 * To decrease the amount of memory needed by the workaround buffer, we
857 * set its pitch to 128 bytes (the width of a Y tile). This means that
858 * the amount of memory needed for the workaround buffer is
859 * (width_in_tiles + height_in_tiles - 1) tiles.
860 *
861 * Note that since the workaround buffer will be interpreted by the
862 * hardware as an interleaved multisampled buffer, we need to compute
863 * width_in_tiles and height_in_tiles by dividing the width and height
864 * by 16 rather than the normal Y-tile size of 32.
865 */
866 unsigned width_in_tiles = ALIGN(width, 16) / 16;
867 unsigned height_in_tiles = ALIGN(height, 16) / 16;
868 unsigned pitch_minus_1 = 127;
869 unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
870 brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
871 size_needed);
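   /* For example, a 1920x1080 target needs ALIGN(1920, 16) / 16 = 120 by
    * ALIGN(1080, 16) / 16 = 68 tiles, i.e. (120 + 68 - 1) * 4096 bytes or
    * roughly 748 kB -- far less than a full dummy 4x color buffer.
    */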
872
873 surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
874 ISL_FORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
875 surf[1] = brw_emit_reloc(&brw->batch, *out_offset + 4,
876 brw->wm.multisampled_null_render_target_bo,
877 0, RELOC_WRITE);
878
879 surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
880 (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
881
882 /* From the Sandy Bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
883 * Notes):
884 *
885 * If Surface Type is SURFTYPE_NULL, this field must be TRUE
886 */
887 surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
888 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
889 surf[4] = BRW_SURFACE_MULTISAMPLECOUNT_4;
890 surf[5] = 0;
891 }
892
893 /**
894 * Sets up a surface state structure to point at the given region.
895 * While it is only used for the front/back buffer currently, it should be
896 * usable for further buffers when doing ARB_draw_buffers support.
897 */
898 static uint32_t
899 gen4_update_renderbuffer_surface(struct brw_context *brw,
900 struct gl_renderbuffer *rb,
901 uint32_t flags, unsigned unit,
902 uint32_t surf_index)
903 {
904 struct gl_context *ctx = &brw->ctx;
905 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
906 struct intel_mipmap_tree *mt = irb->mt;
907 uint32_t *surf;
908 uint32_t tile_x, tile_y;
909 enum isl_format format;
910 uint32_t offset;
911 /* _NEW_BUFFERS */
912 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
913 /* BRW_NEW_FS_PROG_DATA */
914
915 assert(!(flags & INTEL_RENDERBUFFER_LAYERED));
916 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
917
918 if (rb->TexImage && !brw->has_surface_tile_offset) {
919 intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);
920
921 if (tile_x != 0 || tile_y != 0) {
922 /* Original gen4 hardware couldn't draw to a non-tile-aligned
923 * destination in a miptree unless you actually setup your renderbuffer
924 * as a miptree and used the fragile lod/array_index/etc. controls to
925 * select the image. So, instead, we just make a new single-level
926 * miptree and render into that.
927 */
928 intel_renderbuffer_move_to_temp(brw, irb, false);
929 assert(irb->align_wa_mt);
930 mt = irb->align_wa_mt;
931 }
932 }
933
934 surf = brw_state_batch(brw, 6 * 4, 32, &offset);
935
936 format = brw->mesa_to_isl_render_format[rb_format];
937 if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
938 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
939 __func__, _mesa_get_format_name(rb_format));
940 }
941
942 surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
943 format << BRW_SURFACE_FORMAT_SHIFT);
944
945 /* reloc */
946 assert(mt->offset % mt->cpp == 0);
947 surf[1] = brw_emit_reloc(&brw->batch, offset + 4, mt->bo,
948 mt->offset +
949 intel_renderbuffer_get_tile_offsets(irb,
950 &tile_x,
951 &tile_y),
952 RELOC_WRITE);
953
954 surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
955 (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
956
957 surf[3] = (brw_get_surface_tiling_bits(mt->surf.tiling) |
958 (mt->surf.row_pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
959
960 surf[4] = brw_get_surface_num_multisamples(mt->surf.samples);
961
962 assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
963 /* Note that the low bits of these fields are missing: only x offsets
964 * divisible by 4 and y offsets divisible by 2 can be expressed -- hence
965 * the asserts below. */
966 assert(tile_x % 4 == 0);
967 assert(tile_y % 2 == 0);
968 surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
969 (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
970 (mt->surf.image_alignment_el.height == 4 ?
971 BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));
972
973 if (brw->gen < 6) {
974 /* _NEW_COLOR */
975 if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
976 (ctx->Color.BlendEnabled & (1 << unit)))
977 surf[0] |= BRW_SURFACE_BLEND_ENABLED;
978
979 if (!ctx->Color.ColorMask[unit][0])
980 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
981 if (!ctx->Color.ColorMask[unit][1])
982 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
983 if (!ctx->Color.ColorMask[unit][2])
984 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;
985
986 /* Disable writes to the alpha component when the renderbuffer is XRGB
987 * (no alpha bits) or alpha writes are masked off.
988 */
989 if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
990 !ctx->Color.ColorMask[unit][3]) {
991 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
992 }
993 }
994
995 return offset;
996 }
997
998 /**
999 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
1000 */
1001 void
1002 brw_update_renderbuffer_surfaces(struct brw_context *brw,
1003 const struct gl_framebuffer *fb,
1004 uint32_t render_target_start,
1005 uint32_t *surf_offset)
1006 {
1007 GLuint i;
1008 const unsigned int w = _mesa_geometric_width(fb);
1009 const unsigned int h = _mesa_geometric_height(fb);
1010 const unsigned int s = _mesa_geometric_samples(fb);
1011
1012 /* Update surfaces for drawing buffers */
1013 if (fb->_NumColorDrawBuffers >= 1) {
1014 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1015 const uint32_t surf_index = render_target_start + i;
1016 const int flags = (_mesa_geometric_layers(fb) > 0 ?
1017 INTEL_RENDERBUFFER_LAYERED : 0) |
1018 (brw->draw_aux_buffer_disabled[i] ?
1019 INTEL_AUX_BUFFER_DISABLED : 0);
1020
1021 if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
1022 surf_offset[surf_index] =
1023 brw->vtbl.update_renderbuffer_surface(
1024 brw, fb->_ColorDrawBuffers[i], flags, i, surf_index);
1025 } else {
1026 emit_null_surface_state(brw, w, h, s, &surf_offset[surf_index]);
1027 }
1028 }
1029 } else {
1030 const uint32_t surf_index = render_target_start;
1031 emit_null_surface_state(brw, w, h, s, &surf_offset[surf_index]);
1032 }
1033 }
1034
1035 static void
1036 update_renderbuffer_surfaces(struct brw_context *brw)
1037 {
1038 const struct gl_context *ctx = &brw->ctx;
1039
1040 /* BRW_NEW_FS_PROG_DATA */
1041 const struct brw_wm_prog_data *wm_prog_data =
1042 brw_wm_prog_data(brw->wm.base.prog_data);
1043
1044 /* _NEW_BUFFERS | _NEW_COLOR */
1045 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1046 brw_update_renderbuffer_surfaces(
1047 brw, fb,
1048 wm_prog_data->binding_table.render_target_start,
1049 brw->wm.base.surf_offset);
1050 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1051 }
1052
1053 const struct brw_tracked_state brw_renderbuffer_surfaces = {
1054 .dirty = {
1055 .mesa = _NEW_BUFFERS |
1056 _NEW_COLOR,
1057 .brw = BRW_NEW_BATCH |
1058 BRW_NEW_BLORP |
1059 BRW_NEW_FS_PROG_DATA,
1060 },
1061 .emit = update_renderbuffer_surfaces,
1062 };
1063
1064 const struct brw_tracked_state gen6_renderbuffer_surfaces = {
1065 .dirty = {
1066 .mesa = _NEW_BUFFERS,
1067 .brw = BRW_NEW_BATCH |
1068 BRW_NEW_BLORP |
1069 BRW_NEW_FAST_CLEAR_COLOR,
1070 },
1071 .emit = update_renderbuffer_surfaces,
1072 };
1073
1074 static void
1075 update_renderbuffer_read_surfaces(struct brw_context *brw)
1076 {
1077 const struct gl_context *ctx = &brw->ctx;
1078
1079 /* BRW_NEW_FS_PROG_DATA */
1080 const struct brw_wm_prog_data *wm_prog_data =
1081 brw_wm_prog_data(brw->wm.base.prog_data);
1082
1083 /* BRW_NEW_FRAGMENT_PROGRAM */
1084 if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
1085 brw->fragment_program && brw->fragment_program->info.outputs_read) {
1086 /* _NEW_BUFFERS */
1087 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1088
1089 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
1090 struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
1091 const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
1092 const unsigned surf_index =
1093 wm_prog_data->binding_table.render_target_read_start + i;
1094 uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];
1095
1096 if (irb) {
1097 const enum isl_format format = brw->mesa_to_isl_render_format[
1098 _mesa_get_render_format(ctx, intel_rb_format(irb))];
1099 assert(isl_format_supports_sampling(&brw->screen->devinfo,
1100 format));
1101
1102 /* Override the target of the texture if the render buffer is a
1103 * single slice of a 3D texture (since the minimum array element
1104 * field of the surface state structure is ignored by the sampler
1105 * unit for 3D textures on some hardware), or if the render buffer
1106 * is a 1D array (since shaders always provide the array index
1107 * coordinate at the Z component to avoid state-dependent
1108 * recompiles when changing the texture target of the
1109 * framebuffer).
1110 */
1111 const GLenum target =
1112 (irb->mt->target == GL_TEXTURE_3D &&
1113 irb->layer_count == 1) ? GL_TEXTURE_2D :
1114 irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
1115 irb->mt->target;
1116
1117 const struct isl_view view = {
1118 .format = format,
1119 .base_level = irb->mt_level - irb->mt->first_level,
1120 .levels = 1,
1121 .base_array_layer = irb->mt_layer,
1122 .array_len = irb->layer_count,
1123 .swizzle = ISL_SWIZZLE_IDENTITY,
1124 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
1125 };
1126
1127 enum isl_aux_usage aux_usage =
1128 intel_miptree_texture_aux_usage(brw, irb->mt, format);
1129 if (brw->draw_aux_buffer_disabled[i])
1130 aux_usage = ISL_AUX_USAGE_NONE;
1131
1132 brw_emit_surface_state(brw, irb->mt, target, view, aux_usage,
1133 tex_mocs[brw->gen],
1134 surf_offset, surf_index,
1135 0);
1136
1137 } else {
1138 emit_null_surface_state(brw,
1139 _mesa_geometric_width(fb),
1140 _mesa_geometric_height(fb),
1141 _mesa_geometric_samples(fb),
1142 surf_offset);
1143 }
1144 }
1145
1146 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1147 }
1148 }
1149
1150 const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
1151 .dirty = {
1152 .mesa = _NEW_BUFFERS,
1153 .brw = BRW_NEW_BATCH |
1154 BRW_NEW_FAST_CLEAR_COLOR |
1155 BRW_NEW_FRAGMENT_PROGRAM |
1156 BRW_NEW_FS_PROG_DATA,
1157 },
1158 .emit = update_renderbuffer_read_surfaces,
1159 };
1160
1161 static void
1162 update_stage_texture_surfaces(struct brw_context *brw,
1163 const struct gl_program *prog,
1164 struct brw_stage_state *stage_state,
1165 bool for_gather, uint32_t plane)
1166 {
1167 if (!prog)
1168 return;
1169
1170 struct gl_context *ctx = &brw->ctx;
1171
1172 uint32_t *surf_offset = stage_state->surf_offset;
1173
1174 /* BRW_NEW_*_PROG_DATA */
1175 if (for_gather)
1176 surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
1177 else
1178 surf_offset += stage_state->prog_data->binding_table.plane_start[plane];
1179
1180 unsigned num_samplers = util_last_bit(prog->SamplersUsed);
1181 for (unsigned s = 0; s < num_samplers; s++) {
1182 surf_offset[s] = 0;
1183
1184 if (prog->SamplersUsed & (1 << s)) {
1185 const unsigned unit = prog->SamplerUnits[s];
1186
1187 /* _NEW_TEXTURE */
1188 if (ctx->Texture.Unit[unit]._Current) {
1189 brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather, plane);
1190 }
1191 }
1192 }
1193 }
1194
1195
1196 /**
1197 * Construct SURFACE_STATE objects for enabled textures.
1198 */
1199 static void
1200 brw_update_texture_surfaces(struct brw_context *brw)
1201 {
1202 /* BRW_NEW_VERTEX_PROGRAM */
1203 struct gl_program *vs = (struct gl_program *) brw->vertex_program;
1204
1205 /* BRW_NEW_TESS_PROGRAMS */
1206 struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
1207 struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;
1208
1209 /* BRW_NEW_GEOMETRY_PROGRAM */
1210 struct gl_program *gs = (struct gl_program *) brw->geometry_program;
1211
1212 /* BRW_NEW_FRAGMENT_PROGRAM */
1213 struct gl_program *fs = (struct gl_program *) brw->fragment_program;
1214
1215 /* _NEW_TEXTURE */
1216 update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
1217 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
1218 update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
1219 update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
1220 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);
1221
1222 /* Emit an alternate set of surface state for gather. This
1223 * allows the surface format to be overridden for only the
1224 * gather4 messages. */
1225 if (brw->gen < 8) {
1226 if (vs && vs->nir->info.uses_texture_gather)
1227 update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
1228 if (tcs && tcs->nir->info.uses_texture_gather)
1229 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
1230 if (tes && tes->nir->info.uses_texture_gather)
1231 update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
1232 if (gs && gs->nir->info.uses_texture_gather)
1233 update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
1234 if (fs && fs->nir->info.uses_texture_gather)
1235 update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
1236 }
1237
1238 if (fs) {
1239 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
1240 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
1241 }
1242
1243 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1244 }
1245
1246 const struct brw_tracked_state brw_texture_surfaces = {
1247 .dirty = {
1248 .mesa = _NEW_TEXTURE,
1249 .brw = BRW_NEW_BATCH |
1250 BRW_NEW_BLORP |
1251 BRW_NEW_FAST_CLEAR_COLOR |
1252 BRW_NEW_FRAGMENT_PROGRAM |
1253 BRW_NEW_FS_PROG_DATA |
1254 BRW_NEW_GEOMETRY_PROGRAM |
1255 BRW_NEW_GS_PROG_DATA |
1256 BRW_NEW_TESS_PROGRAMS |
1257 BRW_NEW_TCS_PROG_DATA |
1258 BRW_NEW_TES_PROG_DATA |
1259 BRW_NEW_TEXTURE_BUFFER |
1260 BRW_NEW_VERTEX_PROGRAM |
1261 BRW_NEW_VS_PROG_DATA,
1262 },
1263 .emit = brw_update_texture_surfaces,
1264 };
1265
1266 static void
1267 brw_update_cs_texture_surfaces(struct brw_context *brw)
1268 {
1269 /* BRW_NEW_COMPUTE_PROGRAM */
1270 struct gl_program *cs = (struct gl_program *) brw->compute_program;
1271
1272 /* _NEW_TEXTURE */
1273 update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);
1274
1275 /* Emit an alternate set of surface state for gather. This
1276 * allows the surface format to be overridden for only the
1277 * gather4 messages.
1278 */
1279 if (brw->gen < 8) {
1280 if (cs && cs->nir->info.uses_texture_gather)
1281 update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
1282 }
1283
1284 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1285 }
1286
1287 const struct brw_tracked_state brw_cs_texture_surfaces = {
1288 .dirty = {
1289 .mesa = _NEW_TEXTURE,
1290 .brw = BRW_NEW_BATCH |
1291 BRW_NEW_BLORP |
1292 BRW_NEW_COMPUTE_PROGRAM |
1293 BRW_NEW_FAST_CLEAR_COLOR,
1294 },
1295 .emit = brw_update_cs_texture_surfaces,
1296 };
1297
1298
1299 void
1300 brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
1301 struct brw_stage_state *stage_state,
1302 struct brw_stage_prog_data *prog_data)
1303 {
1304 struct gl_context *ctx = &brw->ctx;
1305
1306 if (!prog)
1307 return;
1308
1309 uint32_t *ubo_surf_offsets =
1310 &stage_state->surf_offset[prog_data->binding_table.ubo_start];
1311
1312 for (int i = 0; i < prog->info.num_ubos; i++) {
1313 struct gl_uniform_buffer_binding *binding =
1314 &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];
1315
1316 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1317 emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
1318 } else {
1319 struct intel_buffer_object *intel_bo =
1320 intel_buffer_object(binding->BufferObject);
1321 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1322 if (!binding->AutomaticSize)
1323 size = MIN2(size, binding->Size);
1324 struct brw_bo *bo =
1325 intel_bufferobj_buffer(brw, intel_bo,
1326 binding->Offset,
1327 size, false);
1328 brw_create_constant_surface(brw, bo, binding->Offset,
1329 size,
1330 &ubo_surf_offsets[i]);
1331 }
1332 }
1333
1334 uint32_t *ssbo_surf_offsets =
1335 &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
1336
1337 for (int i = 0; i < prog->info.num_ssbos; i++) {
1338 struct gl_shader_storage_buffer_binding *binding =
1339 &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];
1340
1341 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1342 emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
1343 } else {
1344 struct intel_buffer_object *intel_bo =
1345 intel_buffer_object(binding->BufferObject);
1346 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1347 if (!binding->AutomaticSize)
1348 size = MIN2(size, binding->Size);
1349 struct brw_bo *bo =
1350 intel_bufferobj_buffer(brw, intel_bo,
1351 binding->Offset,
1352 size, true);
1353 brw_create_buffer_surface(brw, bo, binding->Offset,
1354 size,
1355 &ssbo_surf_offsets[i]);
1356 }
1357 }
1358
1359 stage_state->push_constants_dirty = true;
1360
1361 if (prog->info.num_ubos || prog->info.num_ssbos)
1362 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1363 }
1364
1365 static void
1366 brw_upload_wm_ubo_surfaces(struct brw_context *brw)
1367 {
1368 struct gl_context *ctx = &brw->ctx;
1369 /* _NEW_PROGRAM */
1370 struct gl_program *prog = ctx->FragmentProgram._Current;
1371
1372 /* BRW_NEW_FS_PROG_DATA */
1373 brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
1374 }
1375
1376 const struct brw_tracked_state brw_wm_ubo_surfaces = {
1377 .dirty = {
1378 .mesa = _NEW_PROGRAM,
1379 .brw = BRW_NEW_BATCH |
1380 BRW_NEW_BLORP |
1381 BRW_NEW_FS_PROG_DATA |
1382 BRW_NEW_UNIFORM_BUFFER,
1383 },
1384 .emit = brw_upload_wm_ubo_surfaces,
1385 };
1386
1387 static void
1388 brw_upload_cs_ubo_surfaces(struct brw_context *brw)
1389 {
1390 struct gl_context *ctx = &brw->ctx;
1391 /* _NEW_PROGRAM */
1392 struct gl_program *prog =
1393 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1394
1395 /* BRW_NEW_CS_PROG_DATA */
1396 brw_upload_ubo_surfaces(brw, prog, &brw->cs.base, brw->cs.base.prog_data);
1397 }
1398
1399 const struct brw_tracked_state brw_cs_ubo_surfaces = {
1400 .dirty = {
1401 .mesa = _NEW_PROGRAM,
1402 .brw = BRW_NEW_BATCH |
1403 BRW_NEW_BLORP |
1404 BRW_NEW_CS_PROG_DATA |
1405 BRW_NEW_UNIFORM_BUFFER,
1406 },
1407 .emit = brw_upload_cs_ubo_surfaces,
1408 };
1409
1410 void
1411 brw_upload_abo_surfaces(struct brw_context *brw,
1412 const struct gl_program *prog,
1413 struct brw_stage_state *stage_state,
1414 struct brw_stage_prog_data *prog_data)
1415 {
1416 struct gl_context *ctx = &brw->ctx;
1417 uint32_t *surf_offsets =
1418 &stage_state->surf_offset[prog_data->binding_table.abo_start];
1419
1420 if (prog->info.num_abos) {
1421 for (unsigned i = 0; i < prog->info.num_abos; i++) {
1422 struct gl_atomic_buffer_binding *binding =
1423 &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
1424 struct intel_buffer_object *intel_bo =
1425 intel_buffer_object(binding->BufferObject);
1426 struct brw_bo *bo =
1427 intel_bufferobj_buffer(brw, intel_bo, binding->Offset,
1428 intel_bo->Base.Size - binding->Offset,
1429 true);
1430
1431 brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
1432 binding->Offset, ISL_FORMAT_RAW,
1433 bo->size - binding->Offset, 1,
1434 RELOC_WRITE);
1435 }
1436
1437 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1438 }
1439 }
1440
1441 static void
1442 brw_upload_wm_abo_surfaces(struct brw_context *brw)
1443 {
1444 /* _NEW_PROGRAM */
1445 const struct gl_program *wm = brw->fragment_program;
1446
1447 if (wm) {
1448 /* BRW_NEW_FS_PROG_DATA */
1449 brw_upload_abo_surfaces(brw, wm, &brw->wm.base, brw->wm.base.prog_data);
1450 }
1451 }
1452
1453 const struct brw_tracked_state brw_wm_abo_surfaces = {
1454 .dirty = {
1455 .mesa = _NEW_PROGRAM,
1456 .brw = BRW_NEW_ATOMIC_BUFFER |
1457 BRW_NEW_BLORP |
1458 BRW_NEW_BATCH |
1459 BRW_NEW_FS_PROG_DATA,
1460 },
1461 .emit = brw_upload_wm_abo_surfaces,
1462 };
1463
1464 static void
1465 brw_upload_cs_abo_surfaces(struct brw_context *brw)
1466 {
1467 /* _NEW_PROGRAM */
1468 const struct gl_program *cp = brw->compute_program;
1469
1470 if (cp) {
1471 /* BRW_NEW_CS_PROG_DATA */
1472 brw_upload_abo_surfaces(brw, cp, &brw->cs.base, brw->cs.base.prog_data);
1473 }
1474 }
1475
1476 const struct brw_tracked_state brw_cs_abo_surfaces = {
1477 .dirty = {
1478 .mesa = _NEW_PROGRAM,
1479 .brw = BRW_NEW_ATOMIC_BUFFER |
1480 BRW_NEW_BLORP |
1481 BRW_NEW_BATCH |
1482 BRW_NEW_CS_PROG_DATA,
1483 },
1484 .emit = brw_upload_cs_abo_surfaces,
1485 };
1486
1487 static void
1488 brw_upload_cs_image_surfaces(struct brw_context *brw)
1489 {
1490 /* _NEW_PROGRAM */
1491 const struct gl_program *cp = brw->compute_program;
1492
1493 if (cp) {
1494 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1495 brw_upload_image_surfaces(brw, cp, &brw->cs.base,
1496 brw->cs.base.prog_data);
1497 }
1498 }
1499
1500 const struct brw_tracked_state brw_cs_image_surfaces = {
1501 .dirty = {
1502 .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
1503 .brw = BRW_NEW_BATCH |
1504 BRW_NEW_BLORP |
1505 BRW_NEW_CS_PROG_DATA |
1506 BRW_NEW_FAST_CLEAR_COLOR |
1507 BRW_NEW_IMAGE_UNITS
1508 },
1509 .emit = brw_upload_cs_image_surfaces,
1510 };
1511
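/* Pick the SURFACE_STATE format for a shader image based on its GL access
 * qualifier: write-only images keep the native format, reads use the closest
 * typed format when the hardware has one, and anything else falls back to
 * ISL_FORMAT_RAW with untyped messages (see the cases below).
 */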
1512 static uint32_t
1513 get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
1514 {
1515 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1516 enum isl_format hw_format = brw_isl_format_for_mesa_format(format);
1517 if (access == GL_WRITE_ONLY) {
1518 return hw_format;
1519 } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
1520 /* Typed surface reads support a very limited subset of the shader
1521 * image formats. Translate it into the closest format the
1522 * hardware supports.
1523 */
1524 return isl_lower_storage_image_format(devinfo, hw_format);
1525 } else {
1526 /* The hardware doesn't actually support a typed format that we can use
1527 * so we have to fall back to untyped read/write messages.
1528 */
1529 return ISL_FORMAT_RAW;
1530 }
1531 }
1532
1533 static void
1534 update_default_image_param(struct brw_context *brw,
1535 struct gl_image_unit *u,
1536 unsigned surface_idx,
1537 struct brw_image_param *param)
1538 {
1539 memset(param, 0, sizeof(*param));
1540 param->surface_idx = surface_idx;
1541 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
1542 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
1543 * detailed explanation of these parameters.
1544 */
1545 param->swizzling[0] = 0xff;
1546 param->swizzling[1] = 0xff;
1547 }
1548
1549 static void
1550 update_buffer_image_param(struct brw_context *brw,
1551 struct gl_image_unit *u,
1552 unsigned surface_idx,
1553 struct brw_image_param *param)
1554 {
1555 struct gl_buffer_object *obj = u->TexObj->BufferObject;
1556 const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
1557 update_default_image_param(brw, u, surface_idx, param);
1558
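   /* e.g. a 4096-byte buffer bound as a rgba8 image (4 bytes per texel)
    * yields size[0] = 1024 texels with stride[0] = 4 bytes.
    */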
1559 param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
1560 param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
1561 }
1562
1563 static unsigned
1564 get_image_num_layers(const struct intel_mipmap_tree *mt, GLenum target,
1565 unsigned level)
1566 {
1567 if (target == GL_TEXTURE_CUBE_MAP)
1568 return 6;
1569
1570 return target == GL_TEXTURE_3D ?
1571 minify(mt->surf.logical_level0_px.depth, level) :
1572 mt->surf.logical_level0_px.array_len;
1573 }
1574
1575 static void
1576 update_image_surface(struct brw_context *brw,
1577 struct gl_image_unit *u,
1578 GLenum access,
1579 unsigned surface_idx,
1580 uint32_t *surf_offset,
1581 struct brw_image_param *param)
1582 {
1583 if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
1584 struct gl_texture_object *obj = u->TexObj;
1585 const unsigned format = get_image_format(brw, u->_ActualFormat, access);
1586
1587 if (obj->Target == GL_TEXTURE_BUFFER) {
1588 struct intel_buffer_object *intel_obj =
1589 intel_buffer_object(obj->BufferObject);
1590 const unsigned texel_size = (format == ISL_FORMAT_RAW ? 1 :
1591 _mesa_get_format_bytes(u->_ActualFormat));
1592
1593 brw_emit_buffer_surface_state(
1594 brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
1595 format, intel_obj->Base.Size, texel_size,
1596 access != GL_READ_ONLY ? RELOC_WRITE : 0);
1597
1598 update_buffer_image_param(brw, u, surface_idx, param);
1599
1600 } else {
1601 struct intel_texture_object *intel_obj = intel_texture_object(obj);
1602 struct intel_mipmap_tree *mt = intel_obj->mt;
1603 const unsigned num_layers = u->Layered ?
1604 get_image_num_layers(mt, obj->Target, u->Level) : 1;
1605
1606 struct isl_view view = {
1607 .format = format,
1608 .base_level = obj->MinLevel + u->Level,
1609 .levels = 1,
1610 .base_array_layer = obj->MinLayer + u->_Layer,
1611 .array_len = num_layers,
1612 .swizzle = ISL_SWIZZLE_IDENTITY,
1613 .usage = ISL_SURF_USAGE_STORAGE_BIT,
1614 };
1615
1616 if (format == ISL_FORMAT_RAW) {
1617 brw_emit_buffer_surface_state(
1618 brw, surf_offset, mt->bo, mt->offset,
1619 format, mt->bo->size - mt->offset, 1 /* pitch */,
1620 access != GL_READ_ONLY ? RELOC_WRITE : 0);
1621
1622 } else {
1623 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
1624 assert(!intel_miptree_has_color_unresolved(mt,
1625 view.base_level, 1,
1626 view.base_array_layer,
1627 view.array_len));
1628 brw_emit_surface_state(brw, mt, mt->target, view,
1629 ISL_AUX_USAGE_NONE, tex_mocs[brw->gen],
1630 surf_offset, surf_index,
1631 access == GL_READ_ONLY ? 0 : RELOC_WRITE);
1632 }
1633
1634 isl_surf_fill_image_param(&brw->isl_dev, param, &mt->surf, &view);
1635 param->surface_idx = surface_idx;
1636 }
1637
1638 } else {
1639 emit_null_surface_state(brw, 1, 1, 1, surf_offset);
1640 update_default_image_param(brw, u, surface_idx, param);
1641 }
1642 }
1643
1644 void
1645 brw_upload_image_surfaces(struct brw_context *brw,
1646 const struct gl_program *prog,
1647 struct brw_stage_state *stage_state,
1648 struct brw_stage_prog_data *prog_data)
1649 {
1650 assert(prog);
1651 struct gl_context *ctx = &brw->ctx;
1652
1653 if (prog->info.num_images) {
1654 for (unsigned i = 0; i < prog->info.num_images; i++) {
1655 struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
1656 const unsigned surf_idx = prog_data->binding_table.image_start + i;
1657
1658 update_image_surface(brw, u, prog->sh.ImageAccess[i],
1659 surf_idx,
1660 &stage_state->surf_offset[surf_idx],
1661 &prog_data->image_param[i]);
1662 }
1663
1664 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1665 /* This may have changed the image metadata that depends on the context
1666 * image unit state and is passed to the program as uniforms, so make
1667 * sure that push and pull constants are reuploaded.
1668 */
1669 brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
1670 }
1671 }
1672
1673 static void
1674 brw_upload_wm_image_surfaces(struct brw_context *brw)
1675 {
1676 /* BRW_NEW_FRAGMENT_PROGRAM */
1677 const struct gl_program *wm = brw->fragment_program;
1678
1679 if (wm) {
1680 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1681 brw_upload_image_surfaces(brw, wm, &brw->wm.base,
1682 brw->wm.base.prog_data);
1683 }
1684 }
1685
1686 const struct brw_tracked_state brw_wm_image_surfaces = {
1687 .dirty = {
1688 .mesa = _NEW_TEXTURE,
1689 .brw = BRW_NEW_BATCH |
1690 BRW_NEW_BLORP |
1691 BRW_NEW_FAST_CLEAR_COLOR |
1692 BRW_NEW_FRAGMENT_PROGRAM |
1693 BRW_NEW_FS_PROG_DATA |
1694 BRW_NEW_IMAGE_UNITS
1695 },
1696 .emit = brw_upload_wm_image_surfaces,
1697 };
1698
1699 void
1700 gen4_init_vtable_surface_functions(struct brw_context *brw)
1701 {
1702 brw->vtbl.update_renderbuffer_surface = gen4_update_renderbuffer_surface;
1703 }
1704
1705 void
1706 gen6_init_vtable_surface_functions(struct brw_context *brw)
1707 {
1708 gen4_init_vtable_surface_functions(brw);
1709 brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
1710 }
1711
1712 static void
1713 brw_upload_cs_work_groups_surface(struct brw_context *brw)
1714 {
1715 struct gl_context *ctx = &brw->ctx;
1716 /* _NEW_PROGRAM */
1717 struct gl_program *prog =
1718 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1719 /* BRW_NEW_CS_PROG_DATA */
1720 const struct brw_cs_prog_data *cs_prog_data =
1721 brw_cs_prog_data(brw->cs.base.prog_data);
1722
1723 if (prog && cs_prog_data->uses_num_work_groups) {
1724 const unsigned surf_idx =
1725 cs_prog_data->binding_table.work_groups_start;
1726 uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
1727 struct brw_bo *bo;
1728 uint32_t bo_offset;
1729
1730 if (brw->compute.num_work_groups_bo == NULL) {
1731 bo = NULL;
1732 intel_upload_data(brw,
1733 (void *)brw->compute.num_work_groups,
1734 3 * sizeof(GLuint),
1735 sizeof(GLuint),
1736 &bo,
1737 &bo_offset);
1738 } else {
1739 bo = brw->compute.num_work_groups_bo;
1740 bo_offset = brw->compute.num_work_groups_offset;
1741 }
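      /* Direct dispatches upload the three GLuints from
       * brw->compute.num_work_groups just above; indirect dispatches
       * (glDispatchComputeIndirect) take the else branch and reuse the
       * application-supplied buffer.
       */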
1742
1743 brw_emit_buffer_surface_state(brw, surf_offset,
1744 bo, bo_offset,
1745 ISL_FORMAT_RAW,
1746 3 * sizeof(GLuint), 1,
1747 RELOC_WRITE);
1748 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1749 }
1750 }
1751
1752 const struct brw_tracked_state brw_cs_work_groups_surface = {
1753 .dirty = {
1754 .brw = BRW_NEW_BLORP |
1755 BRW_NEW_CS_PROG_DATA |
1756 BRW_NEW_CS_WORK_GROUPS
1757 },
1758 .emit = brw_upload_cs_work_groups_surface,
1759 };