i965/wm: Use level offsets directly
[mesa.git] / src/mesa/drivers/dri/i965/brw_wm_surface_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "compiler/nir/nir.h"
34 #include "main/context.h"
35 #include "main/blend.h"
36 #include "main/mtypes.h"
37 #include "main/samplerobj.h"
38 #include "main/shaderimage.h"
39 #include "main/teximage.h"
40 #include "program/prog_parameter.h"
41 #include "program/prog_instruction.h"
42 #include "main/framebuffer.h"
43 #include "main/shaderapi.h"
44
45 #include "isl/isl.h"
46
47 #include "intel_mipmap_tree.h"
48 #include "intel_batchbuffer.h"
49 #include "intel_tex.h"
50 #include "intel_fbo.h"
51 #include "intel_buffer_objects.h"
52
53 #include "brw_context.h"
54 #include "brw_state.h"
55 #include "brw_defines.h"
56 #include "brw_wm.h"
57
58 enum {
59 INTEL_RENDERBUFFER_LAYERED = 1 << 0,
60 INTEL_AUX_BUFFER_DISABLED = 1 << 1,
61 };
62
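/* Memory object control state (MOCS) values used for texture and buffer
 * surfaces (tex_mocs) and for render targets (rb_mocs), indexed by hardware
 * generation.
 */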
63 uint32_t tex_mocs[] = {
64 [7] = GEN7_MOCS_L3,
65 [8] = BDW_MOCS_WB,
66 [9] = SKL_MOCS_WB,
67 [10] = CNL_MOCS_WB,
68 };
69
70 uint32_t rb_mocs[] = {
71 [7] = GEN7_MOCS_L3,
72 [8] = BDW_MOCS_PTE,
73 [9] = SKL_MOCS_PTE,
74 [10] = CNL_MOCS_PTE,
75 };
76
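/**
 * Emit a SURFACE_STATE for the given miptree and view, including relocations
 * for the main buffer and any auxiliary (MCS/HiZ) buffer, and return the
 * state's batch offset through *surf_offset.
 *
 * If the requested target's layout is incompatible with the miptree layout,
 * the state is pointed directly at the first texel of the selected
 * level/layer via tile offsets instead of the usual base level/layer
 * controls.
 */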
77 static void
78 brw_emit_surface_state(struct brw_context *brw,
79 struct intel_mipmap_tree *mt, uint32_t flags,
80 GLenum target, struct isl_view view,
81 uint32_t mocs, uint32_t *surf_offset, int surf_index,
82 unsigned read_domains, unsigned write_domains)
83 {
84 uint32_t tile_x = mt->level[0].level_x;
85 uint32_t tile_y = mt->level[0].level_y;
86 uint32_t offset = mt->offset;
87
88 struct isl_surf surf;
89 intel_miptree_get_isl_surf(brw, mt, &surf);
90
91 surf.dim = get_isl_surf_dim(target);
92
93 const enum isl_dim_layout dim_layout =
94 get_isl_dim_layout(&brw->screen->devinfo, mt->tiling, target,
95 mt->array_layout);
96
97 if (surf.dim_layout != dim_layout) {
98 /* The layout of the specified texture target is not compatible with the
 99       * actual layout of the miptree structure in memory -- you're entering
 100      * dangerous territory: this can only possibly work if you only intended
101 * to access a single level and slice of the texture, and the hardware
102 * supports the tile offset feature in order to allow non-tile-aligned
103 * base offsets, since we'll have to point the hardware to the first
104 * texel of the level instead of relying on the usual base level/layer
105 * controls.
106 */
107 assert(brw->has_surface_tile_offset);
108 assert(view.levels == 1 && view.array_len == 1);
109 assert(tile_x == 0 && tile_y == 0);
110
111 offset += intel_miptree_get_tile_offsets(mt, view.base_level,
112 view.base_array_layer,
113 &tile_x, &tile_y);
114
115 /* Minify the logical dimensions of the texture. */
116 const unsigned l = view.base_level - mt->first_level;
117 surf.logical_level0_px.width = minify(surf.logical_level0_px.width, l);
118 surf.logical_level0_px.height = surf.dim <= ISL_SURF_DIM_1D ? 1 :
119 minify(surf.logical_level0_px.height, l);
120 surf.logical_level0_px.depth = surf.dim <= ISL_SURF_DIM_2D ? 1 :
121 minify(surf.logical_level0_px.depth, l);
122
123 /* Only the base level and layer can be addressed with the overridden
124 * layout.
125 */
126 surf.logical_level0_px.array_len = 1;
127 surf.levels = 1;
128 surf.dim_layout = dim_layout;
129
130 /* The requested slice of the texture is now at the base level and
131 * layer.
132 */
133 view.base_level = 0;
134 view.base_array_layer = 0;
135 }
136
137 union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };
138
139 struct brw_bo *aux_bo;
140 struct isl_surf *aux_surf = NULL;
141 uint64_t aux_offset = 0;
142 enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE;
143 if ((mt->mcs_buf || intel_miptree_sample_with_hiz(brw, mt)) &&
144 !(flags & INTEL_AUX_BUFFER_DISABLED)) {
145 aux_usage = intel_miptree_get_aux_isl_usage(brw, mt);
146
147 if (mt->mcs_buf) {
148 aux_surf = &mt->mcs_buf->surf;
149
150 aux_bo = mt->mcs_buf->bo;
151 aux_offset = mt->mcs_buf->bo->offset64 + mt->mcs_buf->offset;
152 } else {
153 aux_surf = &mt->hiz_buf->surf;
154
155 aux_bo = mt->hiz_buf->bo;
156 aux_offset = mt->hiz_buf->bo->offset64;
157 }
158
159 /* We only really need a clear color if we also have an auxiliary
160 * surface. Without one, it does nothing.
161 */
162 clear_color = mt->fast_clear_color;
163 }
164
165 void *state = brw_state_batch(brw,
166 brw->isl_dev.ss.size,
167 brw->isl_dev.ss.align,
168 surf_offset);
169
170 isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
171 .address = mt->bo->offset64 + offset,
172 .aux_surf = aux_surf, .aux_usage = aux_usage,
173 .aux_address = aux_offset,
174 .mocs = mocs, .clear_color = clear_color,
175 .x_offset_sa = tile_x, .y_offset_sa = tile_y);
176
177 brw_emit_reloc(&brw->batch, *surf_offset + brw->isl_dev.ss.addr_offset,
178 mt->bo, offset, read_domains, write_domains);
179
180 if (aux_surf) {
181 /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
182 * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
183 * contain other control information. Since buffer addresses are always
184 * on 4k boundaries (and thus have their lower 12 bits zero), we can use
185 * an ordinary reloc to do the necessary address translation.
186 */
187 assert((aux_offset & 0xfff) == 0);
188 uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
189 brw_emit_reloc(&brw->batch,
190 *surf_offset + brw->isl_dev.ss.aux_addr_offset,
191 aux_bo, *aux_addr - aux_bo->offset64,
192 read_domains, write_domains);
193 }
194 }
195
196 uint32_t
197 brw_update_renderbuffer_surface(struct brw_context *brw,
198 struct gl_renderbuffer *rb,
199 uint32_t flags, unsigned unit /* unused */,
200 uint32_t surf_index)
201 {
202 struct gl_context *ctx = &brw->ctx;
203 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
204 struct intel_mipmap_tree *mt = irb->mt;
205
206 if (brw->gen < 9) {
207 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
208 }
209
210 assert(brw_render_target_supported(brw, rb));
211
212 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
213 if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
214 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
215 __func__, _mesa_get_format_name(rb_format));
216 }
217
218 struct isl_view view = {
219 .format = brw->mesa_to_isl_render_format[rb_format],
220 .base_level = irb->mt_level - irb->mt->first_level,
221 .levels = 1,
222 .base_array_layer = irb->mt_layer,
223 .array_len = MAX2(irb->layer_count, 1),
224 .swizzle = ISL_SWIZZLE_IDENTITY,
225 .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
226 };
227
228 uint32_t offset;
229 brw_emit_surface_state(brw, mt, flags, mt->target, view,
230 rb_mocs[brw->gen],
231 &offset, surf_index,
232 I915_GEM_DOMAIN_RENDER,
233 I915_GEM_DOMAIN_RENDER);
234 return offset;
235 }
236
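/**
 * Translate a GL texture target into the corresponding BRW_SURFACE_* type.
 */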
237 GLuint
238 translate_tex_target(GLenum target)
239 {
240 switch (target) {
241 case GL_TEXTURE_1D:
242 case GL_TEXTURE_1D_ARRAY_EXT:
243 return BRW_SURFACE_1D;
244
245 case GL_TEXTURE_RECTANGLE_NV:
246 return BRW_SURFACE_2D;
247
248 case GL_TEXTURE_2D:
249 case GL_TEXTURE_2D_ARRAY_EXT:
250 case GL_TEXTURE_EXTERNAL_OES:
251 case GL_TEXTURE_2D_MULTISAMPLE:
252 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
253 return BRW_SURFACE_2D;
254
255 case GL_TEXTURE_3D:
256 return BRW_SURFACE_3D;
257
258 case GL_TEXTURE_CUBE_MAP:
259 case GL_TEXTURE_CUBE_MAP_ARRAY:
260 return BRW_SURFACE_CUBE;
261
262 default:
263 unreachable("not reached");
264 }
265 }
266
267 uint32_t
268 brw_get_surface_tiling_bits(uint32_t tiling)
269 {
270 switch (tiling) {
271 case I915_TILING_X:
272 return BRW_SURFACE_TILED;
273 case I915_TILING_Y:
274 return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
275 default:
276 return 0;
277 }
278 }
279
280
281 uint32_t
282 brw_get_surface_num_multisamples(unsigned num_samples)
283 {
284 if (num_samples > 1)
285 return BRW_SURFACE_MULTISAMPLECOUNT_4;
286 else
287 return BRW_SURFACE_MULTISAMPLECOUNT_1;
288 }
289
290 /**
291 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
292 * swizzling.
293 */
294 int
295 brw_get_texture_swizzle(const struct gl_context *ctx,
296 const struct gl_texture_object *t)
297 {
298 const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
299
300 int swizzles[SWIZZLE_NIL + 1] = {
301 SWIZZLE_X,
302 SWIZZLE_Y,
303 SWIZZLE_Z,
304 SWIZZLE_W,
305 SWIZZLE_ZERO,
306 SWIZZLE_ONE,
307 SWIZZLE_NIL
308 };
309
310 if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
311 img->_BaseFormat == GL_DEPTH_STENCIL) {
312 GLenum depth_mode = t->DepthMode;
313
314 /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
315 * with depth component data specified with a sized internal format.
316 * Otherwise, it's left at the old default, GL_LUMINANCE.
317 */
318 if (_mesa_is_gles3(ctx) &&
319 img->InternalFormat != GL_DEPTH_COMPONENT &&
320 img->InternalFormat != GL_DEPTH_STENCIL) {
321 depth_mode = GL_RED;
322 }
323
324 switch (depth_mode) {
325 case GL_ALPHA:
326 swizzles[0] = SWIZZLE_ZERO;
327 swizzles[1] = SWIZZLE_ZERO;
328 swizzles[2] = SWIZZLE_ZERO;
329 swizzles[3] = SWIZZLE_X;
330 break;
331 case GL_LUMINANCE:
332 swizzles[0] = SWIZZLE_X;
333 swizzles[1] = SWIZZLE_X;
334 swizzles[2] = SWIZZLE_X;
335 swizzles[3] = SWIZZLE_ONE;
336 break;
337 case GL_INTENSITY:
338 swizzles[0] = SWIZZLE_X;
339 swizzles[1] = SWIZZLE_X;
340 swizzles[2] = SWIZZLE_X;
341 swizzles[3] = SWIZZLE_X;
342 break;
343 case GL_RED:
344 swizzles[0] = SWIZZLE_X;
345 swizzles[1] = SWIZZLE_ZERO;
346 swizzles[2] = SWIZZLE_ZERO;
347 swizzles[3] = SWIZZLE_ONE;
348 break;
349 }
350 }
351
352 GLenum datatype = _mesa_get_format_datatype(img->TexFormat);
353
354 /* If the texture's format is alpha-only, force R, G, and B to
355 * 0.0. Similarly, if the texture's format has no alpha channel,
356 * force the alpha value read to 1.0. This allows for the
357 * implementation to use an RGBA texture for any of these formats
358 * without leaking any unexpected values.
359 */
360 switch (img->_BaseFormat) {
361 case GL_ALPHA:
362 swizzles[0] = SWIZZLE_ZERO;
363 swizzles[1] = SWIZZLE_ZERO;
364 swizzles[2] = SWIZZLE_ZERO;
365 break;
366 case GL_LUMINANCE:
367 if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
368 swizzles[0] = SWIZZLE_X;
369 swizzles[1] = SWIZZLE_X;
370 swizzles[2] = SWIZZLE_X;
371 swizzles[3] = SWIZZLE_ONE;
372 }
373 break;
374 case GL_LUMINANCE_ALPHA:
375 if (datatype == GL_SIGNED_NORMALIZED) {
376 swizzles[0] = SWIZZLE_X;
377 swizzles[1] = SWIZZLE_X;
378 swizzles[2] = SWIZZLE_X;
379 swizzles[3] = SWIZZLE_W;
380 }
381 break;
382 case GL_INTENSITY:
383 if (datatype == GL_SIGNED_NORMALIZED) {
384 swizzles[0] = SWIZZLE_X;
385 swizzles[1] = SWIZZLE_X;
386 swizzles[2] = SWIZZLE_X;
387 swizzles[3] = SWIZZLE_X;
388 }
389 break;
390 case GL_RED:
391 case GL_RG:
392 case GL_RGB:
393 if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0 ||
394 img->TexFormat == MESA_FORMAT_RGB_DXT1 ||
395 img->TexFormat == MESA_FORMAT_SRGB_DXT1)
396 swizzles[3] = SWIZZLE_ONE;
397 break;
398 }
399
400 return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
401 swizzles[GET_SWZ(t->_Swizzle, 1)],
402 swizzles[GET_SWZ(t->_Swizzle, 2)],
403 swizzles[GET_SWZ(t->_Swizzle, 3)]);
404 }
405
406 /**
 407  * Convert a swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
408 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED). The mappings are
409 *
 410  *   SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
 411  *           0          1          2          3            4            5
 412  *           4          5          6          7            0            1
 413  *     SCS_RED, SCS_GREEN,  SCS_BLUE, SCS_ALPHA,   SCS_ZERO,   SCS_ONE
414 *
415 * which is simply adding 4 then modding by 8 (or anding with 7).
416 *
417 * We then may need to apply workarounds for textureGather hardware bugs.
418 */
419 static unsigned
420 swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
421 {
422 unsigned scs = (swizzle + 4) & 7;
423
424 return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
425 }
426
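/**
 * Return the index of the color draw buffer whose miptree matches mt, or
 * fb->_NumColorDrawBuffers if no draw buffer uses it.
 */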
427 static unsigned
428 brw_find_matching_rb(const struct gl_framebuffer *fb,
429 const struct intel_mipmap_tree *mt)
430 {
431 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
432 const struct intel_renderbuffer *irb =
433 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
434
435 if (irb && irb->mt == mt)
436 return i;
437 }
438
439 return fb->_NumColorDrawBuffers;
440 }
441
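/**
 * Check that sampling through the given view is safe when the miptree uses
 * lossless compression (CCS_E): either the view format can be sampled as
 * compressed, the accessed levels/layers are already resolved, or
 * compression has been disabled for the matching draw buffer.
 */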
442 static inline bool
443 brw_texture_view_sane(const struct brw_context *brw,
444 const struct intel_mipmap_tree *mt,
445 const struct isl_view *view)
446 {
447 /* There are special cases only for lossless compression. */
448 if (mt->aux_usage != ISL_AUX_USAGE_CCS_E)
449 return true;
450
451 if (isl_format_supports_ccs_e(&brw->screen->devinfo, view->format))
452 return true;
453
454 /* Logic elsewhere needs to take care to resolve the color buffer prior
455 * to sampling it as non-compressed.
456 */
457 if (intel_miptree_has_color_unresolved(mt, view->base_level, view->levels,
458 view->base_array_layer,
459 view->array_len))
460 return false;
461
462 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
463 const unsigned rb_index = brw_find_matching_rb(fb, mt);
464
465 if (rb_index == fb->_NumColorDrawBuffers)
466 return true;
467
468 /* Underlying surface is compressed but it is sampled using a format that
469 * the sampling engine doesn't support as compressed. Compression must be
470 * disabled for both sampling engine and data port in case the same surface
471 * is used also as render target.
472 */
473 return brw->draw_aux_buffer_disabled[rb_index];
474 }
475
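/**
 * Decide whether the auxiliary (MCS/CCS) buffer should be ignored when
 * sampling from the given miptree: it can be skipped once the colors are
 * resolved, but must stay enabled while the same surface is being rendered
 * to with compression enabled.
 */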
476 static bool
477 brw_disable_aux_surface(const struct brw_context *brw,
478 const struct intel_mipmap_tree *mt,
479 const struct isl_view *view)
480 {
481 /* Nothing to disable. */
482 if (!mt->mcs_buf)
483 return false;
484
485 const bool is_unresolved = intel_miptree_has_color_unresolved(
486 mt, view->base_level, view->levels,
487 view->base_array_layer, view->array_len);
488
489 /* There are special cases only for lossless compression. */
490 if (mt->aux_usage != ISL_AUX_USAGE_CCS_E)
491 return !is_unresolved;
492
493 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
494 const unsigned rb_index = brw_find_matching_rb(fb, mt);
495
496 /* If we are drawing into this with compression enabled, then we must also
497 * enable compression when texturing from it regardless of
 498    * fast_clear_state. If we don't, then after the first draw call with
499 * this setup, there will be data in the CCS which won't get picked up by
500 * subsequent texturing operations as required by ARB_texture_barrier.
501 * Since we don't want to re-emit the binding table or do a resolve
502 * operation every draw call, the easiest thing to do is just enable
503 * compression on the texturing side. This is completely safe to do
504 * since, if compressed texturing weren't allowed, we would have disabled
505 * compression of render targets in whatever_that_function_is_called().
506 */
507 if (rb_index < fb->_NumColorDrawBuffers) {
508 if (brw->draw_aux_buffer_disabled[rb_index]) {
509 assert(!is_unresolved);
510 }
511
512 return brw->draw_aux_buffer_disabled[rb_index];
513 }
514
515 return !is_unresolved;
516 }
517
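/**
 * Construct the SURFACE_STATE for one bound texture unit.  Buffer textures
 * are delegated to brw_update_buffer_texture_surface(); for everything else
 * the format, swizzle, stencil-sampling and gen6/gen7 textureGather
 * workarounds are applied before the state is emitted.
 */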
518 void
519 brw_update_texture_surface(struct gl_context *ctx,
520 unsigned unit,
521 uint32_t *surf_offset,
522 bool for_gather,
523 uint32_t plane)
524 {
525 struct brw_context *brw = brw_context(ctx);
526 struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
527
528 if (obj->Target == GL_TEXTURE_BUFFER) {
529 brw_update_buffer_texture_surface(ctx, unit, surf_offset);
530
531 } else {
532 struct intel_texture_object *intel_obj = intel_texture_object(obj);
533 struct intel_mipmap_tree *mt = intel_obj->mt;
534
535 if (plane > 0) {
536 if (mt->plane[plane - 1] == NULL)
537 return;
538 mt = mt->plane[plane - 1];
539 }
540
541 struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
542 /* If this is a view with restricted NumLayers, then our effective depth
543 * is not just the miptree depth.
544 */
545 const unsigned view_num_layers =
546 (obj->Immutable && obj->Target != GL_TEXTURE_3D) ? obj->NumLayers :
547 mt->logical_depth0;
548
549 /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
550 * texturing functions that return a float, as our code generation always
551 * selects the .x channel (which would always be 0).
552 */
553 struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
554 const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
555 (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
556 firstImage->_BaseFormat == GL_DEPTH_STENCIL);
557 const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
558 brw_get_texture_swizzle(&brw->ctx, obj));
559
560 mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
561 enum isl_format format = translate_tex_format(brw, mesa_fmt,
562 sampler->sRGBDecode);
563
564 /* Implement gen6 and gen7 gather work-around */
565 bool need_green_to_blue = false;
566 if (for_gather) {
567 if (brw->gen == 7 && (format == ISL_FORMAT_R32G32_FLOAT ||
568 format == ISL_FORMAT_R32G32_SINT ||
569 format == ISL_FORMAT_R32G32_UINT)) {
570 format = ISL_FORMAT_R32G32_FLOAT_LD;
571 need_green_to_blue = brw->is_haswell;
572 } else if (brw->gen == 6) {
573 /* Sandybridge's gather4 message is broken for integer formats.
574 * To work around this, we pretend the surface is UNORM for
575 * 8 or 16-bit formats, and emit shader instructions to recover
576 * the real INT/UINT value. For 32-bit formats, we pretend
577 * the surface is FLOAT, and simply reinterpret the resulting
578 * bits.
579 */
580 switch (format) {
581 case ISL_FORMAT_R8_SINT:
582 case ISL_FORMAT_R8_UINT:
583 format = ISL_FORMAT_R8_UNORM;
584 break;
585
586 case ISL_FORMAT_R16_SINT:
587 case ISL_FORMAT_R16_UINT:
588 format = ISL_FORMAT_R16_UNORM;
589 break;
590
591 case ISL_FORMAT_R32_SINT:
592 case ISL_FORMAT_R32_UINT:
593 format = ISL_FORMAT_R32_FLOAT;
594 break;
595
596 default:
597 break;
598 }
599 }
600 }
601
602 if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
603 if (brw->gen <= 7) {
604 assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
605 mt = mt->r8stencil_mt;
606 } else {
607 mt = mt->stencil_mt;
608 }
609 format = ISL_FORMAT_R8_UINT;
610 } else if (brw->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
611 assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
612 mt = mt->r8stencil_mt;
613 format = ISL_FORMAT_R8_UINT;
614 }
615
616 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
617
618 struct isl_view view = {
619 .format = format,
620 .base_level = obj->MinLevel + obj->BaseLevel,
621 .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
622 .base_array_layer = obj->MinLayer,
623 .array_len = view_num_layers,
624 .swizzle = {
625 .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
626 .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
627 .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
628 .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
629 },
630 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
631 };
632
633 if (obj->Target == GL_TEXTURE_CUBE_MAP ||
634 obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
635 view.usage |= ISL_SURF_USAGE_CUBE_BIT;
636
637 assert(brw_texture_view_sane(brw, mt, &view));
638
639 const int flags = brw_disable_aux_surface(brw, mt, &view) ?
640 INTEL_AUX_BUFFER_DISABLED : 0;
641 brw_emit_surface_state(brw, mt, flags, mt->target, view,
642 tex_mocs[brw->gen],
643 surf_offset, surf_index,
644 I915_GEM_DOMAIN_SAMPLER, 0);
645 }
646 }
647
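/**
 * Fill out a buffer SURFACE_STATE for the given BO range and emit the
 * relocation for its address; "rw" additionally marks the buffer as
 * writable in the relocation's write domain.
 */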
648 void
649 brw_emit_buffer_surface_state(struct brw_context *brw,
650 uint32_t *out_offset,
651 struct brw_bo *bo,
652 unsigned buffer_offset,
653 unsigned surface_format,
654 unsigned buffer_size,
655 unsigned pitch,
656 bool rw)
657 {
658 uint32_t *dw = brw_state_batch(brw,
659 brw->isl_dev.ss.size,
660 brw->isl_dev.ss.align,
661 out_offset);
662
663 isl_buffer_fill_state(&brw->isl_dev, dw,
664 .address = (bo ? bo->offset64 : 0) + buffer_offset,
665 .size = buffer_size,
666 .format = surface_format,
667 .stride = pitch,
668 .mocs = tex_mocs[brw->gen]);
669
670 if (bo) {
671 brw_emit_reloc(&brw->batch, *out_offset + brw->isl_dev.ss.addr_offset,
672 bo, buffer_offset,
673 I915_GEM_DOMAIN_SAMPLER,
674 (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
675 }
676 }
677
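/**
 * Set up the surface state for a buffer texture, clamping the bound range
 * to the implementation's maximum texel count.
 */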
678 void
679 brw_update_buffer_texture_surface(struct gl_context *ctx,
680 unsigned unit,
681 uint32_t *surf_offset)
682 {
683 struct brw_context *brw = brw_context(ctx);
684 struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
685 struct intel_buffer_object *intel_obj =
686 intel_buffer_object(tObj->BufferObject);
687 uint32_t size = tObj->BufferSize;
688 struct brw_bo *bo = NULL;
689 mesa_format format = tObj->_BufferObjectFormat;
690 const enum isl_format isl_format = brw_isl_format_for_mesa_format(format);
691 int texel_size = _mesa_get_format_bytes(format);
692
693 if (intel_obj) {
694 size = MIN2(size, intel_obj->Base.Size);
695 bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size,
696 false);
697 }
698
 699    /* The ARB_texture_buffer_object spec says:
700 *
701 * "The number of texels in the buffer texture's texel array is given by
702 *
703 * floor(<buffer_size> / (<components> * sizeof(<base_type>)),
704 *
705 * where <buffer_size> is the size of the buffer object, in basic
706 * machine units and <components> and <base_type> are the element count
707 * and base data type for elements, as specified in Table X.1. The
708 * number of texels in the texel array is then clamped to the
709 * implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
710 *
711 * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
712 * so that when ISL divides by stride to obtain the number of texels, that
713 * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
714 */
715 size = MIN2(size, ctx->Const.MaxTextureBufferSize * (unsigned) texel_size);
716
717 if (isl_format == ISL_FORMAT_UNSUPPORTED) {
718 _mesa_problem(NULL, "bad format %s for texture buffer\n",
719 _mesa_get_format_name(format));
720 }
721
722 brw_emit_buffer_surface_state(brw, surf_offset, bo,
723 tObj->BufferOffset,
724 isl_format,
725 size,
726 texel_size,
727 false /* rw */);
728 }
729
730 /**
731 * Create the constant buffer surface. Vertex/fragment shader constants will be
732 * read from this buffer with Data Port Read instructions/messages.
733 */
734 void
735 brw_create_constant_surface(struct brw_context *brw,
736 struct brw_bo *bo,
737 uint32_t offset,
738 uint32_t size,
739 uint32_t *out_offset)
740 {
741 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
742 ISL_FORMAT_R32G32B32A32_FLOAT,
743 size, 1, false);
744 }
745
746 /**
747 * Create the buffer surface. Shader buffer variables will be
748 * read from / write to this buffer with Data Port Read/Write
749 * instructions/messages.
750 */
751 void
752 brw_create_buffer_surface(struct brw_context *brw,
753 struct brw_bo *bo,
754 uint32_t offset,
755 uint32_t size,
756 uint32_t *out_offset)
757 {
758 /* Use a raw surface so we can reuse existing untyped read/write/atomic
759 * messages. We need these specifically for the fragment shader since they
 760    * include a pixel mask header, which we need in order to ensure correct
 761    * behavior with helper invocations, which cannot write to the buffer.
762 */
763 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
764 ISL_FORMAT_RAW,
765 size, 1, true);
766 }
767
768 /**
769 * Set up a binding table entry for use by stream output logic (transform
770 * feedback).
771 *
772 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
773 */
774 void
775 brw_update_sol_surface(struct brw_context *brw,
776 struct gl_buffer_object *buffer_obj,
777 uint32_t *out_offset, unsigned num_vector_components,
778 unsigned stride_dwords, unsigned offset_dwords)
779 {
780 struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
781 uint32_t offset_bytes = 4 * offset_dwords;
782 struct brw_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
783 offset_bytes,
784 buffer_obj->Size - offset_bytes,
785 true);
786 uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
787 uint32_t pitch_minus_1 = 4*stride_dwords - 1;
788 size_t size_dwords = buffer_obj->Size / 4;
789 uint32_t buffer_size_minus_1, width, height, depth, surface_format;
790
791 /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
792 * too big to map using a single binding table entry?
793 */
794 assert((size_dwords - offset_dwords) / stride_dwords
795 <= BRW_MAX_NUM_BUFFER_ENTRIES);
796
797 if (size_dwords > offset_dwords + num_vector_components) {
798 /* There is room for at least 1 transform feedback output in the buffer.
799 * Compute the number of additional transform feedback outputs the
800 * buffer has room for.
801 */
802 buffer_size_minus_1 =
803 (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
804 } else {
805 /* There isn't even room for a single transform feedback output in the
806 * buffer. We can't configure the binding table entry to prevent output
807 * entirely; we'll have to rely on the geometry shader to detect
808 * overflow. But to minimize the damage in case of a bug, set up the
809 * binding table entry to just allow a single output.
810 */
811 buffer_size_minus_1 = 0;
812 }
813 width = buffer_size_minus_1 & 0x7f;
814 height = (buffer_size_minus_1 & 0xfff80) >> 7;
815 depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
816
817 switch (num_vector_components) {
818 case 1:
819 surface_format = ISL_FORMAT_R32_FLOAT;
820 break;
821 case 2:
822 surface_format = ISL_FORMAT_R32G32_FLOAT;
823 break;
824 case 3:
825 surface_format = ISL_FORMAT_R32G32B32_FLOAT;
826 break;
827 case 4:
828 surface_format = ISL_FORMAT_R32G32B32A32_FLOAT;
829 break;
830 default:
831 unreachable("Invalid vector size for transform feedback output");
832 }
833
834 surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
835 BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
836 surface_format << BRW_SURFACE_FORMAT_SHIFT |
837 BRW_SURFACE_RC_READ_WRITE;
838 surf[1] = bo->offset64 + offset_bytes; /* reloc */
839 surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
840 height << BRW_SURFACE_HEIGHT_SHIFT);
841 surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
842 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
843 surf[4] = 0;
844 surf[5] = 0;
845
846 /* Emit relocation to surface contents. */
847 brw_emit_reloc(&brw->batch, *out_offset + 4, bo, offset_bytes,
848 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
849 }
850
851 /* Creates a new WM constant buffer reflecting the current fragment program's
852 * constants, if needed by the fragment program.
853 *
854 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
855 * state atom.
856 */
857 static void
858 brw_upload_wm_pull_constants(struct brw_context *brw)
859 {
860 struct brw_stage_state *stage_state = &brw->wm.base;
861 /* BRW_NEW_FRAGMENT_PROGRAM */
862 struct brw_program *fp = (struct brw_program *) brw->fragment_program;
863 /* BRW_NEW_FS_PROG_DATA */
864 struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;
865
866 _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
867 /* _NEW_PROGRAM_CONSTANTS */
868 brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
869 stage_state, prog_data);
870 }
871
872 const struct brw_tracked_state brw_wm_pull_constants = {
873 .dirty = {
874 .mesa = _NEW_PROGRAM_CONSTANTS,
875 .brw = BRW_NEW_BATCH |
876 BRW_NEW_BLORP |
877 BRW_NEW_FRAGMENT_PROGRAM |
878 BRW_NEW_FS_PROG_DATA,
879 },
880 .emit = brw_upload_wm_pull_constants,
881 };
882
883 /**
884 * Creates a null renderbuffer surface.
885 *
886 * This is used when the shader doesn't write to any color output. An FB
887 * write to target 0 will still be emitted, because that's how the thread is
888 * terminated (and computed depth is returned), so we need to have the
 889  * hardware discard the target 0 color output.
890 */
891 static void
892 brw_emit_null_surface_state(struct brw_context *brw,
893 unsigned width,
894 unsigned height,
895 unsigned samples,
896 uint32_t *out_offset)
897 {
898 /* From the Sandy bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
899 * Notes):
900 *
901 * A null surface will be used in instances where an actual surface is
902 * not bound. When a write message is generated to a null surface, no
903 * actual surface is written to. When a read message (including any
904 * sampling engine message) is generated to a null surface, the result
905 * is all zeros. Note that a null surface type is allowed to be used
 906    *     with all messages, even if it is not specifically indicated as
907 * supported. All of the remaining fields in surface state are ignored
908 * for null surfaces, with the following exceptions:
909 *
910 * - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
911 * depth buffer’s corresponding state for all render target surfaces,
912 * including null.
913 *
914 * - Surface Format must be R8G8B8A8_UNORM.
915 */
916 unsigned surface_type = BRW_SURFACE_NULL;
917 struct brw_bo *bo = NULL;
918 unsigned pitch_minus_1 = 0;
919 uint32_t multisampling_state = 0;
920 uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
921
922 if (samples > 1) {
923 /* On Gen6, null render targets seem to cause GPU hangs when
 924       * multisampling. So work around this problem by rendering into a dummy
925 * color buffer.
926 *
927 * To decrease the amount of memory needed by the workaround buffer, we
928 * set its pitch to 128 bytes (the width of a Y tile). This means that
929 * the amount of memory needed for the workaround buffer is
930 * (width_in_tiles + height_in_tiles - 1) tiles.
931 *
932 * Note that since the workaround buffer will be interpreted by the
933 * hardware as an interleaved multisampled buffer, we need to compute
934 * width_in_tiles and height_in_tiles by dividing the width and height
935 * by 16 rather than the normal Y-tile size of 32.
936 */
937 unsigned width_in_tiles = ALIGN(width, 16) / 16;
938 unsigned height_in_tiles = ALIGN(height, 16) / 16;
939 unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
940 brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
941 size_needed);
942 bo = brw->wm.multisampled_null_render_target_bo;
943 surface_type = BRW_SURFACE_2D;
944 pitch_minus_1 = 127;
945 multisampling_state = brw_get_surface_num_multisamples(samples);
946 }
947
948 surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
949 ISL_FORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
950 if (brw->gen < 6) {
951 surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
952 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
953 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
954 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
955 }
956 surf[1] = bo ? bo->offset64 : 0;
957 surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
958 (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
959
960 /* From Sandy bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
961 * Notes):
962 *
963 * If Surface Type is SURFTYPE_NULL, this field must be TRUE
964 */
965 surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
966 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
967 surf[4] = multisampling_state;
968 surf[5] = 0;
969
970 if (bo) {
971 brw_emit_reloc(&brw->batch, *out_offset + 4, bo, 0,
972 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
973 }
974 }
975
976 /**
977 * Sets up a surface state structure to point at the given region.
978 * While it is only used for the front/back buffer currently, it should be
 979  * usable for further buffers when doing ARB_draw_buffers support.
980 */
981 static uint32_t
982 gen4_update_renderbuffer_surface(struct brw_context *brw,
983 struct gl_renderbuffer *rb,
984 uint32_t flags, unsigned unit,
985 uint32_t surf_index)
986 {
987 struct gl_context *ctx = &brw->ctx;
988 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
989 struct intel_mipmap_tree *mt = irb->mt;
990 uint32_t *surf;
991 uint32_t tile_x, tile_y;
992 enum isl_format format;
993 uint32_t offset;
994 /* _NEW_BUFFERS */
995 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
996 /* BRW_NEW_FS_PROG_DATA */
997
998 assert(!(flags & INTEL_RENDERBUFFER_LAYERED));
999 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
1000
1001 if (rb->TexImage && !brw->has_surface_tile_offset) {
1002 intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);
1003
1004 if (tile_x != 0 || tile_y != 0) {
1005 /* Original gen4 hardware couldn't draw to a non-tile-aligned
 1006          * destination in a miptree unless you actually set up your renderbuffer
1007 * as a miptree and used the fragile lod/array_index/etc. controls to
1008 * select the image. So, instead, we just make a new single-level
1009 * miptree and render into that.
1010 */
1011 intel_renderbuffer_move_to_temp(brw, irb, false);
1012 assert(irb->align_wa_mt);
1013 mt = irb->align_wa_mt;
1014 }
1015 }
1016
1017 surf = brw_state_batch(brw, 6 * 4, 32, &offset);
1018
1019 format = brw->mesa_to_isl_render_format[rb_format];
1020 if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
1021 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
1022 __func__, _mesa_get_format_name(rb_format));
1023 }
1024
1025 surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
1026 format << BRW_SURFACE_FORMAT_SHIFT);
1027
1028 /* reloc */
1029 assert(mt->offset % mt->cpp == 0);
1030 surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
1031 mt->bo->offset64 + mt->offset);
1032
1033 surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
1034 (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
1035
1036 surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
1037 (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
1038
1039 surf[4] = brw_get_surface_num_multisamples(mt->num_samples);
1040
1041 assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
1042 /* Note that the low bits of these fields are missing, so
1043 * there's the possibility of getting in trouble.
1044 */
1045 assert(tile_x % 4 == 0);
1046 assert(tile_y % 2 == 0);
1047 surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
1048 (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
1049 (mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));
1050
1051 if (brw->gen < 6) {
1052 /* _NEW_COLOR */
1053 if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
1054 (ctx->Color.BlendEnabled & (1 << unit)))
1055 surf[0] |= BRW_SURFACE_BLEND_ENABLED;
1056
1057 if (!ctx->Color.ColorMask[unit][0])
1058 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
1059 if (!ctx->Color.ColorMask[unit][1])
1060 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
1061 if (!ctx->Color.ColorMask[unit][2])
1062 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;
1063
1064 /* As mentioned above, disable writes to the alpha component when the
1065 * renderbuffer is XRGB.
1066 */
1067 if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
1068 !ctx->Color.ColorMask[unit][3]) {
1069 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
1070 }
1071 }
1072
1073 brw_emit_reloc(&brw->batch, offset + 4, mt->bo, surf[1] - mt->bo->offset64,
1074 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
1075
1076 return offset;
1077 }
1078
1079 /**
1080 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
1081 */
1082 void
1083 brw_update_renderbuffer_surfaces(struct brw_context *brw,
1084 const struct gl_framebuffer *fb,
1085 uint32_t render_target_start,
1086 uint32_t *surf_offset)
1087 {
1088 GLuint i;
1089 const unsigned int w = _mesa_geometric_width(fb);
1090 const unsigned int h = _mesa_geometric_height(fb);
1091 const unsigned int s = _mesa_geometric_samples(fb);
1092
1093 /* Update surfaces for drawing buffers */
1094 if (fb->_NumColorDrawBuffers >= 1) {
1095 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1096 const uint32_t surf_index = render_target_start + i;
1097 const int flags = (_mesa_geometric_layers(fb) > 0 ?
1098 INTEL_RENDERBUFFER_LAYERED : 0) |
1099 (brw->draw_aux_buffer_disabled[i] ?
1100 INTEL_AUX_BUFFER_DISABLED : 0);
1101
1102 if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
1103 surf_offset[surf_index] =
1104 brw->vtbl.update_renderbuffer_surface(
1105 brw, fb->_ColorDrawBuffers[i], flags, i, surf_index);
1106 } else {
1107 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1108 &surf_offset[surf_index]);
1109 }
1110 }
1111 } else {
1112 const uint32_t surf_index = render_target_start;
1113 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1114 &surf_offset[surf_index]);
1115 }
1116 }
1117
1118 static void
1119 update_renderbuffer_surfaces(struct brw_context *brw)
1120 {
1121 const struct gl_context *ctx = &brw->ctx;
1122
1123 /* BRW_NEW_FS_PROG_DATA */
1124 const struct brw_wm_prog_data *wm_prog_data =
1125 brw_wm_prog_data(brw->wm.base.prog_data);
1126
1127 /* _NEW_BUFFERS | _NEW_COLOR */
1128 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1129 brw_update_renderbuffer_surfaces(
1130 brw, fb,
1131 wm_prog_data->binding_table.render_target_start,
1132 brw->wm.base.surf_offset);
1133 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1134 }
1135
1136 const struct brw_tracked_state brw_renderbuffer_surfaces = {
1137 .dirty = {
1138 .mesa = _NEW_BUFFERS |
1139 _NEW_COLOR,
1140 .brw = BRW_NEW_BATCH |
1141 BRW_NEW_BLORP |
1142 BRW_NEW_FS_PROG_DATA,
1143 },
1144 .emit = update_renderbuffer_surfaces,
1145 };
1146
1147 const struct brw_tracked_state gen6_renderbuffer_surfaces = {
1148 .dirty = {
1149 .mesa = _NEW_BUFFERS,
1150 .brw = BRW_NEW_BATCH |
1151 BRW_NEW_BLORP,
1152 },
1153 .emit = update_renderbuffer_surfaces,
1154 };
1155
1156 static void
1157 update_renderbuffer_read_surfaces(struct brw_context *brw)
1158 {
1159 const struct gl_context *ctx = &brw->ctx;
1160
1161 /* BRW_NEW_FS_PROG_DATA */
1162 const struct brw_wm_prog_data *wm_prog_data =
1163 brw_wm_prog_data(brw->wm.base.prog_data);
1164
1165 /* BRW_NEW_FRAGMENT_PROGRAM */
1166 if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
1167 brw->fragment_program && brw->fragment_program->info.outputs_read) {
1168 /* _NEW_BUFFERS */
1169 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1170
1171 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
1172 struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
1173 const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
1174 const unsigned surf_index =
1175 wm_prog_data->binding_table.render_target_read_start + i;
1176 uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];
1177
1178 if (irb) {
1179 const enum isl_format format = brw->mesa_to_isl_render_format[
1180 _mesa_get_render_format(ctx, intel_rb_format(irb))];
1181 assert(isl_format_supports_sampling(&brw->screen->devinfo,
1182 format));
1183
1184 /* Override the target of the texture if the render buffer is a
1185 * single slice of a 3D texture (since the minimum array element
1186 * field of the surface state structure is ignored by the sampler
1187 * unit for 3D textures on some hardware), or if the render buffer
1188 * is a 1D array (since shaders always provide the array index
1189 * coordinate at the Z component to avoid state-dependent
1190 * recompiles when changing the texture target of the
1191 * framebuffer).
1192 */
1193 const GLenum target =
1194 (irb->mt->target == GL_TEXTURE_3D &&
1195 irb->layer_count == 1) ? GL_TEXTURE_2D :
1196 irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
1197 irb->mt->target;
1198
1199 const struct isl_view view = {
1200 .format = format,
1201 .base_level = irb->mt_level - irb->mt->first_level,
1202 .levels = 1,
1203 .base_array_layer = irb->mt_layer,
1204 .array_len = irb->layer_count,
1205 .swizzle = ISL_SWIZZLE_IDENTITY,
1206 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
1207 };
1208
1209 const int flags = brw->draw_aux_buffer_disabled[i] ?
1210 INTEL_AUX_BUFFER_DISABLED : 0;
1211 brw_emit_surface_state(brw, irb->mt, flags, target, view,
1212 tex_mocs[brw->gen],
1213 surf_offset, surf_index,
1214 I915_GEM_DOMAIN_SAMPLER, 0);
1215
1216 } else {
1217 brw->vtbl.emit_null_surface_state(
1218 brw, _mesa_geometric_width(fb), _mesa_geometric_height(fb),
1219 _mesa_geometric_samples(fb), surf_offset);
1220 }
1221 }
1222
1223 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1224 }
1225 }
1226
1227 const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
1228 .dirty = {
1229 .mesa = _NEW_BUFFERS,
1230 .brw = BRW_NEW_BATCH |
1231 BRW_NEW_FRAGMENT_PROGRAM |
1232 BRW_NEW_FS_PROG_DATA,
1233 },
1234 .emit = update_renderbuffer_read_surfaces,
1235 };
1236
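/**
 * Emit texture surface states for every sampler used by the given program
 * stage, starting at the stage's gather or plane offset within the binding
 * table.
 */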
1237 static void
1238 update_stage_texture_surfaces(struct brw_context *brw,
1239 const struct gl_program *prog,
1240 struct brw_stage_state *stage_state,
1241 bool for_gather, uint32_t plane)
1242 {
1243 if (!prog)
1244 return;
1245
1246 struct gl_context *ctx = &brw->ctx;
1247
1248 uint32_t *surf_offset = stage_state->surf_offset;
1249
1250 /* BRW_NEW_*_PROG_DATA */
1251 if (for_gather)
1252 surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
1253 else
1254 surf_offset += stage_state->prog_data->binding_table.plane_start[plane];
1255
1256 unsigned num_samplers = util_last_bit(prog->SamplersUsed);
1257 for (unsigned s = 0; s < num_samplers; s++) {
1258 surf_offset[s] = 0;
1259
1260 if (prog->SamplersUsed & (1 << s)) {
1261 const unsigned unit = prog->SamplerUnits[s];
1262
1263 /* _NEW_TEXTURE */
1264 if (ctx->Texture.Unit[unit]._Current) {
1265 brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather, plane);
1266 }
1267 }
1268 }
1269 }
1270
1271
1272 /**
1273 * Construct SURFACE_STATE objects for enabled textures.
1274 */
1275 static void
1276 brw_update_texture_surfaces(struct brw_context *brw)
1277 {
1278 /* BRW_NEW_VERTEX_PROGRAM */
1279 struct gl_program *vs = (struct gl_program *) brw->vertex_program;
1280
1281 /* BRW_NEW_TESS_PROGRAMS */
1282 struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
1283 struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;
1284
1285 /* BRW_NEW_GEOMETRY_PROGRAM */
1286 struct gl_program *gs = (struct gl_program *) brw->geometry_program;
1287
1288 /* BRW_NEW_FRAGMENT_PROGRAM */
1289 struct gl_program *fs = (struct gl_program *) brw->fragment_program;
1290
1291 /* _NEW_TEXTURE */
1292 update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
1293 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
1294 update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
1295 update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
1296 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);
1297
 1298    /* Emit an alternate set of surface states for gather. This
 1299     * allows the surface format to be overridden for only the
 1300     * gather4 messages. */
1301 if (brw->gen < 8) {
1302 if (vs && vs->nir->info.uses_texture_gather)
1303 update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
1304 if (tcs && tcs->nir->info.uses_texture_gather)
1305 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
1306 if (tes && tes->nir->info.uses_texture_gather)
1307 update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
1308 if (gs && gs->nir->info.uses_texture_gather)
1309 update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
1310 if (fs && fs->nir->info.uses_texture_gather)
1311 update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
1312 }
1313
1314 if (fs) {
1315 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
1316 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
1317 }
1318
1319 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1320 }
1321
1322 const struct brw_tracked_state brw_texture_surfaces = {
1323 .dirty = {
1324 .mesa = _NEW_TEXTURE,
1325 .brw = BRW_NEW_BATCH |
1326 BRW_NEW_BLORP |
1327 BRW_NEW_FRAGMENT_PROGRAM |
1328 BRW_NEW_FS_PROG_DATA |
1329 BRW_NEW_GEOMETRY_PROGRAM |
1330 BRW_NEW_GS_PROG_DATA |
1331 BRW_NEW_TESS_PROGRAMS |
1332 BRW_NEW_TCS_PROG_DATA |
1333 BRW_NEW_TES_PROG_DATA |
1334 BRW_NEW_TEXTURE_BUFFER |
1335 BRW_NEW_VERTEX_PROGRAM |
1336 BRW_NEW_VS_PROG_DATA,
1337 },
1338 .emit = brw_update_texture_surfaces,
1339 };
1340
1341 static void
1342 brw_update_cs_texture_surfaces(struct brw_context *brw)
1343 {
1344 /* BRW_NEW_COMPUTE_PROGRAM */
1345 struct gl_program *cs = (struct gl_program *) brw->compute_program;
1346
1347 /* _NEW_TEXTURE */
1348 update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);
1349
 1350    /* Emit an alternate set of surface states for gather. This
 1351     * allows the surface format to be overridden for only the
 1352     * gather4 messages.
1353 */
1354 if (brw->gen < 8) {
1355 if (cs && cs->nir->info.uses_texture_gather)
1356 update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
1357 }
1358
1359 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1360 }
1361
1362 const struct brw_tracked_state brw_cs_texture_surfaces = {
1363 .dirty = {
1364 .mesa = _NEW_TEXTURE,
1365 .brw = BRW_NEW_BATCH |
1366 BRW_NEW_BLORP |
1367 BRW_NEW_COMPUTE_PROGRAM,
1368 },
1369 .emit = brw_update_cs_texture_surfaces,
1370 };
1371
1372
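/**
 * Create constant buffer (UBO) and shader storage buffer (SSBO) surfaces for
 * every block used by the program, emitting null surfaces for unbound
 * binding points.
 */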
1373 void
1374 brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
1375 struct brw_stage_state *stage_state,
1376 struct brw_stage_prog_data *prog_data)
1377 {
1378 struct gl_context *ctx = &brw->ctx;
1379
1380 if (!prog)
1381 return;
1382
1383 uint32_t *ubo_surf_offsets =
1384 &stage_state->surf_offset[prog_data->binding_table.ubo_start];
1385
1386 for (int i = 0; i < prog->info.num_ubos; i++) {
1387 struct gl_uniform_buffer_binding *binding =
1388 &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];
1389
1390 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1391 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
1392 } else {
1393 struct intel_buffer_object *intel_bo =
1394 intel_buffer_object(binding->BufferObject);
1395 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1396 if (!binding->AutomaticSize)
1397 size = MIN2(size, binding->Size);
1398 struct brw_bo *bo =
1399 intel_bufferobj_buffer(brw, intel_bo,
1400 binding->Offset,
1401 size, false);
1402 brw_create_constant_surface(brw, bo, binding->Offset,
1403 size,
1404 &ubo_surf_offsets[i]);
1405 }
1406 }
1407
1408 uint32_t *ssbo_surf_offsets =
1409 &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
1410
1411 for (int i = 0; i < prog->info.num_ssbos; i++) {
1412 struct gl_shader_storage_buffer_binding *binding =
1413 &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];
1414
1415 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1416 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
1417 } else {
1418 struct intel_buffer_object *intel_bo =
1419 intel_buffer_object(binding->BufferObject);
1420 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1421 if (!binding->AutomaticSize)
1422 size = MIN2(size, binding->Size);
1423 struct brw_bo *bo =
1424 intel_bufferobj_buffer(brw, intel_bo,
1425 binding->Offset,
1426 size, true);
1427 brw_create_buffer_surface(brw, bo, binding->Offset,
1428 size,
1429 &ssbo_surf_offsets[i]);
1430 }
1431 }
1432
1433 stage_state->push_constants_dirty = true;
1434
1435 if (prog->info.num_ubos || prog->info.num_ssbos)
1436 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1437 }
1438
1439 static void
1440 brw_upload_wm_ubo_surfaces(struct brw_context *brw)
1441 {
1442 struct gl_context *ctx = &brw->ctx;
1443 /* _NEW_PROGRAM */
1444 struct gl_program *prog = ctx->FragmentProgram._Current;
1445
1446 /* BRW_NEW_FS_PROG_DATA */
1447 brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
1448 }
1449
1450 const struct brw_tracked_state brw_wm_ubo_surfaces = {
1451 .dirty = {
1452 .mesa = _NEW_PROGRAM,
1453 .brw = BRW_NEW_BATCH |
1454 BRW_NEW_BLORP |
1455 BRW_NEW_FS_PROG_DATA |
1456 BRW_NEW_UNIFORM_BUFFER,
1457 },
1458 .emit = brw_upload_wm_ubo_surfaces,
1459 };
1460
1461 static void
1462 brw_upload_cs_ubo_surfaces(struct brw_context *brw)
1463 {
1464 struct gl_context *ctx = &brw->ctx;
1465 /* _NEW_PROGRAM */
1466 struct gl_program *prog =
1467 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1468
1469 /* BRW_NEW_CS_PROG_DATA */
1470 brw_upload_ubo_surfaces(brw, prog, &brw->cs.base, brw->cs.base.prog_data);
1471 }
1472
1473 const struct brw_tracked_state brw_cs_ubo_surfaces = {
1474 .dirty = {
1475 .mesa = _NEW_PROGRAM,
1476 .brw = BRW_NEW_BATCH |
1477 BRW_NEW_BLORP |
1478 BRW_NEW_CS_PROG_DATA |
1479 BRW_NEW_UNIFORM_BUFFER,
1480 },
1481 .emit = brw_upload_cs_ubo_surfaces,
1482 };
1483
1484 void
1485 brw_upload_abo_surfaces(struct brw_context *brw,
1486 const struct gl_program *prog,
1487 struct brw_stage_state *stage_state,
1488 struct brw_stage_prog_data *prog_data)
1489 {
1490 struct gl_context *ctx = &brw->ctx;
1491 uint32_t *surf_offsets =
1492 &stage_state->surf_offset[prog_data->binding_table.abo_start];
1493
1494 if (prog->info.num_abos) {
1495 for (unsigned i = 0; i < prog->info.num_abos; i++) {
1496 struct gl_atomic_buffer_binding *binding =
1497 &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
1498 struct intel_buffer_object *intel_bo =
1499 intel_buffer_object(binding->BufferObject);
1500 struct brw_bo *bo =
1501 intel_bufferobj_buffer(brw, intel_bo, binding->Offset,
1502 intel_bo->Base.Size - binding->Offset,
1503 true);
1504
1505 brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
1506 binding->Offset, ISL_FORMAT_RAW,
1507 bo->size - binding->Offset, 1, true);
1508 }
1509
1510 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1511 }
1512 }
1513
1514 static void
1515 brw_upload_wm_abo_surfaces(struct brw_context *brw)
1516 {
1517 /* _NEW_PROGRAM */
1518 const struct gl_program *wm = brw->fragment_program;
1519
1520 if (wm) {
1521 /* BRW_NEW_FS_PROG_DATA */
1522 brw_upload_abo_surfaces(brw, wm, &brw->wm.base, brw->wm.base.prog_data);
1523 }
1524 }
1525
1526 const struct brw_tracked_state brw_wm_abo_surfaces = {
1527 .dirty = {
1528 .mesa = _NEW_PROGRAM,
1529 .brw = BRW_NEW_ATOMIC_BUFFER |
1530 BRW_NEW_BLORP |
1531 BRW_NEW_BATCH |
1532 BRW_NEW_FS_PROG_DATA,
1533 },
1534 .emit = brw_upload_wm_abo_surfaces,
1535 };
1536
1537 static void
1538 brw_upload_cs_abo_surfaces(struct brw_context *brw)
1539 {
1540 /* _NEW_PROGRAM */
1541 const struct gl_program *cp = brw->compute_program;
1542
1543 if (cp) {
1544 /* BRW_NEW_CS_PROG_DATA */
1545 brw_upload_abo_surfaces(brw, cp, &brw->cs.base, brw->cs.base.prog_data);
1546 }
1547 }
1548
1549 const struct brw_tracked_state brw_cs_abo_surfaces = {
1550 .dirty = {
1551 .mesa = _NEW_PROGRAM,
1552 .brw = BRW_NEW_ATOMIC_BUFFER |
1553 BRW_NEW_BLORP |
1554 BRW_NEW_BATCH |
1555 BRW_NEW_CS_PROG_DATA,
1556 },
1557 .emit = brw_upload_cs_abo_surfaces,
1558 };
1559
1560 static void
1561 brw_upload_cs_image_surfaces(struct brw_context *brw)
1562 {
1563 /* _NEW_PROGRAM */
1564 const struct gl_program *cp = brw->compute_program;
1565
1566 if (cp) {
1567 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1568 brw_upload_image_surfaces(brw, cp, &brw->cs.base,
1569 brw->cs.base.prog_data);
1570 }
1571 }
1572
1573 const struct brw_tracked_state brw_cs_image_surfaces = {
1574 .dirty = {
1575 .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
1576 .brw = BRW_NEW_BATCH |
1577 BRW_NEW_BLORP |
1578 BRW_NEW_CS_PROG_DATA |
1579 BRW_NEW_IMAGE_UNITS
1580 },
1581 .emit = brw_upload_cs_image_surfaces,
1582 };
1583
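/**
 * Pick the surface format used to access a shader image: the real format for
 * write-only access, a lowered typed format when the hardware can read it,
 * and raw (untyped) access otherwise.
 */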
1584 static uint32_t
1585 get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
1586 {
1587 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1588 enum isl_format hw_format = brw_isl_format_for_mesa_format(format);
1589 if (access == GL_WRITE_ONLY) {
1590 return hw_format;
1591 } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
1592 /* Typed surface reads support a very limited subset of the shader
1593 * image formats. Translate it into the closest format the
1594 * hardware supports.
1595 */
1596 return isl_lower_storage_image_format(devinfo, hw_format);
1597 } else {
1598 /* The hardware doesn't actually support a typed format that we can use
1599 * so we have to fall back to untyped read/write messages.
1600 */
1601 return ISL_FORMAT_RAW;
1602 }
1603 }
1604
1605 static void
1606 update_default_image_param(struct brw_context *brw,
1607 struct gl_image_unit *u,
1608 unsigned surface_idx,
1609 struct brw_image_param *param)
1610 {
1611 memset(param, 0, sizeof(*param));
1612 param->surface_idx = surface_idx;
1613 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
1614 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
1615 * detailed explanation of these parameters.
1616 */
1617 param->swizzling[0] = 0xff;
1618 param->swizzling[1] = 0xff;
1619 }
1620
1621 static void
1622 update_buffer_image_param(struct brw_context *brw,
1623 struct gl_image_unit *u,
1624 unsigned surface_idx,
1625 struct brw_image_param *param)
1626 {
1627 struct gl_buffer_object *obj = u->TexObj->BufferObject;
1628 const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
1629 update_default_image_param(brw, u, surface_idx, param);
1630
1631 param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
1632 param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
1633 }
1634
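/**
 * Emit the surface state and fill out the brw_image_param metadata for one
 * image unit, handling buffer images, raw-format fallbacks and invalid
 * bindings.
 */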
1635 static void
1636 update_image_surface(struct brw_context *brw,
1637 struct gl_image_unit *u,
1638 GLenum access,
1639 unsigned surface_idx,
1640 uint32_t *surf_offset,
1641 struct brw_image_param *param)
1642 {
1643 if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
1644 struct gl_texture_object *obj = u->TexObj;
1645 const unsigned format = get_image_format(brw, u->_ActualFormat, access);
1646
1647 if (obj->Target == GL_TEXTURE_BUFFER) {
1648 struct intel_buffer_object *intel_obj =
1649 intel_buffer_object(obj->BufferObject);
1650 const unsigned texel_size = (format == ISL_FORMAT_RAW ? 1 :
1651 _mesa_get_format_bytes(u->_ActualFormat));
1652
1653 brw_emit_buffer_surface_state(
1654 brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
1655 format, intel_obj->Base.Size, texel_size,
1656 access != GL_READ_ONLY);
1657
1658 update_buffer_image_param(brw, u, surface_idx, param);
1659
1660 } else {
1661 struct intel_texture_object *intel_obj = intel_texture_object(obj);
1662 struct intel_mipmap_tree *mt = intel_obj->mt;
1663 const unsigned num_layers = (!u->Layered ? 1 :
1664 obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1665 mt->logical_depth0);
1666
1667 struct isl_view view = {
1668 .format = format,
1669 .base_level = obj->MinLevel + u->Level,
1670 .levels = 1,
1671 .base_array_layer = obj->MinLayer + u->_Layer,
1672 .array_len = num_layers,
1673 .swizzle = ISL_SWIZZLE_IDENTITY,
1674 .usage = ISL_SURF_USAGE_STORAGE_BIT,
1675 };
1676
1677 if (format == ISL_FORMAT_RAW) {
1678 brw_emit_buffer_surface_state(
1679 brw, surf_offset, mt->bo, mt->offset,
1680 format, mt->bo->size - mt->offset, 1 /* pitch */,
1681 access != GL_READ_ONLY);
1682
1683 } else {
1684 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
1685 assert(!intel_miptree_has_color_unresolved(mt,
1686 view.base_level, 1,
1687 view.base_array_layer,
1688 view.array_len));
1689 brw_emit_surface_state(brw, mt, INTEL_AUX_BUFFER_DISABLED,
1690 mt->target, view, tex_mocs[brw->gen],
1691 surf_offset, surf_index,
1692 I915_GEM_DOMAIN_SAMPLER,
1693 access == GL_READ_ONLY ? 0 :
1694 I915_GEM_DOMAIN_SAMPLER);
1695 }
1696
1697 struct isl_surf surf;
1698 intel_miptree_get_isl_surf(brw, mt, &surf);
1699
1700 isl_surf_fill_image_param(&brw->isl_dev, param, &surf, &view);
1701 param->surface_idx = surface_idx;
1702 }
1703
1704 } else {
1705 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
1706 update_default_image_param(brw, u, surface_idx, param);
1707 }
1708 }
1709
1710 void
1711 brw_upload_image_surfaces(struct brw_context *brw,
1712 const struct gl_program *prog,
1713 struct brw_stage_state *stage_state,
1714 struct brw_stage_prog_data *prog_data)
1715 {
1716 assert(prog);
1717 struct gl_context *ctx = &brw->ctx;
1718
1719 if (prog->info.num_images) {
1720 for (unsigned i = 0; i < prog->info.num_images; i++) {
1721 struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
1722 const unsigned surf_idx = prog_data->binding_table.image_start + i;
1723
1724 update_image_surface(brw, u, prog->sh.ImageAccess[i],
1725 surf_idx,
1726 &stage_state->surf_offset[surf_idx],
1727 &prog_data->image_param[i]);
1728 }
1729
1730 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1731 /* This may have changed the image metadata dependent on the context
 1732       * image unit state and passed to the program as uniforms; make sure
1733 * that push and pull constants are reuploaded.
1734 */
1735 brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
1736 }
1737 }
1738
1739 static void
1740 brw_upload_wm_image_surfaces(struct brw_context *brw)
1741 {
1742 /* BRW_NEW_FRAGMENT_PROGRAM */
1743 const struct gl_program *wm = brw->fragment_program;
1744
1745 if (wm) {
1746 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1747 brw_upload_image_surfaces(brw, wm, &brw->wm.base,
1748 brw->wm.base.prog_data);
1749 }
1750 }
1751
1752 const struct brw_tracked_state brw_wm_image_surfaces = {
1753 .dirty = {
1754 .mesa = _NEW_TEXTURE,
1755 .brw = BRW_NEW_BATCH |
1756 BRW_NEW_BLORP |
1757 BRW_NEW_FRAGMENT_PROGRAM |
1758 BRW_NEW_FS_PROG_DATA |
1759 BRW_NEW_IMAGE_UNITS
1760 },
1761 .emit = brw_upload_wm_image_surfaces,
1762 };
1763
1764 void
1765 gen4_init_vtable_surface_functions(struct brw_context *brw)
1766 {
1767 brw->vtbl.update_renderbuffer_surface = gen4_update_renderbuffer_surface;
1768 brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
1769 }
1770
1771 void
1772 gen6_init_vtable_surface_functions(struct brw_context *brw)
1773 {
1774 gen4_init_vtable_surface_functions(brw);
1775 brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
1776 }
1777
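/**
 * Upload the surface used to read the number of work groups
 * (gl_NumWorkGroups), either from the indirect dispatch BO or from freshly
 * uploaded data.
 */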
1778 static void
1779 brw_upload_cs_work_groups_surface(struct brw_context *brw)
1780 {
1781 struct gl_context *ctx = &brw->ctx;
1782 /* _NEW_PROGRAM */
1783 struct gl_program *prog =
1784 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1785 /* BRW_NEW_CS_PROG_DATA */
1786 const struct brw_cs_prog_data *cs_prog_data =
1787 brw_cs_prog_data(brw->cs.base.prog_data);
1788
1789 if (prog && cs_prog_data->uses_num_work_groups) {
1790 const unsigned surf_idx =
1791 cs_prog_data->binding_table.work_groups_start;
1792 uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
1793 struct brw_bo *bo;
1794 uint32_t bo_offset;
1795
1796 if (brw->compute.num_work_groups_bo == NULL) {
1797 bo = NULL;
1798 intel_upload_data(brw,
1799 (void *)brw->compute.num_work_groups,
1800 3 * sizeof(GLuint),
1801 sizeof(GLuint),
1802 &bo,
1803 &bo_offset);
1804 } else {
1805 bo = brw->compute.num_work_groups_bo;
1806 bo_offset = brw->compute.num_work_groups_offset;
1807 }
1808
1809 brw_emit_buffer_surface_state(brw, surf_offset,
1810 bo, bo_offset,
1811 ISL_FORMAT_RAW,
1812 3 * sizeof(GLuint), 1, true);
1813 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1814 }
1815 }
1816
1817 const struct brw_tracked_state brw_cs_work_groups_surface = {
1818 .dirty = {
1819 .brw = BRW_NEW_BLORP |
1820 BRW_NEW_CS_PROG_DATA |
1821 BRW_NEW_CS_WORK_GROUPS
1822 },
1823 .emit = brw_upload_cs_work_groups_surface,
1824 };