/* [mesa.git] src/mesa/drivers/dri/i965/brw_wm_surface_state.c */
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "compiler/nir/nir.h"
34 #include "main/context.h"
35 #include "main/blend.h"
36 #include "main/mtypes.h"
37 #include "main/samplerobj.h"
38 #include "main/shaderimage.h"
39 #include "main/teximage.h"
40 #include "program/prog_parameter.h"
41 #include "program/prog_instruction.h"
42 #include "main/framebuffer.h"
43 #include "main/shaderapi.h"
44
45 #include "isl/isl.h"
46
47 #include "intel_mipmap_tree.h"
48 #include "intel_batchbuffer.h"
49 #include "intel_tex.h"
50 #include "intel_fbo.h"
51 #include "intel_buffer_objects.h"
52
53 #include "brw_context.h"
54 #include "brw_state.h"
55 #include "brw_defines.h"
56 #include "brw_wm.h"
57
58 enum {
59 INTEL_RENDERBUFFER_LAYERED = 1 << 0,
60 INTEL_AUX_BUFFER_DISABLED = 1 << 1,
61 };
62
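/* Memory Object Control State (MOCS) values for texture and render-target
 * surfaces, indexed by hardware generation (brw->gen). Roughly: textures
 * are sampled with write-back caching, while gen8+ render targets use the
 * PTE-based entry so caching follows whatever the kernel set up for the BO.
 */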
63 uint32_t tex_mocs[] = {
64 [7] = GEN7_MOCS_L3,
65 [8] = BDW_MOCS_WB,
66 [9] = SKL_MOCS_WB,
67 };
68
69 uint32_t rb_mocs[] = {
70 [7] = GEN7_MOCS_L3,
71 [8] = BDW_MOCS_PTE,
72 [9] = SKL_MOCS_PTE,
73 };
74
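/* Emit SURFACE_STATE for a miptree: falls back to explicit tile offsets
 * when the requested target's dim layout doesn't match the miptree, hooks
 * up any auxiliary (MCS/HiZ) surface and clear color, and emits the
 * relocations for the main and aux buffers.
 */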
75 static void
76 brw_emit_surface_state(struct brw_context *brw,
77 struct intel_mipmap_tree *mt, uint32_t flags,
78 GLenum target, struct isl_view view,
79 uint32_t mocs, uint32_t *surf_offset, int surf_index,
80 unsigned read_domains, unsigned write_domains)
81 {
82 uint32_t tile_x = mt->level[0].slice[0].x_offset;
83 uint32_t tile_y = mt->level[0].slice[0].y_offset;
84 uint32_t offset = mt->offset;
85
86 struct isl_surf surf;
87 intel_miptree_get_isl_surf(brw, mt, &surf);
88
89 surf.dim = get_isl_surf_dim(target);
90
91 const enum isl_dim_layout dim_layout =
92 get_isl_dim_layout(&brw->screen->devinfo, mt->tiling, target);
93
94 if (surf.dim_layout != dim_layout) {
95 /* The layout of the specified texture target is not compatible with the
96 * actual layout of the miptree structure in memory -- you're entering
97 * dangerous territory. This can only work if you intend to access a
98 * single level and slice of the texture, and the hardware supports the
99 * tile offset feature in order to allow non-tile-aligned base offsets,
100 * since we'll have to point the hardware to the first texel of the
101 * level instead of relying on the usual base level/layer
102 * controls.
103 */
104 assert(brw->has_surface_tile_offset);
105 assert(view.levels == 1 && view.array_len == 1);
106 assert(tile_x == 0 && tile_y == 0);
107
108 offset += intel_miptree_get_tile_offsets(mt, view.base_level,
109 view.base_array_layer,
110 &tile_x, &tile_y);
111
112 /* Minify the logical dimensions of the texture. */
113 const unsigned l = view.base_level - mt->first_level;
114 surf.logical_level0_px.width = minify(surf.logical_level0_px.width, l);
115 surf.logical_level0_px.height = surf.dim <= ISL_SURF_DIM_1D ? 1 :
116 minify(surf.logical_level0_px.height, l);
117 surf.logical_level0_px.depth = surf.dim <= ISL_SURF_DIM_2D ? 1 :
118 minify(surf.logical_level0_px.depth, l);
119
120 /* Only the base level and layer can be addressed with the overridden
121 * layout.
122 */
123 surf.logical_level0_px.array_len = 1;
124 surf.levels = 1;
125 surf.dim_layout = dim_layout;
126
127 /* The requested slice of the texture is now at the base level and
128 * layer.
129 */
130 view.base_level = 0;
131 view.base_array_layer = 0;
132 }
133
134 union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };
135
136 drm_intel_bo *aux_bo;
137 struct isl_surf *aux_surf = NULL, aux_surf_s;
138 uint64_t aux_offset = 0;
139 enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE;
140 if ((mt->mcs_buf || intel_miptree_sample_with_hiz(brw, mt)) &&
141 !(flags & INTEL_AUX_BUFFER_DISABLED)) {
142 intel_miptree_get_aux_isl_surf(brw, mt, &aux_surf_s, &aux_usage);
143 aux_surf = &aux_surf_s;
144
145 if (mt->mcs_buf) {
146 aux_bo = mt->mcs_buf->bo;
147 aux_offset = mt->mcs_buf->bo->offset64 + mt->mcs_buf->offset;
148 } else {
149 aux_bo = mt->hiz_buf->aux_base.bo;
150 aux_offset = mt->hiz_buf->aux_base.bo->offset64;
151 }
152
153 /* We only really need a clear color if we also have an auxiliary
154 * surface. Without one, it does nothing.
155 */
156 clear_color = intel_miptree_get_isl_clear_color(brw, mt);
157 }
158
159 void *state = brw_state_batch(brw,
160 brw->isl_dev.ss.size,
161 brw->isl_dev.ss.align,
162 surf_offset);
163
164 isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
165 .address = mt->bo->offset64 + offset,
166 .aux_surf = aux_surf, .aux_usage = aux_usage,
167 .aux_address = aux_offset,
168 .mocs = mocs, .clear_color = clear_color,
169 .x_offset_sa = tile_x, .y_offset_sa = tile_y);
170
171 drm_intel_bo_emit_reloc(brw->batch.bo,
172 *surf_offset + brw->isl_dev.ss.addr_offset,
173 mt->bo, offset,
174 read_domains, write_domains);
175
176 if (aux_surf) {
177 /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
178 * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
179 * contain other control information. Since buffer addresses are always
180 * on 4k boundaries (and thus have their lower 12 bits zero), we can use
181 * an ordinary reloc to do the necessary address translation.
182 */
183 assert((aux_offset & 0xfff) == 0);
184 uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
185 drm_intel_bo_emit_reloc(brw->batch.bo,
186 *surf_offset + brw->isl_dev.ss.aux_addr_offset,
187 aux_bo, *aux_addr - aux_bo->offset64,
188 read_domains, write_domains);
189 }
190 }
191
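/* Build the SURFACE_STATE for one color draw buffer via the ISL path and
 * return its offset in the batch; installed in the vtable by
 * gen6_init_vtable_surface_functions() below.
 */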
192 uint32_t
193 brw_update_renderbuffer_surface(struct brw_context *brw,
194 struct gl_renderbuffer *rb,
195 uint32_t flags, unsigned unit /* unused */,
196 uint32_t surf_index)
197 {
198 struct gl_context *ctx = &brw->ctx;
199 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
200 struct intel_mipmap_tree *mt = irb->mt;
201
202 if (brw->gen < 9) {
203 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
204 }
205
206 assert(brw_render_target_supported(brw, rb));
207
208 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
209 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
210 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
211 __func__, _mesa_get_format_name(rb_format));
212 }
213
214 const unsigned layer_multiplier =
215 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
216 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
217 MAX2(irb->mt->num_samples, 1) : 1;
218
219 struct isl_view view = {
220 .format = brw->render_target_format[rb_format],
221 .base_level = irb->mt_level - irb->mt->first_level,
222 .levels = 1,
223 .base_array_layer = irb->mt_layer / layer_multiplier,
224 .array_len = MAX2(irb->layer_count, 1),
225 .swizzle = ISL_SWIZZLE_IDENTITY,
226 .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
227 };
228
229 uint32_t offset;
230 brw_emit_surface_state(brw, mt, flags, mt->target, view,
231 rb_mocs[brw->gen],
232 &offset, surf_index,
233 I915_GEM_DOMAIN_RENDER,
234 I915_GEM_DOMAIN_RENDER);
235 return offset;
236 }
237
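/* Map a GL texture target onto the BRW_SURFACE_* type used in the
 * gen4-style SURFACE_STATE; note that rectangle and external textures are
 * treated as plain 2D surfaces.
 */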
238 GLuint
239 translate_tex_target(GLenum target)
240 {
241 switch (target) {
242 case GL_TEXTURE_1D:
243 case GL_TEXTURE_1D_ARRAY_EXT:
244 return BRW_SURFACE_1D;
245
246 case GL_TEXTURE_RECTANGLE_NV:
247 return BRW_SURFACE_2D;
248
249 case GL_TEXTURE_2D:
250 case GL_TEXTURE_2D_ARRAY_EXT:
251 case GL_TEXTURE_EXTERNAL_OES:
252 case GL_TEXTURE_2D_MULTISAMPLE:
253 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
254 return BRW_SURFACE_2D;
255
256 case GL_TEXTURE_3D:
257 return BRW_SURFACE_3D;
258
259 case GL_TEXTURE_CUBE_MAP:
260 case GL_TEXTURE_CUBE_MAP_ARRAY:
261 return BRW_SURFACE_CUBE;
262
263 default:
264 unreachable("not reached");
265 }
266 }
267
268 uint32_t
269 brw_get_surface_tiling_bits(uint32_t tiling)
270 {
271 switch (tiling) {
272 case I915_TILING_X:
273 return BRW_SURFACE_TILED;
274 case I915_TILING_Y:
275 return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
276 default:
277 return 0;
278 }
279 }
280
281
282 uint32_t
283 brw_get_surface_num_multisamples(unsigned num_samples)
284 {
285 if (num_samples > 1)
286 return BRW_SURFACE_MULTISAMPLECOUNT_4;
287 else
288 return BRW_SURFACE_MULTISAMPLECOUNT_1;
289 }
290
291 /**
292 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
293 * swizzling.
294 */
295 int
296 brw_get_texture_swizzle(const struct gl_context *ctx,
297 const struct gl_texture_object *t)
298 {
299 const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
300
301 int swizzles[SWIZZLE_NIL + 1] = {
302 SWIZZLE_X,
303 SWIZZLE_Y,
304 SWIZZLE_Z,
305 SWIZZLE_W,
306 SWIZZLE_ZERO,
307 SWIZZLE_ONE,
308 SWIZZLE_NIL
309 };
310
311 if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
312 img->_BaseFormat == GL_DEPTH_STENCIL) {
313 GLenum depth_mode = t->DepthMode;
314
315 /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
316 * with depth component data specified with a sized internal format.
317 * Otherwise, it's left at the old default, GL_LUMINANCE.
318 */
319 if (_mesa_is_gles3(ctx) &&
320 img->InternalFormat != GL_DEPTH_COMPONENT &&
321 img->InternalFormat != GL_DEPTH_STENCIL) {
322 depth_mode = GL_RED;
323 }
324
325 switch (depth_mode) {
326 case GL_ALPHA:
327 swizzles[0] = SWIZZLE_ZERO;
328 swizzles[1] = SWIZZLE_ZERO;
329 swizzles[2] = SWIZZLE_ZERO;
330 swizzles[3] = SWIZZLE_X;
331 break;
332 case GL_LUMINANCE:
333 swizzles[0] = SWIZZLE_X;
334 swizzles[1] = SWIZZLE_X;
335 swizzles[2] = SWIZZLE_X;
336 swizzles[3] = SWIZZLE_ONE;
337 break;
338 case GL_INTENSITY:
339 swizzles[0] = SWIZZLE_X;
340 swizzles[1] = SWIZZLE_X;
341 swizzles[2] = SWIZZLE_X;
342 swizzles[3] = SWIZZLE_X;
343 break;
344 case GL_RED:
345 swizzles[0] = SWIZZLE_X;
346 swizzles[1] = SWIZZLE_ZERO;
347 swizzles[2] = SWIZZLE_ZERO;
348 swizzles[3] = SWIZZLE_ONE;
349 break;
350 }
351 }
352
353 GLenum datatype = _mesa_get_format_datatype(img->TexFormat);
354
355 /* If the texture's format is alpha-only, force R, G, and B to
356 * 0.0. Similarly, if the texture's format has no alpha channel,
357 * force the alpha value read to 1.0. This allows for the
358 * implementation to use an RGBA texture for any of these formats
359 * without leaking any unexpected values.
360 */
361 switch (img->_BaseFormat) {
362 case GL_ALPHA:
363 swizzles[0] = SWIZZLE_ZERO;
364 swizzles[1] = SWIZZLE_ZERO;
365 swizzles[2] = SWIZZLE_ZERO;
366 break;
367 case GL_LUMINANCE:
368 if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
369 swizzles[0] = SWIZZLE_X;
370 swizzles[1] = SWIZZLE_X;
371 swizzles[2] = SWIZZLE_X;
372 swizzles[3] = SWIZZLE_ONE;
373 }
374 break;
375 case GL_LUMINANCE_ALPHA:
376 if (datatype == GL_SIGNED_NORMALIZED) {
377 swizzles[0] = SWIZZLE_X;
378 swizzles[1] = SWIZZLE_X;
379 swizzles[2] = SWIZZLE_X;
380 swizzles[3] = SWIZZLE_W;
381 }
382 break;
383 case GL_INTENSITY:
384 if (datatype == GL_SIGNED_NORMALIZED) {
385 swizzles[0] = SWIZZLE_X;
386 swizzles[1] = SWIZZLE_X;
387 swizzles[2] = SWIZZLE_X;
388 swizzles[3] = SWIZZLE_X;
389 }
390 break;
391 case GL_RED:
392 case GL_RG:
393 case GL_RGB:
394 if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0)
395 swizzles[3] = SWIZZLE_ONE;
396 break;
397 }
398
399 return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
400 swizzles[GET_SWZ(t->_Swizzle, 1)],
401 swizzles[GET_SWZ(t->_Swizzle, 2)],
402 swizzles[GET_SWZ(t->_Swizzle, 3)]);
403 }
404
405 /**
406 * Convert a swizzle enumeration (e.g. SWIZZLE_X) to one of the Gen7.5+
407 * "Shader Channel Select" enumerations (e.g. HSW_SCS_RED). The mappings are
408 *
409 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
410 * 0          1          2          3          4             5
411 * 4          5          6          7          0             1
412 * SCS_RED,   SCS_GREEN, SCS_BLUE,  SCS_ALPHA, SCS_ZERO,     SCS_ONE
413 *
414 * which is simply adding 4 then modding by 8 (or anding with 7).
415 *
416 * We then may need to apply workarounds for textureGather hardware bugs.
417 */
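/* For example, SWIZZLE_X (0) becomes (0 + 4) & 7 = 4 = HSW_SCS_RED, and
 * SWIZZLE_ZERO (4) becomes (4 + 4) & 7 = 0 = HSW_SCS_ZERO.
 */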
418 static unsigned
419 swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
420 {
421 unsigned scs = (swizzle + 4) & 7;
422
423 return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
424 }
425
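/* Return the index of the color draw buffer whose miptree matches mt, or
 * fb->_NumColorDrawBuffers if the miptree isn't currently bound as a
 * render target.
 */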
426 static unsigned
427 brw_find_matching_rb(const struct gl_framebuffer *fb,
428 const struct intel_mipmap_tree *mt)
429 {
430 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
431 const struct intel_renderbuffer *irb =
432 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
433
434 if (irb && irb->mt == mt)
435 return i;
436 }
437
438 return fb->_NumColorDrawBuffers;
439 }
440
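/* Sanity-check sampling from a lossless-compressed (CCS_E) surface: the
 * view is fine if its format can be sampled compressed, if the color has
 * already been resolved, or if compression was disabled on the matching
 * draw buffer.
 */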
441 static inline bool
442 brw_texture_view_sane(const struct brw_context *brw,
443 const struct intel_mipmap_tree *mt,
444 const struct isl_view *view)
445 {
446 /* There are special cases only for lossless compression. */
447 if (!intel_miptree_is_lossless_compressed(brw, mt))
448 return true;
449
450 if (isl_format_supports_ccs_e(&brw->screen->devinfo, view->format))
451 return true;
452
453 /* Logic elsewhere needs to take care to resolve the color buffer prior
454 * to sampling it as non-compressed.
455 */
456 if (intel_miptree_has_color_unresolved(mt, view->base_level, view->levels,
457 view->base_array_layer,
458 view->array_len))
459 return false;
460
461 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
462 const unsigned rb_index = brw_find_matching_rb(fb, mt);
463
464 if (rb_index == fb->_NumColorDrawBuffers)
465 return true;
466
467 /* Underlying surface is compressed but it is sampled using a format that
468 * the sampling engine doesn't support as compressed. Compression must be
469 * disabled for both the sampling engine and the data port in case the
470 * same surface is also used as a render target.
471 */
472 return brw->draw_aux_buffer_disabled[rb_index];
473 }
474
475 static bool
476 brw_disable_aux_surface(const struct brw_context *brw,
477 const struct intel_mipmap_tree *mt,
478 const struct isl_view *view)
479 {
480 /* Nothing to disable. */
481 if (!mt->mcs_buf)
482 return false;
483
484 const bool is_unresolved = intel_miptree_has_color_unresolved(
485 mt, view->base_level, view->levels,
486 view->base_array_layer, view->array_len);
487
488 /* There are special cases only for lossless compression. */
489 if (!intel_miptree_is_lossless_compressed(brw, mt))
490 return !is_unresolved;
491
492 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
493 const unsigned rb_index = brw_find_matching_rb(fb, mt);
494
495 /* If we are drawing into this with compression enabled, then we must also
496 * enable compression when texturing from it regardless of
497 * fast_clear_state. If we don't, then after the first draw call with
498 * this setup, there will be data in the CCS which won't get picked up by
499 * subsequent texturing operations as required by ARB_texture_barrier.
500 * Since we don't want to re-emit the binding table or do a resolve
501 * operation every draw call, the easiest thing to do is just enable
502 * compression on the texturing side. This is completely safe to do
503 * since, if compressed texturing weren't allowed, we would have disabled
504 * compression of render targets in whatever_that_function_is_called().
505 */
506 if (rb_index < fb->_NumColorDrawBuffers) {
507 if (brw->draw_aux_buffer_disabled[rb_index]) {
508 assert(!is_unresolved);
509 }
510
511 return brw->draw_aux_buffer_disabled[rb_index];
512 }
513
514 return !is_unresolved;
515 }
516
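/* Emit the texture SURFACE_STATE for one GL texture unit, applying the
 * depth/stencil and gen6/gen7 gather4 format overrides before building
 * the ISL view.
 */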
517 void
518 brw_update_texture_surface(struct gl_context *ctx,
519 unsigned unit,
520 uint32_t *surf_offset,
521 bool for_gather,
522 uint32_t plane)
523 {
524 struct brw_context *brw = brw_context(ctx);
525 struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
526
527 if (obj->Target == GL_TEXTURE_BUFFER) {
528 brw_update_buffer_texture_surface(ctx, unit, surf_offset);
529
530 } else {
531 struct intel_texture_object *intel_obj = intel_texture_object(obj);
532 struct intel_mipmap_tree *mt = intel_obj->mt;
533
534 if (plane > 0) {
535 if (mt->plane[plane - 1] == NULL)
536 return;
537 mt = mt->plane[plane - 1];
538 }
539
540 struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
541 /* If this is a view with restricted NumLayers, then our effective depth
542 * is not just the miptree depth.
543 */
544 const unsigned view_num_layers =
545 (obj->Immutable && obj->Target != GL_TEXTURE_3D) ? obj->NumLayers :
546 mt->logical_depth0;
547
548 /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
549 * texturing functions that return a float, as our code generation always
550 * selects the .x channel (which would always be 0).
551 */
552 struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
553 const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
554 (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
555 firstImage->_BaseFormat == GL_DEPTH_STENCIL);
556 const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
557 brw_get_texture_swizzle(&brw->ctx, obj));
558
559 mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
560 unsigned format = translate_tex_format(brw, mesa_fmt,
561 sampler->sRGBDecode);
562
563 /* Implement gen6 and gen7 gather work-around */
564 bool need_green_to_blue = false;
565 if (for_gather) {
566 if (brw->gen == 7 && (format == ISL_FORMAT_R32G32_FLOAT ||
567 format == ISL_FORMAT_R32G32_SINT ||
568 format == ISL_FORMAT_R32G32_UINT)) {
569 format = ISL_FORMAT_R32G32_FLOAT_LD;
570 need_green_to_blue = brw->is_haswell;
571 } else if (brw->gen == 6) {
572 /* Sandybridge's gather4 message is broken for integer formats.
573 * To work around this, we pretend the surface is UNORM for
574 * 8 or 16-bit formats, and emit shader instructions to recover
575 * the real INT/UINT value. For 32-bit formats, we pretend
576 * the surface is FLOAT, and simply reinterpret the resulting
577 * bits.
578 */
579 switch (format) {
580 case ISL_FORMAT_R8_SINT:
581 case ISL_FORMAT_R8_UINT:
582 format = ISL_FORMAT_R8_UNORM;
583 break;
584
585 case ISL_FORMAT_R16_SINT:
586 case ISL_FORMAT_R16_UINT:
587 format = ISL_FORMAT_R16_UNORM;
588 break;
589
590 case ISL_FORMAT_R32_SINT:
591 case ISL_FORMAT_R32_UINT:
592 format = ISL_FORMAT_R32_FLOAT;
593 break;
594
595 default:
596 break;
597 }
598 }
599 }
600
601 if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
602 if (brw->gen <= 7) {
603 assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
604 mt = mt->r8stencil_mt;
605 } else {
606 mt = mt->stencil_mt;
607 }
608 format = ISL_FORMAT_R8_UINT;
609 } else if (brw->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
610 assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
611 mt = mt->r8stencil_mt;
612 format = ISL_FORMAT_R8_UINT;
613 }
614
615 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
616
617 struct isl_view view = {
618 .format = format,
619 .base_level = obj->MinLevel + obj->BaseLevel,
620 .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
621 .base_array_layer = obj->MinLayer,
622 .array_len = view_num_layers,
623 .swizzle = {
624 .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
625 .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
626 .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
627 .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
628 },
629 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
630 };
631
632 if (obj->Target == GL_TEXTURE_CUBE_MAP ||
633 obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
634 view.usage |= ISL_SURF_USAGE_CUBE_BIT;
635
636 assert(brw_texture_view_sane(brw, mt, &view));
637
638 const int flags = brw_disable_aux_surface(brw, mt, &view) ?
639 INTEL_AUX_BUFFER_DISABLED : 0;
640 brw_emit_surface_state(brw, mt, flags, mt->target, view,
641 tex_mocs[brw->gen],
642 surf_offset, surf_index,
643 I915_GEM_DOMAIN_SAMPLER, 0);
644 }
645 }
646
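/* Emit a buffer-style SURFACE_STATE covering buffer_size bytes of the
 * given BO at buffer_offset, with a write-capable relocation only when rw
 * is set.
 */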
647 void
648 brw_emit_buffer_surface_state(struct brw_context *brw,
649 uint32_t *out_offset,
650 drm_intel_bo *bo,
651 unsigned buffer_offset,
652 unsigned surface_format,
653 unsigned buffer_size,
654 unsigned pitch,
655 bool rw)
656 {
657 uint32_t *dw = brw_state_batch(brw,
658 brw->isl_dev.ss.size,
659 brw->isl_dev.ss.align,
660 out_offset);
661
662 isl_buffer_fill_state(&brw->isl_dev, dw,
663 .address = (bo ? bo->offset64 : 0) + buffer_offset,
664 .size = buffer_size,
665 .format = surface_format,
666 .stride = pitch,
667 .mocs = tex_mocs[brw->gen]);
668
669 if (bo) {
670 drm_intel_bo_emit_reloc(brw->batch.bo,
671 *out_offset + brw->isl_dev.ss.addr_offset,
672 bo, buffer_offset,
673 I915_GEM_DOMAIN_SAMPLER,
674 (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
675 }
676 }
677
678 void
679 brw_update_buffer_texture_surface(struct gl_context *ctx,
680 unsigned unit,
681 uint32_t *surf_offset)
682 {
683 struct brw_context *brw = brw_context(ctx);
684 struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
685 struct intel_buffer_object *intel_obj =
686 intel_buffer_object(tObj->BufferObject);
687 uint32_t size = tObj->BufferSize;
688 drm_intel_bo *bo = NULL;
689 mesa_format format = tObj->_BufferObjectFormat;
690 uint32_t brw_format = brw_isl_format_for_mesa_format(format);
691 int texel_size = _mesa_get_format_bytes(format);
692
693 if (intel_obj) {
694 size = MIN2(size, intel_obj->Base.Size);
695 bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size);
696 }
697
698 /* The ARB_texture_buffer_specification says:
699 *
700 * "The number of texels in the buffer texture's texel array is given by
701 *
702 * floor(<buffer_size> / (<components> * sizeof(<base_type>)),
703 *
704 * where <buffer_size> is the size of the buffer object, in basic
705 * machine units and <components> and <base_type> are the element count
706 * and base data type for elements, as specified in Table X.1. The
707 * number of texels in the texel array is then clamped to the
708 * implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
709 *
710 * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
711 * so that when ISL divides by stride to obtain the number of texels, that
712 * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
713 */
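/* For example, with a 16-byte texel format such as RGBA_FLOAT32 and a
 * hypothetical MaxTextureBufferSize of 2^27 texels, the byte clamp would
 * be 2^27 * 16; ISL then divides by the stride to get texels again.
 */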
714 size = MIN2(size, ctx->Const.MaxTextureBufferSize * (unsigned) texel_size);
715
716 if (brw_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
717 _mesa_problem(NULL, "bad format %s for texture buffer\n",
718 _mesa_get_format_name(format));
719 }
720
721 brw_emit_buffer_surface_state(brw, surf_offset, bo,
722 tObj->BufferOffset,
723 brw_format,
724 size,
725 texel_size,
726 false /* rw */);
727 }
728
729 /**
730 * Create the constant buffer surface. Vertex/fragment shader constants will be
731 * read from this buffer with Data Port Read instructions/messages.
732 */
733 void
734 brw_create_constant_surface(struct brw_context *brw,
735 drm_intel_bo *bo,
736 uint32_t offset,
737 uint32_t size,
738 uint32_t *out_offset)
739 {
740 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
741 ISL_FORMAT_R32G32B32A32_FLOAT,
742 size, 1, false);
743 }
744
745 /**
746 * Create the buffer surface. Shader buffer variables will be
747 * read from / written to this buffer with Data Port Read/Write
748 * instructions/messages.
749 */
750 void
751 brw_create_buffer_surface(struct brw_context *brw,
752 drm_intel_bo *bo,
753 uint32_t offset,
754 uint32_t size,
755 uint32_t *out_offset)
756 {
757 /* Use a raw surface so we can reuse existing untyped read/write/atomic
758 * messages. We need these specifically for the fragment shader since they
759 * include a pixel mask header that we need in order to ensure correct
760 * behavior with helper invocations, which must not write to the buffer.
761 */
762 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
763 ISL_FORMAT_RAW,
764 size, 1, true);
765 }
766
767 /**
768 * Set up a binding table entry for use by stream output logic (transform
769 * feedback).
770 *
771 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
772 */
773 void
774 brw_update_sol_surface(struct brw_context *brw,
775 struct gl_buffer_object *buffer_obj,
776 uint32_t *out_offset, unsigned num_vector_components,
777 unsigned stride_dwords, unsigned offset_dwords)
778 {
779 struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
780 uint32_t offset_bytes = 4 * offset_dwords;
781 drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
782 offset_bytes,
783 buffer_obj->Size - offset_bytes);
784 uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
785 uint32_t pitch_minus_1 = 4*stride_dwords - 1;
786 size_t size_dwords = buffer_obj->Size / 4;
787 uint32_t buffer_size_minus_1, width, height, depth, surface_format;
788
789 /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
790 * too big to map using a single binding table entry?
791 */
792 assert((size_dwords - offset_dwords) / stride_dwords
793 <= BRW_MAX_NUM_BUFFER_ENTRIES);
794
795 if (size_dwords > offset_dwords + num_vector_components) {
796 /* There is room for at least 1 transform feedback output in the buffer.
797 * Compute the number of additional transform feedback outputs the
798 * buffer has room for.
799 */
800 buffer_size_minus_1 =
801 (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
802 } else {
803 /* There isn't even room for a single transform feedback output in the
804 * buffer. We can't configure the binding table entry to prevent output
805 * entirely; we'll have to rely on the geometry shader to detect
806 * overflow. But to minimize the damage in case of a bug, set up the
807 * binding table entry to just allow a single output.
808 */
809 buffer_size_minus_1 = 0;
810 }
811 width = buffer_size_minus_1 & 0x7f;
812 height = (buffer_size_minus_1 & 0xfff80) >> 7;
813 depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
814
815 switch (num_vector_components) {
816 case 1:
817 surface_format = ISL_FORMAT_R32_FLOAT;
818 break;
819 case 2:
820 surface_format = ISL_FORMAT_R32G32_FLOAT;
821 break;
822 case 3:
823 surface_format = ISL_FORMAT_R32G32B32_FLOAT;
824 break;
825 case 4:
826 surface_format = ISL_FORMAT_R32G32B32A32_FLOAT;
827 break;
828 default:
829 unreachable("Invalid vector size for transform feedback output");
830 }
831
832 surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
833 BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
834 surface_format << BRW_SURFACE_FORMAT_SHIFT |
835 BRW_SURFACE_RC_READ_WRITE;
836 surf[1] = bo->offset64 + offset_bytes; /* reloc */
837 surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
838 height << BRW_SURFACE_HEIGHT_SHIFT);
839 surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
840 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
841 surf[4] = 0;
842 surf[5] = 0;
843
844 /* Emit relocation to surface contents. */
845 drm_intel_bo_emit_reloc(brw->batch.bo,
846 *out_offset + 4,
847 bo, offset_bytes,
848 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
849 }
850
851 /* Creates a new WM constant buffer reflecting the current fragment program's
852 * constants, if needed by the fragment program.
853 *
854 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
855 * state atom.
856 */
857 static void
858 brw_upload_wm_pull_constants(struct brw_context *brw)
859 {
860 struct brw_stage_state *stage_state = &brw->wm.base;
861 /* BRW_NEW_FRAGMENT_PROGRAM */
862 struct brw_program *fp = (struct brw_program *) brw->fragment_program;
863 /* BRW_NEW_FS_PROG_DATA */
864 struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;
865
866 _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
867 /* _NEW_PROGRAM_CONSTANTS */
868 brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
869 stage_state, prog_data);
870 }
871
872 const struct brw_tracked_state brw_wm_pull_constants = {
873 .dirty = {
874 .mesa = _NEW_PROGRAM_CONSTANTS,
875 .brw = BRW_NEW_BATCH |
876 BRW_NEW_BLORP |
877 BRW_NEW_FRAGMENT_PROGRAM |
878 BRW_NEW_FS_PROG_DATA,
879 },
880 .emit = brw_upload_wm_pull_constants,
881 };
882
883 /**
884 * Creates a null renderbuffer surface.
885 *
886 * This is used when the shader doesn't write to any color output. An FB
887 * write to target 0 will still be emitted, because that's how the thread is
888 * terminated (and computed depth is returned), so we need to have the
889 * hardware discard the target 0 color output.
890 */
891 static void
892 brw_emit_null_surface_state(struct brw_context *brw,
893 unsigned width,
894 unsigned height,
895 unsigned samples,
896 uint32_t *out_offset)
897 {
898 /* From the Sandy bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
899 * Notes):
900 *
901 * A null surface will be used in instances where an actual surface is
902 * not bound. When a write message is generated to a null surface, no
903 * actual surface is written to. When a read message (including any
904 * sampling engine message) is generated to a null surface, the result
905 * is all zeros. Note that a null surface type is allowed to be used
906 * with all messages, even if it is not specifically indicated as
907 * supported. All of the remaining fields in surface state are ignored
908 * for null surfaces, with the following exceptions:
909 *
910 * - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
911 * depth buffer’s corresponding state for all render target surfaces,
912 * including null.
913 *
914 * - Surface Format must be R8G8B8A8_UNORM.
915 */
916 unsigned surface_type = BRW_SURFACE_NULL;
917 drm_intel_bo *bo = NULL;
918 unsigned pitch_minus_1 = 0;
919 uint32_t multisampling_state = 0;
920 uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
921
922 if (samples > 1) {
923 /* On Gen6, null render targets seem to cause GPU hangs when
924 * multisampling. So work around this problem by rendering into a dummy
925 * color buffer.
926 *
927 * To decrease the amount of memory needed by the workaround buffer, we
928 * set its pitch to 128 bytes (the width of a Y tile). This means that
929 * the amount of memory needed for the workaround buffer is
930 * (width_in_tiles + height_in_tiles - 1) tiles.
931 *
932 * Note that since the workaround buffer will be interpreted by the
933 * hardware as an interleaved multisampled buffer, we need to compute
934 * width_in_tiles and height_in_tiles by dividing the width and height
935 * by 16 rather than the normal Y-tile size of 32.
936 */
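/* For example, a 1920x1080 multisampled null target needs
 * ALIGN(1920, 16) / 16 = 120 and ALIGN(1080, 16) / 16 = 68 tiles, i.e.
 * (120 + 68 - 1) * 4096 bytes of scratch.
 */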
937 unsigned width_in_tiles = ALIGN(width, 16) / 16;
938 unsigned height_in_tiles = ALIGN(height, 16) / 16;
939 unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
940 brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
941 size_needed);
942 bo = brw->wm.multisampled_null_render_target_bo;
943 surface_type = BRW_SURFACE_2D;
944 pitch_minus_1 = 127;
945 multisampling_state = brw_get_surface_num_multisamples(samples);
946 }
947
948 surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
949 ISL_FORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
950 if (brw->gen < 6) {
951 surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
952 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
953 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
954 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
955 }
956 surf[1] = bo ? bo->offset64 : 0;
957 surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
958 (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
959
960 /* From the Sandy bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
961 * Notes):
962 *
963 * If Surface Type is SURFTYPE_NULL, this field must be TRUE
964 */
965 surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
966 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
967 surf[4] = multisampling_state;
968 surf[5] = 0;
969
970 if (bo) {
971 drm_intel_bo_emit_reloc(brw->batch.bo,
972 *out_offset + 4,
973 bo, 0,
974 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
975 }
976 }
977
978 /**
979 * Sets up a surface state structure to point at the given region.
980 * While it is only used for the front/back buffer currently, it should be
981 * usable for further buffers when doing ARB_draw_buffers support.
982 */
983 static uint32_t
984 gen4_update_renderbuffer_surface(struct brw_context *brw,
985 struct gl_renderbuffer *rb,
986 uint32_t flags, unsigned unit,
987 uint32_t surf_index)
988 {
989 struct gl_context *ctx = &brw->ctx;
990 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
991 struct intel_mipmap_tree *mt = irb->mt;
992 uint32_t *surf;
993 uint32_t tile_x, tile_y;
994 uint32_t format = 0;
995 uint32_t offset;
996 /* _NEW_BUFFERS */
997 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
998 /* BRW_NEW_FS_PROG_DATA */
999
1000 assert(!(flags & INTEL_RENDERBUFFER_LAYERED));
1001 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
1002
1003 if (rb->TexImage && !brw->has_surface_tile_offset) {
1004 intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);
1005
1006 if (tile_x != 0 || tile_y != 0) {
1007 /* Original gen4 hardware couldn't draw to a non-tile-aligned
1008 * destination in a miptree unless you actually set up your renderbuffer
1009 * as a miptree and used the fragile lod/array_index/etc. controls to
1010 * select the image. So, instead, we just make a new single-level
1011 * miptree and render into that.
1012 */
1013 intel_renderbuffer_move_to_temp(brw, irb, false);
1014 mt = irb->mt;
1015 }
1016 }
1017
1018 surf = brw_state_batch(brw, 6 * 4, 32, &offset);
1019
1020 format = brw->render_target_format[rb_format];
1021 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
1022 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
1023 __func__, _mesa_get_format_name(rb_format));
1024 }
1025
1026 surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
1027 format << BRW_SURFACE_FORMAT_SHIFT);
1028
1029 /* reloc */
1030 assert(mt->offset % mt->cpp == 0);
1031 surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
1032 mt->bo->offset64 + mt->offset);
1033
1034 surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
1035 (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
1036
1037 surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
1038 (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
1039
1040 surf[4] = brw_get_surface_num_multisamples(mt->num_samples);
1041
1042 assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
1043 /* Note that the low bits of these fields are missing, so
1044 * there's a possibility of getting into trouble.
1045 */
1046 assert(tile_x % 4 == 0);
1047 assert(tile_y % 2 == 0);
1048 surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
1049 (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
1050 (mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));
1051
1052 if (brw->gen < 6) {
1053 /* _NEW_COLOR */
1054 if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
1055 (ctx->Color.BlendEnabled & (1 << unit)))
1056 surf[0] |= BRW_SURFACE_BLEND_ENABLED;
1057
1058 if (!ctx->Color.ColorMask[unit][0])
1059 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
1060 if (!ctx->Color.ColorMask[unit][1])
1061 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
1062 if (!ctx->Color.ColorMask[unit][2])
1063 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;
1064
1065 /* As mentioned above, disable writes to the alpha component when the
1066 * renderbuffer is XRGB.
1067 */
1068 if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
1069 !ctx->Color.ColorMask[unit][3]) {
1070 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
1071 }
1072 }
1073
1074 drm_intel_bo_emit_reloc(brw->batch.bo,
1075 offset + 4,
1076 mt->bo,
1077 surf[1] - mt->bo->offset64,
1078 I915_GEM_DOMAIN_RENDER,
1079 I915_GEM_DOMAIN_RENDER);
1080
1081 return offset;
1082 }
1083
1084 /**
1085 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
1086 */
1087 void
1088 brw_update_renderbuffer_surfaces(struct brw_context *brw,
1089 const struct gl_framebuffer *fb,
1090 uint32_t render_target_start,
1091 uint32_t *surf_offset)
1092 {
1093 GLuint i;
1094 const unsigned int w = _mesa_geometric_width(fb);
1095 const unsigned int h = _mesa_geometric_height(fb);
1096 const unsigned int s = _mesa_geometric_samples(fb);
1097
1098 /* Update surfaces for drawing buffers */
1099 if (fb->_NumColorDrawBuffers >= 1) {
1100 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1101 const uint32_t surf_index = render_target_start + i;
1102 const int flags = (_mesa_geometric_layers(fb) > 0 ?
1103 INTEL_RENDERBUFFER_LAYERED : 0) |
1104 (brw->draw_aux_buffer_disabled[i] ?
1105 INTEL_AUX_BUFFER_DISABLED : 0);
1106
1107 if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
1108 surf_offset[surf_index] =
1109 brw->vtbl.update_renderbuffer_surface(
1110 brw, fb->_ColorDrawBuffers[i], flags, i, surf_index);
1111 } else {
1112 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1113 &surf_offset[surf_index]);
1114 }
1115 }
1116 } else {
1117 const uint32_t surf_index = render_target_start;
1118 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1119 &surf_offset[surf_index]);
1120 }
1121 }
1122
1123 static void
1124 update_renderbuffer_surfaces(struct brw_context *brw)
1125 {
1126 const struct gl_context *ctx = &brw->ctx;
1127
1128 /* BRW_NEW_FS_PROG_DATA */
1129 const struct brw_wm_prog_data *wm_prog_data =
1130 brw_wm_prog_data(brw->wm.base.prog_data);
1131
1132 /* _NEW_BUFFERS | _NEW_COLOR */
1133 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1134 brw_update_renderbuffer_surfaces(
1135 brw, fb,
1136 wm_prog_data->binding_table.render_target_start,
1137 brw->wm.base.surf_offset);
1138 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1139 }
1140
1141 const struct brw_tracked_state brw_renderbuffer_surfaces = {
1142 .dirty = {
1143 .mesa = _NEW_BUFFERS |
1144 _NEW_COLOR,
1145 .brw = BRW_NEW_BATCH |
1146 BRW_NEW_BLORP |
1147 BRW_NEW_FS_PROG_DATA,
1148 },
1149 .emit = update_renderbuffer_surfaces,
1150 };
1151
1152 const struct brw_tracked_state gen6_renderbuffer_surfaces = {
1153 .dirty = {
1154 .mesa = _NEW_BUFFERS,
1155 .brw = BRW_NEW_BATCH |
1156 BRW_NEW_BLORP,
1157 },
1158 .emit = update_renderbuffer_surfaces,
1159 };
1160
1161 static void
1162 update_renderbuffer_read_surfaces(struct brw_context *brw)
1163 {
1164 const struct gl_context *ctx = &brw->ctx;
1165
1166 /* BRW_NEW_FS_PROG_DATA */
1167 const struct brw_wm_prog_data *wm_prog_data =
1168 brw_wm_prog_data(brw->wm.base.prog_data);
1169
1170 /* BRW_NEW_FRAGMENT_PROGRAM */
1171 if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
1172 brw->fragment_program && brw->fragment_program->info.outputs_read) {
1173 /* _NEW_BUFFERS */
1174 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1175
1176 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
1177 struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
1178 const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
1179 const unsigned surf_index =
1180 wm_prog_data->binding_table.render_target_read_start + i;
1181 uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];
1182
1183 if (irb) {
1184 const unsigned format = brw->render_target_format[
1185 _mesa_get_render_format(ctx, intel_rb_format(irb))];
1186 assert(isl_format_supports_sampling(&brw->screen->devinfo,
1187 format));
1188
1189 /* Override the target of the texture if the render buffer is a
1190 * single slice of a 3D texture (since the minimum array element
1191 * field of the surface state structure is ignored by the sampler
1192 * unit for 3D textures on some hardware), or if the render buffer
1193 * is a 1D array (since shaders always provide the array index
1194 * coordinate at the Z component to avoid state-dependent
1195 * recompiles when changing the texture target of the
1196 * framebuffer).
1197 */
1198 const GLenum target =
1199 (irb->mt->target == GL_TEXTURE_3D &&
1200 irb->layer_count == 1) ? GL_TEXTURE_2D :
1201 irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
1202 irb->mt->target;
1203
1204 /* intel_renderbuffer::mt_layer is expressed in sample units for
1205 * the UMS and CMS multisample layouts, but
1206 * intel_renderbuffer::layer_count is expressed in units of whole
1207 * logical layers regardless of the multisample layout.
1208 */
1209 const unsigned mt_layer_unit =
1210 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
1211 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
1212 MAX2(irb->mt->num_samples, 1) : 1;
1213
1214 const struct isl_view view = {
1215 .format = format,
1216 .base_level = irb->mt_level - irb->mt->first_level,
1217 .levels = 1,
1218 .base_array_layer = irb->mt_layer / mt_layer_unit,
1219 .array_len = irb->layer_count,
1220 .swizzle = ISL_SWIZZLE_IDENTITY,
1221 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
1222 };
1223
1224 const int flags = brw->draw_aux_buffer_disabled[i] ?
1225 INTEL_AUX_BUFFER_DISABLED : 0;
1226 brw_emit_surface_state(brw, irb->mt, flags, target, view,
1227 tex_mocs[brw->gen],
1228 surf_offset, surf_index,
1229 I915_GEM_DOMAIN_SAMPLER, 0);
1230
1231 } else {
1232 brw->vtbl.emit_null_surface_state(
1233 brw, _mesa_geometric_width(fb), _mesa_geometric_height(fb),
1234 _mesa_geometric_samples(fb), surf_offset);
1235 }
1236 }
1237
1238 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1239 }
1240 }
1241
1242 const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
1243 .dirty = {
1244 .mesa = _NEW_BUFFERS,
1245 .brw = BRW_NEW_BATCH |
1246 BRW_NEW_FRAGMENT_PROGRAM |
1247 BRW_NEW_FS_PROG_DATA,
1248 },
1249 .emit = update_renderbuffer_read_surfaces,
1250 };
1251
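/* Walk the samplers used by one shader stage and emit a texture surface
 * for each active unit, starting at the stage's gather or plane slot in
 * the binding table.
 */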
1252 static void
1253 update_stage_texture_surfaces(struct brw_context *brw,
1254 const struct gl_program *prog,
1255 struct brw_stage_state *stage_state,
1256 bool for_gather, uint32_t plane)
1257 {
1258 if (!prog)
1259 return;
1260
1261 struct gl_context *ctx = &brw->ctx;
1262
1263 uint32_t *surf_offset = stage_state->surf_offset;
1264
1265 /* BRW_NEW_*_PROG_DATA */
1266 if (for_gather)
1267 surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
1268 else
1269 surf_offset += stage_state->prog_data->binding_table.plane_start[plane];
1270
1271 unsigned num_samplers = util_last_bit(prog->SamplersUsed);
1272 for (unsigned s = 0; s < num_samplers; s++) {
1273 surf_offset[s] = 0;
1274
1275 if (prog->SamplersUsed & (1 << s)) {
1276 const unsigned unit = prog->SamplerUnits[s];
1277
1278 /* _NEW_TEXTURE */
1279 if (ctx->Texture.Unit[unit]._Current) {
1280 brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather, plane);
1281 }
1282 }
1283 }
1284 }
1285
1286
1287 /**
1288 * Construct SURFACE_STATE objects for enabled textures.
1289 */
1290 static void
1291 brw_update_texture_surfaces(struct brw_context *brw)
1292 {
1293 /* BRW_NEW_VERTEX_PROGRAM */
1294 struct gl_program *vs = (struct gl_program *) brw->vertex_program;
1295
1296 /* BRW_NEW_TESS_PROGRAMS */
1297 struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
1298 struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;
1299
1300 /* BRW_NEW_GEOMETRY_PROGRAM */
1301 struct gl_program *gs = (struct gl_program *) brw->geometry_program;
1302
1303 /* BRW_NEW_FRAGMENT_PROGRAM */
1304 struct gl_program *fs = (struct gl_program *) brw->fragment_program;
1305
1306 /* _NEW_TEXTURE */
1307 update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
1308 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
1309 update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
1310 update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
1311 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);
1312
1313 /* Emit an alternate set of surface state for gather. This
1314 * allows the surface format to be overridden for only the
1315 * gather4 messages. */
1316 if (brw->gen < 8) {
1317 if (vs && vs->nir->info->uses_texture_gather)
1318 update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
1319 if (tcs && tcs->nir->info->uses_texture_gather)
1320 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
1321 if (tes && tes->nir->info->uses_texture_gather)
1322 update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
1323 if (gs && gs->nir->info->uses_texture_gather)
1324 update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
1325 if (fs && fs->nir->info->uses_texture_gather)
1326 update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
1327 }
1328
1329 if (fs) {
1330 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
1331 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
1332 }
1333
1334 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1335 }
1336
1337 const struct brw_tracked_state brw_texture_surfaces = {
1338 .dirty = {
1339 .mesa = _NEW_TEXTURE,
1340 .brw = BRW_NEW_BATCH |
1341 BRW_NEW_BLORP |
1342 BRW_NEW_FRAGMENT_PROGRAM |
1343 BRW_NEW_FS_PROG_DATA |
1344 BRW_NEW_GEOMETRY_PROGRAM |
1345 BRW_NEW_GS_PROG_DATA |
1346 BRW_NEW_TESS_PROGRAMS |
1347 BRW_NEW_TCS_PROG_DATA |
1348 BRW_NEW_TES_PROG_DATA |
1349 BRW_NEW_TEXTURE_BUFFER |
1350 BRW_NEW_VERTEX_PROGRAM |
1351 BRW_NEW_VS_PROG_DATA,
1352 },
1353 .emit = brw_update_texture_surfaces,
1354 };
1355
1356 static void
1357 brw_update_cs_texture_surfaces(struct brw_context *brw)
1358 {
1359 /* BRW_NEW_COMPUTE_PROGRAM */
1360 struct gl_program *cs = (struct gl_program *) brw->compute_program;
1361
1362 /* _NEW_TEXTURE */
1363 update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);
1364
1365 /* Emit an alternate set of surface state for gather. This
1366 * allows the surface format to be overridden for only the
1367 * gather4 messages.
1368 */
1369 if (brw->gen < 8) {
1370 if (cs && cs->nir->info->uses_texture_gather)
1371 update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
1372 }
1373
1374 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1375 }
1376
1377 const struct brw_tracked_state brw_cs_texture_surfaces = {
1378 .dirty = {
1379 .mesa = _NEW_TEXTURE,
1380 .brw = BRW_NEW_BATCH |
1381 BRW_NEW_BLORP |
1382 BRW_NEW_COMPUTE_PROGRAM,
1383 },
1384 .emit = brw_update_cs_texture_surfaces,
1385 };
1386
1387
1388 void
1389 brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
1390 struct brw_stage_state *stage_state,
1391 struct brw_stage_prog_data *prog_data)
1392 {
1393 struct gl_context *ctx = &brw->ctx;
1394
1395 if (!prog)
1396 return;
1397
1398 uint32_t *ubo_surf_offsets =
1399 &stage_state->surf_offset[prog_data->binding_table.ubo_start];
1400
1401 for (int i = 0; i < prog->info.num_ubos; i++) {
1402 struct gl_uniform_buffer_binding *binding =
1403 &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];
1404
1405 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1406 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
1407 } else {
1408 struct intel_buffer_object *intel_bo =
1409 intel_buffer_object(binding->BufferObject);
1410 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1411 if (!binding->AutomaticSize)
1412 size = MIN2(size, binding->Size);
1413 drm_intel_bo *bo =
1414 intel_bufferobj_buffer(brw, intel_bo,
1415 binding->Offset,
1416 size);
1417 brw_create_constant_surface(brw, bo, binding->Offset,
1418 size,
1419 &ubo_surf_offsets[i]);
1420 }
1421 }
1422
1423 uint32_t *ssbo_surf_offsets =
1424 &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
1425
1426 for (int i = 0; i < prog->info.num_ssbos; i++) {
1427 struct gl_shader_storage_buffer_binding *binding =
1428 &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];
1429
1430 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1431 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
1432 } else {
1433 struct intel_buffer_object *intel_bo =
1434 intel_buffer_object(binding->BufferObject);
1435 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1436 if (!binding->AutomaticSize)
1437 size = MIN2(size, binding->Size);
1438 drm_intel_bo *bo =
1439 intel_bufferobj_buffer(brw, intel_bo,
1440 binding->Offset,
1441 size);
1442 brw_create_buffer_surface(brw, bo, binding->Offset,
1443 size,
1444 &ssbo_surf_offsets[i]);
1445 }
1446 }
1447
1448 if (prog->info.num_ubos || prog->info.num_ssbos)
1449 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1450 }
1451
1452 static void
1453 brw_upload_wm_ubo_surfaces(struct brw_context *brw)
1454 {
1455 struct gl_context *ctx = &brw->ctx;
1456 /* _NEW_PROGRAM */
1457 struct gl_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1458
1459 /* BRW_NEW_FS_PROG_DATA */
1460 brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
1461 }
1462
1463 const struct brw_tracked_state brw_wm_ubo_surfaces = {
1464 .dirty = {
1465 .mesa = _NEW_PROGRAM,
1466 .brw = BRW_NEW_BATCH |
1467 BRW_NEW_BLORP |
1468 BRW_NEW_FS_PROG_DATA |
1469 BRW_NEW_UNIFORM_BUFFER,
1470 },
1471 .emit = brw_upload_wm_ubo_surfaces,
1472 };
1473
1474 static void
1475 brw_upload_cs_ubo_surfaces(struct brw_context *brw)
1476 {
1477 struct gl_context *ctx = &brw->ctx;
1478 /* _NEW_PROGRAM */
1479 struct gl_program *prog =
1480 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1481
1482 /* BRW_NEW_CS_PROG_DATA */
1483 brw_upload_ubo_surfaces(brw, prog, &brw->cs.base, brw->cs.base.prog_data);
1484 }
1485
1486 const struct brw_tracked_state brw_cs_ubo_surfaces = {
1487 .dirty = {
1488 .mesa = _NEW_PROGRAM,
1489 .brw = BRW_NEW_BATCH |
1490 BRW_NEW_BLORP |
1491 BRW_NEW_CS_PROG_DATA |
1492 BRW_NEW_UNIFORM_BUFFER,
1493 },
1494 .emit = brw_upload_cs_ubo_surfaces,
1495 };
1496
1497 void
1498 brw_upload_abo_surfaces(struct brw_context *brw,
1499 const struct gl_program *prog,
1500 struct brw_stage_state *stage_state,
1501 struct brw_stage_prog_data *prog_data)
1502 {
1503 struct gl_context *ctx = &brw->ctx;
1504 uint32_t *surf_offsets =
1505 &stage_state->surf_offset[prog_data->binding_table.abo_start];
1506
1507 if (prog->info.num_abos) {
1508 for (unsigned i = 0; i < prog->info.num_abos; i++) {
1509 struct gl_atomic_buffer_binding *binding =
1510 &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
1511 struct intel_buffer_object *intel_bo =
1512 intel_buffer_object(binding->BufferObject);
1513 drm_intel_bo *bo = intel_bufferobj_buffer(
1514 brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);
1515
1516 brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
1517 binding->Offset, ISL_FORMAT_RAW,
1518 bo->size - binding->Offset, 1, true);
1519 }
1520
1521 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1522 }
1523 }
1524
1525 static void
1526 brw_upload_wm_abo_surfaces(struct brw_context *brw)
1527 {
1528 /* _NEW_PROGRAM */
1529 const struct gl_program *wm = brw->fragment_program;
1530
1531 if (wm) {
1532 /* BRW_NEW_FS_PROG_DATA */
1533 brw_upload_abo_surfaces(brw, wm, &brw->wm.base, brw->wm.base.prog_data);
1534 }
1535 }
1536
1537 const struct brw_tracked_state brw_wm_abo_surfaces = {
1538 .dirty = {
1539 .mesa = _NEW_PROGRAM,
1540 .brw = BRW_NEW_ATOMIC_BUFFER |
1541 BRW_NEW_BLORP |
1542 BRW_NEW_BATCH |
1543 BRW_NEW_FS_PROG_DATA,
1544 },
1545 .emit = brw_upload_wm_abo_surfaces,
1546 };
1547
1548 static void
1549 brw_upload_cs_abo_surfaces(struct brw_context *brw)
1550 {
1551 /* _NEW_PROGRAM */
1552 const struct gl_program *cp = brw->compute_program;
1553
1554 if (cp) {
1555 /* BRW_NEW_CS_PROG_DATA */
1556 brw_upload_abo_surfaces(brw, cp, &brw->cs.base, brw->cs.base.prog_data);
1557 }
1558 }
1559
1560 const struct brw_tracked_state brw_cs_abo_surfaces = {
1561 .dirty = {
1562 .mesa = _NEW_PROGRAM,
1563 .brw = BRW_NEW_ATOMIC_BUFFER |
1564 BRW_NEW_BLORP |
1565 BRW_NEW_BATCH |
1566 BRW_NEW_CS_PROG_DATA,
1567 },
1568 .emit = brw_upload_cs_abo_surfaces,
1569 };
1570
1571 static void
1572 brw_upload_cs_image_surfaces(struct brw_context *brw)
1573 {
1574 /* _NEW_PROGRAM */
1575 const struct gl_program *cp = brw->compute_program;
1576
1577 if (cp) {
1578 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1579 brw_upload_image_surfaces(brw, cp, &brw->cs.base,
1580 brw->cs.base.prog_data);
1581 }
1582 }
1583
1584 const struct brw_tracked_state brw_cs_image_surfaces = {
1585 .dirty = {
1586 .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
1587 .brw = BRW_NEW_BATCH |
1588 BRW_NEW_BLORP |
1589 BRW_NEW_CS_PROG_DATA |
1590 BRW_NEW_IMAGE_UNITS
1591 },
1592 .emit = brw_upload_cs_image_surfaces,
1593 };
1594
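/* Pick the surface format for a shader image: the real format for
 * write-only access, a lowered typed format when the hardware can read
 * it, and RAW (untyped messages) otherwise.
 */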
1595 static uint32_t
1596 get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
1597 {
1598 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1599 uint32_t hw_format = brw_isl_format_for_mesa_format(format);
1600 if (access == GL_WRITE_ONLY) {
1601 return hw_format;
1602 } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
1603 /* Typed surface reads support a very limited subset of the shader
1604 * image formats. Translate it into the closest format the
1605 * hardware supports.
1606 */
1607 return isl_lower_storage_image_format(devinfo, hw_format);
1608 } else {
1609 /* The hardware doesn't actually support a typed format that we can use
1610 * so we have to fall back to untyped read/write messages.
1611 */
1612 return ISL_FORMAT_RAW;
1613 }
1614 }
1615
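/* Fill in the brw_image_param fields common to every image kind; the
 * swizzling shifts default to all-ones, i.e. swizzling disabled.
 */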
1616 static void
1617 update_default_image_param(struct brw_context *brw,
1618 struct gl_image_unit *u,
1619 unsigned surface_idx,
1620 struct brw_image_param *param)
1621 {
1622 memset(param, 0, sizeof(*param));
1623 param->surface_idx = surface_idx;
1624 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
1625 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
1626 * detailed explanation of these parameters.
1627 */
1628 param->swizzling[0] = 0xff;
1629 param->swizzling[1] = 0xff;
1630 }
1631
1632 static void
1633 update_buffer_image_param(struct brw_context *brw,
1634 struct gl_image_unit *u,
1635 unsigned surface_idx,
1636 struct brw_image_param *param)
1637 {
1638 struct gl_buffer_object *obj = u->TexObj->BufferObject;
1639 const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
1640 update_default_image_param(brw, u, surface_idx, param);
1641
1642 param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
1643 param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
1644 }
1645
1646 static void
1647 update_texture_image_param(struct brw_context *brw,
1648 struct gl_image_unit *u,
1649 unsigned surface_idx,
1650 struct brw_image_param *param)
1651 {
1652 struct intel_mipmap_tree *mt = intel_texture_object(u->TexObj)->mt;
1653
1654 update_default_image_param(brw, u, surface_idx, param);
1655
1656 param->size[0] = minify(mt->logical_width0, u->Level);
1657 param->size[1] = minify(mt->logical_height0, u->Level);
1658 param->size[2] = (!u->Layered ? 1 :
1659 u->TexObj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1660 u->TexObj->Target == GL_TEXTURE_3D ?
1661 minify(mt->logical_depth0, u->Level) :
1662 mt->logical_depth0);
1663
1664 intel_miptree_get_image_offset(mt, u->Level, u->_Layer,
1665 &param->offset[0],
1666 &param->offset[1]);
1667
1668 param->stride[0] = mt->cpp;
1669 param->stride[1] = mt->pitch / mt->cpp;
1670 param->stride[2] =
1671 brw_miptree_get_horizontal_slice_pitch(brw, mt, u->Level);
1672 param->stride[3] =
1673 brw_miptree_get_vertical_slice_pitch(brw, mt, u->Level);
1674
1675 if (mt->tiling == I915_TILING_X) {
1676 /* An X tile is a rectangular block of 512x8 bytes. */
1677 param->tiling[0] = _mesa_logbase2(512 / mt->cpp);
1678 param->tiling[1] = _mesa_logbase2(8);
1679
1680 if (brw->has_swizzling) {
1681 /* Right shifts required to swizzle bits 9 and 10 of the memory
1682 * address with bit 6.
1683 */
1684 param->swizzling[0] = 3;
1685 param->swizzling[1] = 4;
1686 }
1687 } else if (mt->tiling == I915_TILING_Y) {
1688 /* The layout of a Y-tiled surface in memory isn't fundamentally
1689 * different from the layout of an X-tiled surface; we simply pretend that
1690 * the surface is broken up into a number of smaller 16Bx32 tiles, each
1691 * one arranged in X-major order just as is the case for X-tiling.
1692 */
1693 param->tiling[0] = _mesa_logbase2(16 / mt->cpp);
1694 param->tiling[1] = _mesa_logbase2(32);
1695
1696 if (brw->has_swizzling) {
1697 /* Right shift required to swizzle bit 9 of the memory address with
1698 * bit 6.
1699 */
1700 param->swizzling[0] = 3;
1701 }
1702 }
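/* For example, a 4-cpp X-tiled surface ends up with
 * tiling[0] = log2(512 / 4) = 7 and tiling[1] = log2(8) = 3.
 */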
1703
1704 /* 3D textures are arranged in 2D in memory with 2^lod slices per row. The
1705 * address calculation algorithm (emit_address_calculation() in
1706 * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
1707 * modulus equal to the LOD.
1708 */
1709 param->tiling[2] = (u->TexObj->Target == GL_TEXTURE_3D ? u->Level :
1710 0);
1711 }
1712
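/* Emit the surface state and image params for one image unit: buffer
 * textures and RAW fallbacks go through the buffer path, everything else
 * through a regular texture view, with aux disabled unless the color is
 * still unresolved.
 */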
1713 static void
1714 update_image_surface(struct brw_context *brw,
1715 struct gl_image_unit *u,
1716 GLenum access,
1717 unsigned surface_idx,
1718 uint32_t *surf_offset,
1719 struct brw_image_param *param)
1720 {
1721 if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
1722 struct gl_texture_object *obj = u->TexObj;
1723 const unsigned format = get_image_format(brw, u->_ActualFormat, access);
1724
1725 if (obj->Target == GL_TEXTURE_BUFFER) {
1726 struct intel_buffer_object *intel_obj =
1727 intel_buffer_object(obj->BufferObject);
1728 const unsigned texel_size = (format == ISL_FORMAT_RAW ? 1 :
1729 _mesa_get_format_bytes(u->_ActualFormat));
1730
1731 brw_emit_buffer_surface_state(
1732 brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
1733 format, intel_obj->Base.Size, texel_size,
1734 access != GL_READ_ONLY);
1735
1736 update_buffer_image_param(brw, u, surface_idx, param);
1737
1738 } else {
1739 struct intel_texture_object *intel_obj = intel_texture_object(obj);
1740 struct intel_mipmap_tree *mt = intel_obj->mt;
1741
1742 if (format == ISL_FORMAT_RAW) {
1743 brw_emit_buffer_surface_state(
1744 brw, surf_offset, mt->bo, mt->offset,
1745 format, mt->bo->size - mt->offset, 1 /* pitch */,
1746 access != GL_READ_ONLY);
1747
1748 } else {
1749 const unsigned num_layers = (!u->Layered ? 1 :
1750 obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1751 mt->logical_depth0);
1752
1753 struct isl_view view = {
1754 .format = format,
1755 .base_level = obj->MinLevel + u->Level,
1756 .levels = 1,
1757 .base_array_layer = obj->MinLayer + u->_Layer,
1758 .array_len = num_layers,
1759 .swizzle = ISL_SWIZZLE_IDENTITY,
1760 .usage = ISL_SURF_USAGE_STORAGE_BIT,
1761 };
1762
1763 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
1764 const bool unresolved = intel_miptree_has_color_unresolved(
1765 mt, view.base_level, view.levels,
1766 view.base_array_layer, view.array_len);
1767 const int flags = unresolved ? 0 : INTEL_AUX_BUFFER_DISABLED;
1768 brw_emit_surface_state(brw, mt, flags, mt->target, view,
1769 tex_mocs[brw->gen],
1770 surf_offset, surf_index,
1771 I915_GEM_DOMAIN_SAMPLER,
1772 access == GL_READ_ONLY ? 0 :
1773 I915_GEM_DOMAIN_SAMPLER);
1774 }
1775
1776 update_texture_image_param(brw, u, surface_idx, param);
1777 }
1778
1779 } else {
1780 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
1781 update_default_image_param(brw, u, surface_idx, param);
1782 }
1783 }
1784
1785 void
1786 brw_upload_image_surfaces(struct brw_context *brw,
1787 const struct gl_program *prog,
1788 struct brw_stage_state *stage_state,
1789 struct brw_stage_prog_data *prog_data)
1790 {
1791 assert(prog);
1792 struct gl_context *ctx = &brw->ctx;
1793
1794 if (prog->info.num_images) {
1795 for (unsigned i = 0; i < prog->info.num_images; i++) {
1796 struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
1797 const unsigned surf_idx = prog_data->binding_table.image_start + i;
1798
1799 update_image_surface(brw, u, prog->sh.ImageAccess[i],
1800 surf_idx,
1801 &stage_state->surf_offset[surf_idx],
1802 &prog_data->image_param[i]);
1803 }
1804
1805 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1806 /* This may have changed the image metadata dependent on the context
1807 * image unit state and passed to the program as uniforms, so make sure
1808 * that push and pull constants are reuploaded.
1809 */
1810 brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
1811 }
1812 }
1813
1814 static void
1815 brw_upload_wm_image_surfaces(struct brw_context *brw)
1816 {
1817 /* BRW_NEW_FRAGMENT_PROGRAM */
1818 const struct gl_program *wm = brw->fragment_program;
1819
1820 if (wm) {
1821 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1822 brw_upload_image_surfaces(brw, wm, &brw->wm.base,
1823 brw->wm.base.prog_data);
1824 }
1825 }
1826
1827 const struct brw_tracked_state brw_wm_image_surfaces = {
1828 .dirty = {
1829 .mesa = _NEW_TEXTURE,
1830 .brw = BRW_NEW_BATCH |
1831 BRW_NEW_BLORP |
1832 BRW_NEW_FRAGMENT_PROGRAM |
1833 BRW_NEW_FS_PROG_DATA |
1834 BRW_NEW_IMAGE_UNITS
1835 },
1836 .emit = brw_upload_wm_image_surfaces,
1837 };
1838
1839 void
1840 gen4_init_vtable_surface_functions(struct brw_context *brw)
1841 {
1842 brw->vtbl.update_renderbuffer_surface = gen4_update_renderbuffer_surface;
1843 brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
1844 }
1845
1846 void
1847 gen6_init_vtable_surface_functions(struct brw_context *brw)
1848 {
1849 gen4_init_vtable_surface_functions(brw);
1850 brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
1851 }
1852
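/* Expose gl_NumWorkGroups to the compute shader as a RAW buffer surface,
 * uploading the three GLuints from the CPU when no indirect-dispatch BO
 * is bound.
 */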
1853 static void
1854 brw_upload_cs_work_groups_surface(struct brw_context *brw)
1855 {
1856 struct gl_context *ctx = &brw->ctx;
1857 /* _NEW_PROGRAM */
1858 struct gl_program *prog =
1859 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1860 /* BRW_NEW_CS_PROG_DATA */
1861 const struct brw_cs_prog_data *cs_prog_data =
1862 brw_cs_prog_data(brw->cs.base.prog_data);
1863
1864 if (prog && cs_prog_data->uses_num_work_groups) {
1865 const unsigned surf_idx =
1866 cs_prog_data->binding_table.work_groups_start;
1867 uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
1868 drm_intel_bo *bo;
1869 uint32_t bo_offset;
1870
1871 if (brw->compute.num_work_groups_bo == NULL) {
1872 bo = NULL;
1873 intel_upload_data(brw,
1874 (void *)brw->compute.num_work_groups,
1875 3 * sizeof(GLuint),
1876 sizeof(GLuint),
1877 &bo,
1878 &bo_offset);
1879 } else {
1880 bo = brw->compute.num_work_groups_bo;
1881 bo_offset = brw->compute.num_work_groups_offset;
1882 }
1883
1884 brw_emit_buffer_surface_state(brw, surf_offset,
1885 bo, bo_offset,
1886 ISL_FORMAT_RAW,
1887 3 * sizeof(GLuint), 1, true);
1888 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1889 }
1890 }
1891
1892 const struct brw_tracked_state brw_cs_work_groups_surface = {
1893 .dirty = {
1894 .brw = BRW_NEW_BLORP |
1895 BRW_NEW_CS_PROG_DATA |
1896 BRW_NEW_CS_WORK_GROUPS
1897 },
1898 .emit = brw_upload_cs_work_groups_surface,
1899 };