i965: Enable OpenGL 4.5 on Haswell.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_wm_surface_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "compiler/nir/nir.h"
34 #include "main/context.h"
35 #include "main/blend.h"
36 #include "main/mtypes.h"
37 #include "main/samplerobj.h"
38 #include "main/shaderimage.h"
39 #include "main/teximage.h"
40 #include "program/prog_parameter.h"
41 #include "program/prog_instruction.h"
42 #include "main/framebuffer.h"
43 #include "main/shaderapi.h"
44
45 #include "isl/isl.h"
46
47 #include "intel_mipmap_tree.h"
48 #include "intel_batchbuffer.h"
49 #include "intel_tex.h"
50 #include "intel_fbo.h"
51 #include "intel_buffer_objects.h"
52
53 #include "brw_context.h"
54 #include "brw_state.h"
55 #include "brw_defines.h"
56 #include "brw_wm.h"
57
58 enum {
59 INTEL_RENDERBUFFER_LAYERED = 1 << 0,
60 INTEL_AUX_BUFFER_DISABLED = 1 << 1,
61 };
62
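/* Memory object control state (MOCS) entries used below for texture and
 * render-target surfaces, indexed by hardware generation.  Judging by the
 * macro names, texture surfaces get write-back cacheable settings (L3 on
 * gen7), while render targets on gen8+ inherit their cacheability from the
 * page table entry.
 */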
63 uint32_t tex_mocs[] = {
64 [7] = GEN7_MOCS_L3,
65 [8] = BDW_MOCS_WB,
66 [9] = SKL_MOCS_WB,
67 };
68
69 uint32_t rb_mocs[] = {
70 [7] = GEN7_MOCS_L3,
71 [8] = BDW_MOCS_PTE,
72 [9] = SKL_MOCS_PTE,
73 };
74
75 static void
76 brw_emit_surface_state(struct brw_context *brw,
77 struct intel_mipmap_tree *mt, uint32_t flags,
78 GLenum target, struct isl_view view,
79 uint32_t mocs, uint32_t *surf_offset, int surf_index,
80 unsigned read_domains, unsigned write_domains)
81 {
82 uint32_t tile_x = mt->level[0].slice[0].x_offset;
83 uint32_t tile_y = mt->level[0].slice[0].y_offset;
84 uint32_t offset = mt->offset;
85
86 struct isl_surf surf;
87 intel_miptree_get_isl_surf(brw, mt, &surf);
88
89 surf.dim = get_isl_surf_dim(target);
90
91 const enum isl_dim_layout dim_layout =
92 get_isl_dim_layout(&brw->screen->devinfo, mt->tiling, target);
93
94 if (surf.dim_layout != dim_layout) {
95 /* The layout of the specified texture target is not compatible with the
96 * actual layout of the miptree structure in memory -- you're entering
97 * dangerous territory. This can only possibly work if you only intend
98 * to access a single level and slice of the texture, and the hardware
99 * supports the tile offset feature in order to allow non-tile-aligned
100 * base offsets, since we'll have to point the hardware to the first
101 * texel of the level instead of relying on the usual base level/layer
102 * controls.
103 */
104 assert(brw->has_surface_tile_offset);
105 assert(view.levels == 1 && view.array_len == 1);
106 assert(tile_x == 0 && tile_y == 0);
107
108 offset += intel_miptree_get_tile_offsets(mt, view.base_level,
109 view.base_array_layer,
110 &tile_x, &tile_y);
111
112 /* Minify the logical dimensions of the texture. */
113 const unsigned l = view.base_level - mt->first_level;
114 surf.logical_level0_px.width = minify(surf.logical_level0_px.width, l);
115 surf.logical_level0_px.height = surf.dim <= ISL_SURF_DIM_1D ? 1 :
116 minify(surf.logical_level0_px.height, l);
117 surf.logical_level0_px.depth = surf.dim <= ISL_SURF_DIM_2D ? 1 :
118 minify(surf.logical_level0_px.depth, l);
119
120 /* Only the base level and layer can be addressed with the overridden
121 * layout.
122 */
123 surf.logical_level0_px.array_len = 1;
124 surf.levels = 1;
125 surf.dim_layout = dim_layout;
126
127 /* The requested slice of the texture is now at the base level and
128 * layer.
129 */
130 view.base_level = 0;
131 view.base_array_layer = 0;
132 }
133
134 union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };
135
136 drm_intel_bo *aux_bo;
137 struct isl_surf *aux_surf = NULL, aux_surf_s;
138 uint64_t aux_offset = 0;
139 enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE;
140 if ((mt->mcs_buf || intel_miptree_sample_with_hiz(brw, mt)) &&
141 !(flags & INTEL_AUX_BUFFER_DISABLED)) {
142 intel_miptree_get_aux_isl_surf(brw, mt, &aux_surf_s, &aux_usage);
143 aux_surf = &aux_surf_s;
144
145 if (mt->mcs_buf) {
146 assert(mt->mcs_buf->offset == 0);
147 aux_bo = mt->mcs_buf->bo;
148 aux_offset = mt->mcs_buf->bo->offset64 + mt->mcs_buf->offset;
149 } else {
150 aux_bo = mt->hiz_buf->aux_base.bo;
151 aux_offset = mt->hiz_buf->aux_base.bo->offset64;
152 }
153
154 /* We only really need a clear color if we also have an auxiliary
155 * surface. Without one, it does nothing.
156 */
157 clear_color = intel_miptree_get_isl_clear_color(brw, mt);
158 }
159
160 void *state = __brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
161 brw->isl_dev.ss.size,
162 brw->isl_dev.ss.align,
163 surf_index, surf_offset);
164
165 isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
166 .address = mt->bo->offset64 + offset,
167 .aux_surf = aux_surf, .aux_usage = aux_usage,
168 .aux_address = aux_offset,
169 .mocs = mocs, .clear_color = clear_color,
170 .x_offset_sa = tile_x, .y_offset_sa = tile_y);
171
172 drm_intel_bo_emit_reloc(brw->batch.bo,
173 *surf_offset + brw->isl_dev.ss.addr_offset,
174 mt->bo, offset,
175 read_domains, write_domains);
176
177 if (aux_surf) {
178 /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
179 * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
180 * contain other control information. Since buffer addresses are always
181 * on 4k boundaries (and thus have their lower 12 bits zero), we can use
182 * an ordinary reloc to do the necessary address translation.
183 */
184 assert((aux_offset & 0xfff) == 0);
185 uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
186 drm_intel_bo_emit_reloc(brw->batch.bo,
187 *surf_offset + brw->isl_dev.ss.aux_addr_offset,
188 aux_bo, *aux_addr & 0xfff,
189 read_domains, write_domains);
190 }
191 }
192
193 uint32_t
194 brw_update_renderbuffer_surface(struct brw_context *brw,
195 struct gl_renderbuffer *rb,
196 uint32_t flags, unsigned unit /* unused */,
197 uint32_t surf_index)
198 {
199 struct gl_context *ctx = &brw->ctx;
200 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
201 struct intel_mipmap_tree *mt = irb->mt;
202
203 if (brw->gen < 9) {
204 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
205 }
206
207 assert(brw_render_target_supported(brw, rb));
208
209 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
210 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
211 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
212 __func__, _mesa_get_format_name(rb_format));
213 }
214
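   /* For the UMS and CMS MSAA layouts each sample lives in its own physical
    * layer, so irb->mt_layer is expressed in sample units and has to be
    * divided by the sample count to get a logical layer index (see the
    * equivalent note in update_renderbuffer_read_surfaces()).
    */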
215 const unsigned layer_multiplier =
216 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
217 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
218 MAX2(irb->mt->num_samples, 1) : 1;
219
220 struct isl_view view = {
221 .format = brw->render_target_format[rb_format],
222 .base_level = irb->mt_level - irb->mt->first_level,
223 .levels = 1,
224 .base_array_layer = irb->mt_layer / layer_multiplier,
225 .array_len = MAX2(irb->layer_count, 1),
226 .swizzle = ISL_SWIZZLE_IDENTITY,
227 .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
228 };
229
230 uint32_t offset;
231 brw_emit_surface_state(brw, mt, flags, mt->target, view,
232 rb_mocs[brw->gen],
233 &offset, surf_index,
234 I915_GEM_DOMAIN_RENDER,
235 I915_GEM_DOMAIN_RENDER);
236 return offset;
237 }
238
239 GLuint
240 translate_tex_target(GLenum target)
241 {
242 switch (target) {
243 case GL_TEXTURE_1D:
244 case GL_TEXTURE_1D_ARRAY_EXT:
245 return BRW_SURFACE_1D;
246
247 case GL_TEXTURE_RECTANGLE_NV:
248 return BRW_SURFACE_2D;
249
250 case GL_TEXTURE_2D:
251 case GL_TEXTURE_2D_ARRAY_EXT:
252 case GL_TEXTURE_EXTERNAL_OES:
253 case GL_TEXTURE_2D_MULTISAMPLE:
254 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
255 return BRW_SURFACE_2D;
256
257 case GL_TEXTURE_3D:
258 return BRW_SURFACE_3D;
259
260 case GL_TEXTURE_CUBE_MAP:
261 case GL_TEXTURE_CUBE_MAP_ARRAY:
262 return BRW_SURFACE_CUBE;
263
264 default:
265 unreachable("not reached");
266 }
267 }
268
269 uint32_t
270 brw_get_surface_tiling_bits(uint32_t tiling)
271 {
272 switch (tiling) {
273 case I915_TILING_X:
274 return BRW_SURFACE_TILED;
275 case I915_TILING_Y:
276 return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
277 default:
278 return 0;
279 }
280 }
281
282
283 uint32_t
284 brw_get_surface_num_multisamples(unsigned num_samples)
285 {
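   /* The legacy surface state used on this path apparently only distinguishes
    * 1x from 4x, so any multisampled surface is programmed as 4x here.
    */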
286 if (num_samples > 1)
287 return BRW_SURFACE_MULTISAMPLECOUNT_4;
288 else
289 return BRW_SURFACE_MULTISAMPLECOUNT_1;
290 }
291
292 /**
293 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
294 * swizzling.
295 */
296 int
297 brw_get_texture_swizzle(const struct gl_context *ctx,
298 const struct gl_texture_object *t)
299 {
300 const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
301
302 int swizzles[SWIZZLE_NIL + 1] = {
303 SWIZZLE_X,
304 SWIZZLE_Y,
305 SWIZZLE_Z,
306 SWIZZLE_W,
307 SWIZZLE_ZERO,
308 SWIZZLE_ONE,
309 SWIZZLE_NIL
310 };
311
312 if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
313 img->_BaseFormat == GL_DEPTH_STENCIL) {
314 GLenum depth_mode = t->DepthMode;
315
316 /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
317 * with depth component data specified with a sized internal format.
318 * Otherwise, it's left at the old default, GL_LUMINANCE.
319 */
320 if (_mesa_is_gles3(ctx) &&
321 img->InternalFormat != GL_DEPTH_COMPONENT &&
322 img->InternalFormat != GL_DEPTH_STENCIL) {
323 depth_mode = GL_RED;
324 }
325
326 switch (depth_mode) {
327 case GL_ALPHA:
328 swizzles[0] = SWIZZLE_ZERO;
329 swizzles[1] = SWIZZLE_ZERO;
330 swizzles[2] = SWIZZLE_ZERO;
331 swizzles[3] = SWIZZLE_X;
332 break;
333 case GL_LUMINANCE:
334 swizzles[0] = SWIZZLE_X;
335 swizzles[1] = SWIZZLE_X;
336 swizzles[2] = SWIZZLE_X;
337 swizzles[3] = SWIZZLE_ONE;
338 break;
339 case GL_INTENSITY:
340 swizzles[0] = SWIZZLE_X;
341 swizzles[1] = SWIZZLE_X;
342 swizzles[2] = SWIZZLE_X;
343 swizzles[3] = SWIZZLE_X;
344 break;
345 case GL_RED:
346 swizzles[0] = SWIZZLE_X;
347 swizzles[1] = SWIZZLE_ZERO;
348 swizzles[2] = SWIZZLE_ZERO;
349 swizzles[3] = SWIZZLE_ONE;
350 break;
351 }
352 }
353
354 GLenum datatype = _mesa_get_format_datatype(img->TexFormat);
355
356 /* If the texture's format is alpha-only, force R, G, and B to
357 * 0.0. Similarly, if the texture's format has no alpha channel,
358 * force the alpha value read to 1.0. This allows for the
359 * implementation to use an RGBA texture for any of these formats
360 * without leaking any unexpected values.
361 */
362 switch (img->_BaseFormat) {
363 case GL_ALPHA:
364 swizzles[0] = SWIZZLE_ZERO;
365 swizzles[1] = SWIZZLE_ZERO;
366 swizzles[2] = SWIZZLE_ZERO;
367 break;
368 case GL_LUMINANCE:
369 if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
370 swizzles[0] = SWIZZLE_X;
371 swizzles[1] = SWIZZLE_X;
372 swizzles[2] = SWIZZLE_X;
373 swizzles[3] = SWIZZLE_ONE;
374 }
375 break;
376 case GL_LUMINANCE_ALPHA:
377 if (datatype == GL_SIGNED_NORMALIZED) {
378 swizzles[0] = SWIZZLE_X;
379 swizzles[1] = SWIZZLE_X;
380 swizzles[2] = SWIZZLE_X;
381 swizzles[3] = SWIZZLE_W;
382 }
383 break;
384 case GL_INTENSITY:
385 if (datatype == GL_SIGNED_NORMALIZED) {
386 swizzles[0] = SWIZZLE_X;
387 swizzles[1] = SWIZZLE_X;
388 swizzles[2] = SWIZZLE_X;
389 swizzles[3] = SWIZZLE_X;
390 }
391 break;
392 case GL_RED:
393 case GL_RG:
394 case GL_RGB:
395 if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0)
396 swizzles[3] = SWIZZLE_ONE;
397 break;
398 }
399
400 return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
401 swizzles[GET_SWZ(t->_Swizzle, 1)],
402 swizzles[GET_SWZ(t->_Swizzle, 2)],
403 swizzles[GET_SWZ(t->_Swizzle, 3)]);
404 }
405
406 /**
407 * Convert a swizzle enumeration (e.g. SWIZZLE_X) to one of the Gen7.5+
408 * "Shader Channel Select" enumerations (e.g. HSW_SCS_RED). The mappings are
409 *
410 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
411 * 0 1 2 3 4 5
412 * 4 5 6 7 0 1
413 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
414 *
415 * which is simply adding 4 then modding by 8 (or anding with 7).
416 *
417 * We then may need to apply workarounds for textureGather hardware bugs.
418 */
419 static unsigned
420 swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
421 {
422 unsigned scs = (swizzle + 4) & 7;
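   /* For example, SWIZZLE_X (0) becomes 4 == HSW_SCS_RED and SWIZZLE_ZERO (4)
    * becomes 0 == HSW_SCS_ZERO, matching the table above.
    */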
423
424 return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
425 }
426
427 static unsigned
428 brw_find_matching_rb(const struct gl_framebuffer *fb,
429 const struct intel_mipmap_tree *mt)
430 {
431 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
432 const struct intel_renderbuffer *irb =
433 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
434
435 if (irb && irb->mt == mt)
436 return i;
437 }
438
439 return fb->_NumColorDrawBuffers;
440 }
441
442 static inline bool
443 brw_texture_view_sane(const struct brw_context *brw,
444 const struct intel_mipmap_tree *mt,
445 const struct isl_view *view)
446 {
447 /* There are special cases only for lossless compression. */
448 if (!intel_miptree_is_lossless_compressed(brw, mt))
449 return true;
450
451 if (isl_format_supports_lossless_compression(&brw->screen->devinfo,
452 view->format))
453 return true;
454
455 /* Logic elsewhere needs to take care to resolve the color buffer prior
456 * to sampling it as non-compressed.
457 */
458 if (intel_miptree_has_color_unresolved(mt, view->base_level, view->levels,
459 view->base_array_layer,
460 view->array_len))
461 return false;
462
463 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
464 const unsigned rb_index = brw_find_matching_rb(fb, mt);
465
466 if (rb_index == fb->_NumColorDrawBuffers)
467 return true;
468
469 /* Underlying surface is compressed but it is sampled using a format that
470 * the sampling engine doesn't support as compressed. Compression must be
471 * disabled for both sampling engine and data port in case the same surface
472 * is also used as a render target.
473 */
474 return brw->draw_aux_buffer_disabled[rb_index];
475 }
476
477 static bool
478 brw_disable_aux_surface(const struct brw_context *brw,
479 const struct intel_mipmap_tree *mt,
480 const struct isl_view *view)
481 {
482 /* Nothing to disable. */
483 if (!mt->mcs_buf)
484 return false;
485
486 const bool is_unresolved = intel_miptree_has_color_unresolved(
487 mt, view->base_level, view->levels,
488 view->base_array_layer, view->array_len);
489
490 /* There are special cases only for lossless compression. */
491 if (!intel_miptree_is_lossless_compressed(brw, mt))
492 return !is_unresolved;
493
494 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
495 const unsigned rb_index = brw_find_matching_rb(fb, mt);
496
497 /* If we are drawing into this with compression enabled, then we must also
498 * enable compression when texturing from it regardless of
499 * fast_clear_state. If we don't, then after the first draw call with
500 * this setup, there will be data in the CCS which won't get picked up by
501 * subsequent texturing operations as required by ARB_texture_barrier.
502 * Since we don't want to re-emit the binding table or do a resolve
503 * operation every draw call, the easiest thing to do is just enable
504 * compression on the texturing side. This is completely safe to do
505 * since, if compressed texturing weren't allowed, we would have disabled
506 * compression of render targets in whatever_that_function_is_called().
507 */
508 if (rb_index < fb->_NumColorDrawBuffers) {
509 if (brw->draw_aux_buffer_disabled[rb_index]) {
510 assert(!is_unresolved);
511 }
512
513 return brw->draw_aux_buffer_disabled[rb_index];
514 }
515
516 return !is_unresolved;
517 }
518
519 void
520 brw_update_texture_surface(struct gl_context *ctx,
521 unsigned unit,
522 uint32_t *surf_offset,
523 bool for_gather,
524 uint32_t plane)
525 {
526 struct brw_context *brw = brw_context(ctx);
527 struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
528
529 if (obj->Target == GL_TEXTURE_BUFFER) {
530 brw_update_buffer_texture_surface(ctx, unit, surf_offset);
531
532 } else {
533 struct intel_texture_object *intel_obj = intel_texture_object(obj);
534 struct intel_mipmap_tree *mt = intel_obj->mt;
535
536 if (plane > 0) {
537 if (mt->plane[plane - 1] == NULL)
538 return;
539 mt = mt->plane[plane - 1];
540 }
541
542 struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
543 /* If this is a view with restricted NumLayers, then our effective depth
544 * is not just the miptree depth.
545 */
546 const unsigned view_num_layers =
547 (obj->Immutable && obj->Target != GL_TEXTURE_3D) ? obj->NumLayers :
548 mt->logical_depth0;
549
550 /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
551 * texturing functions that return a float, as our code generation always
552 * selects the .x channel (which would always be 0).
553 */
554 struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
555 const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
556 (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
557 firstImage->_BaseFormat == GL_DEPTH_STENCIL);
558 const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
559 brw_get_texture_swizzle(&brw->ctx, obj));
560
561 mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
562 unsigned format = translate_tex_format(brw, mesa_fmt,
563 sampler->sRGBDecode);
564
565 /* Implement gen6 and gen7 gather work-around */
566 bool need_green_to_blue = false;
567 if (for_gather) {
568 if (brw->gen == 7 && (format == BRW_SURFACEFORMAT_R32G32_FLOAT ||
569 format == BRW_SURFACEFORMAT_R32G32_SINT ||
570 format == BRW_SURFACEFORMAT_R32G32_UINT)) {
571 format = BRW_SURFACEFORMAT_R32G32_FLOAT_LD;
572 need_green_to_blue = brw->is_haswell;
573 } else if (brw->gen == 6) {
574 /* Sandybridge's gather4 message is broken for integer formats.
575 * To work around this, we pretend the surface is UNORM for
576 * 8 or 16-bit formats, and emit shader instructions to recover
577 * the real INT/UINT value. For 32-bit formats, we pretend
578 * the surface is FLOAT, and simply reinterpret the resulting
579 * bits.
580 */
581 switch (format) {
582 case BRW_SURFACEFORMAT_R8_SINT:
583 case BRW_SURFACEFORMAT_R8_UINT:
584 format = BRW_SURFACEFORMAT_R8_UNORM;
585 break;
586
587 case BRW_SURFACEFORMAT_R16_SINT:
588 case BRW_SURFACEFORMAT_R16_UINT:
589 format = BRW_SURFACEFORMAT_R16_UNORM;
590 break;
591
592 case BRW_SURFACEFORMAT_R32_SINT:
593 case BRW_SURFACEFORMAT_R32_UINT:
594 format = BRW_SURFACEFORMAT_R32_FLOAT;
595 break;
596
597 default:
598 break;
599 }
600 }
601 }
602
603 if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
604 if (brw->gen <= 7) {
605 assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
606 mt = mt->r8stencil_mt;
607 } else {
608 mt = mt->stencil_mt;
609 }
610 format = BRW_SURFACEFORMAT_R8_UINT;
611 } else if (brw->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
612 assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
613 mt = mt->r8stencil_mt;
614 format = BRW_SURFACEFORMAT_R8_UINT;
615 }
616
617 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
618
619 struct isl_view view = {
620 .format = format,
621 .base_level = obj->MinLevel + obj->BaseLevel,
622 .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
623 .base_array_layer = obj->MinLayer,
624 .array_len = view_num_layers,
625 .swizzle = {
626 .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
627 .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
628 .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
629 .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
630 },
631 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
632 };
633
634 if (obj->Target == GL_TEXTURE_CUBE_MAP ||
635 obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
636 view.usage |= ISL_SURF_USAGE_CUBE_BIT;
637
638 assert(brw_texture_view_sane(brw, mt, &view));
639
640 const int flags = brw_disable_aux_surface(brw, mt, &view) ?
641 INTEL_AUX_BUFFER_DISABLED : 0;
642 brw_emit_surface_state(brw, mt, flags, mt->target, view,
643 tex_mocs[brw->gen],
644 surf_offset, surf_index,
645 I915_GEM_DOMAIN_SAMPLER, 0);
646 }
647 }
648
649 void
650 brw_emit_buffer_surface_state(struct brw_context *brw,
651 uint32_t *out_offset,
652 drm_intel_bo *bo,
653 unsigned buffer_offset,
654 unsigned surface_format,
655 unsigned buffer_size,
656 unsigned pitch,
657 bool rw)
658 {
659 uint32_t *dw = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
660 brw->isl_dev.ss.size,
661 brw->isl_dev.ss.align,
662 out_offset);
663
664 isl_buffer_fill_state(&brw->isl_dev, dw,
665 .address = (bo ? bo->offset64 : 0) + buffer_offset,
666 .size = buffer_size,
667 .format = surface_format,
668 .stride = pitch,
669 .mocs = tex_mocs[brw->gen]);
670
671 if (bo) {
672 drm_intel_bo_emit_reloc(brw->batch.bo,
673 *out_offset + brw->isl_dev.ss.addr_offset,
674 bo, buffer_offset,
675 I915_GEM_DOMAIN_SAMPLER,
676 (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
677 }
678 }
679
680 void
681 brw_update_buffer_texture_surface(struct gl_context *ctx,
682 unsigned unit,
683 uint32_t *surf_offset)
684 {
685 struct brw_context *brw = brw_context(ctx);
686 struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
687 struct intel_buffer_object *intel_obj =
688 intel_buffer_object(tObj->BufferObject);
689 uint32_t size = tObj->BufferSize;
690 drm_intel_bo *bo = NULL;
691 mesa_format format = tObj->_BufferObjectFormat;
692 uint32_t brw_format = brw_format_for_mesa_format(format);
693 int texel_size = _mesa_get_format_bytes(format);
694
695 if (intel_obj) {
696 size = MIN2(size, intel_obj->Base.Size);
697 bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size);
698 }
699
700 if (brw_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
701 _mesa_problem(NULL, "bad format %s for texture buffer\n",
702 _mesa_get_format_name(format));
703 }
704
705 brw_emit_buffer_surface_state(brw, surf_offset, bo,
706 tObj->BufferOffset,
707 brw_format,
708 size,
709 texel_size,
710 false /* rw */);
711 }
712
713 /**
714 * Create the constant buffer surface. Vertex/fragment shader constants will be
715 * read from this buffer with Data Port Read instructions/messages.
716 */
717 void
718 brw_create_constant_surface(struct brw_context *brw,
719 drm_intel_bo *bo,
720 uint32_t offset,
721 uint32_t size,
722 uint32_t *out_offset)
723 {
724 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
725 BRW_SURFACEFORMAT_R32G32B32A32_FLOAT,
726 size, 1, false);
727 }
728
729 /**
730 * Create the buffer surface. Shader buffer variables will be
731 * read from / written to this buffer with Data Port Read/Write
732 * instructions/messages.
733 */
734 void
735 brw_create_buffer_surface(struct brw_context *brw,
736 drm_intel_bo *bo,
737 uint32_t offset,
738 uint32_t size,
739 uint32_t *out_offset)
740 {
741 /* Use a raw surface so we can reuse existing untyped read/write/atomic
742 * messages. We need these specifically for the fragment shader since they
743 * include a pixel mask header that we need to ensure correct behavior
744 * with helper invocations, which cannot write to the buffer.
745 */
746 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
747 BRW_SURFACEFORMAT_RAW,
748 size, 1, true);
749 }
750
751 /**
752 * Set up a binding table entry for use by stream output logic (transform
753 * feedback).
754 *
755 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
756 */
757 void
758 brw_update_sol_surface(struct brw_context *brw,
759 struct gl_buffer_object *buffer_obj,
760 uint32_t *out_offset, unsigned num_vector_components,
761 unsigned stride_dwords, unsigned offset_dwords)
762 {
763 struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
764 uint32_t offset_bytes = 4 * offset_dwords;
765 drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
766 offset_bytes,
767 buffer_obj->Size - offset_bytes);
768 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
769 out_offset);
770 uint32_t pitch_minus_1 = 4*stride_dwords - 1;
771 size_t size_dwords = buffer_obj->Size / 4;
772 uint32_t buffer_size_minus_1, width, height, depth, surface_format;
773
774 /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
775 * too big to map using a single binding table entry?
776 */
777 assert((size_dwords - offset_dwords) / stride_dwords
778 <= BRW_MAX_NUM_BUFFER_ENTRIES);
779
780 if (size_dwords > offset_dwords + num_vector_components) {
781 /* There is room for at least 1 transform feedback output in the buffer.
782 * Compute the number of additional transform feedback outputs the
783 * buffer has room for.
784 */
785 buffer_size_minus_1 =
786 (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
787 } else {
788 /* There isn't even room for a single transform feedback output in the
789 * buffer. We can't configure the binding table entry to prevent output
790 * entirely; we'll have to rely on the geometry shader to detect
791 * overflow. But to minimize the damage in case of a bug, set up the
792 * binding table entry to just allow a single output.
793 */
794 buffer_size_minus_1 = 0;
795 }
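   /* buffer_size_minus_1 gets split across the legacy SURFACE_STATE width
    * (bits 6:0), height (bits 19:7) and depth (bits 26:20) fields below.
    */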
796 width = buffer_size_minus_1 & 0x7f;
797 height = (buffer_size_minus_1 & 0xfff80) >> 7;
798 depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
799
800 switch (num_vector_components) {
801 case 1:
802 surface_format = BRW_SURFACEFORMAT_R32_FLOAT;
803 break;
804 case 2:
805 surface_format = BRW_SURFACEFORMAT_R32G32_FLOAT;
806 break;
807 case 3:
808 surface_format = BRW_SURFACEFORMAT_R32G32B32_FLOAT;
809 break;
810 case 4:
811 surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;
812 break;
813 default:
814 unreachable("Invalid vector size for transform feedback output");
815 }
816
817 surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
818 BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
819 surface_format << BRW_SURFACE_FORMAT_SHIFT |
820 BRW_SURFACE_RC_READ_WRITE;
821 surf[1] = bo->offset64 + offset_bytes; /* reloc */
822 surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
823 height << BRW_SURFACE_HEIGHT_SHIFT);
824 surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
825 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
826 surf[4] = 0;
827 surf[5] = 0;
828
829 /* Emit relocation to surface contents. */
830 drm_intel_bo_emit_reloc(brw->batch.bo,
831 *out_offset + 4,
832 bo, offset_bytes,
833 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
834 }
835
836 /* Creates a new WM constant buffer reflecting the current fragment program's
837 * constants, if needed by the fragment program.
838 *
839 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
840 * state atom.
841 */
842 static void
843 brw_upload_wm_pull_constants(struct brw_context *brw)
844 {
845 struct brw_stage_state *stage_state = &brw->wm.base;
846 /* BRW_NEW_FRAGMENT_PROGRAM */
847 struct brw_program *fp = (struct brw_program *) brw->fragment_program;
848 /* BRW_NEW_FS_PROG_DATA */
849 struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;
850
851 _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
852 /* _NEW_PROGRAM_CONSTANTS */
853 brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
854 stage_state, prog_data);
855 }
856
857 const struct brw_tracked_state brw_wm_pull_constants = {
858 .dirty = {
859 .mesa = _NEW_PROGRAM_CONSTANTS,
860 .brw = BRW_NEW_BATCH |
861 BRW_NEW_BLORP |
862 BRW_NEW_FRAGMENT_PROGRAM |
863 BRW_NEW_FS_PROG_DATA,
864 },
865 .emit = brw_upload_wm_pull_constants,
866 };
867
868 /**
869 * Creates a null renderbuffer surface.
870 *
871 * This is used when the shader doesn't write to any color output. An FB
872 * write to target 0 will still be emitted, because that's how the thread is
873 * terminated (and computed depth is returned), so we need to have the
874 * hardware discard the target 0 color output.
875 */
876 static void
877 brw_emit_null_surface_state(struct brw_context *brw,
878 unsigned width,
879 unsigned height,
880 unsigned samples,
881 uint32_t *out_offset)
882 {
883 /* From the Sandy Bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
884 * Notes):
885 *
886 * A null surface will be used in instances where an actual surface is
887 * not bound. When a write message is generated to a null surface, no
888 * actual surface is written to. When a read message (including any
889 * sampling engine message) is generated to a null surface, the result
890 * is all zeros. Note that a null surface type is allowed to be used
891 * with all messages, even if it is not specifically indicated as
892 * supported. All of the remaining fields in surface state are ignored
893 * for null surfaces, with the following exceptions:
894 *
895 * - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
896 * depth buffer’s corresponding state for all render target surfaces,
897 * including null.
898 *
899 * - Surface Format must be R8G8B8A8_UNORM.
900 */
901 unsigned surface_type = BRW_SURFACE_NULL;
902 drm_intel_bo *bo = NULL;
903 unsigned pitch_minus_1 = 0;
904 uint32_t multisampling_state = 0;
905 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
906 out_offset);
907
908 if (samples > 1) {
909 /* On Gen6, null render targets seem to cause GPU hangs when
910 * multisampling. So work around this problem by rendering into a dummy
911 * color buffer.
912 *
913 * To decrease the amount of memory needed by the workaround buffer, we
914 * set its pitch to 128 bytes (the width of a Y tile). This means that
915 * the amount of memory needed for the workaround buffer is
916 * (width_in_tiles + height_in_tiles - 1) tiles.
917 *
918 * Note that since the workaround buffer will be interpreted by the
919 * hardware as an interleaved multisampled buffer, we need to compute
920 * width_in_tiles and height_in_tiles by dividing the width and height
921 * by 16 rather than the normal Y-tile size of 32.
922 */
923 unsigned width_in_tiles = ALIGN(width, 16) / 16;
924 unsigned height_in_tiles = ALIGN(height, 16) / 16;
925 unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
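      /* For example, a hypothetical 1920x1080 multisampled framebuffer would
       * need (120 + 68 - 1) * 4096 bytes, i.e. roughly 748 KiB of scratch.
       */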
926 brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
927 size_needed);
928 bo = brw->wm.multisampled_null_render_target_bo;
929 surface_type = BRW_SURFACE_2D;
930 pitch_minus_1 = 127;
931 multisampling_state = brw_get_surface_num_multisamples(samples);
932 }
933
934 surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
935 BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
936 if (brw->gen < 6) {
937 surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
938 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
939 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
940 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
941 }
942 surf[1] = bo ? bo->offset64 : 0;
943 surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
944 (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
945
946 /* From the Sandy Bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
947 * Notes):
948 *
949 * If Surface Type is SURFTYPE_NULL, this field must be TRUE
950 */
951 surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
952 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
953 surf[4] = multisampling_state;
954 surf[5] = 0;
955
956 if (bo) {
957 drm_intel_bo_emit_reloc(brw->batch.bo,
958 *out_offset + 4,
959 bo, 0,
960 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
961 }
962 }
963
964 /**
965 * Sets up a surface state structure to point at the given region.
966 * While it is only used for the front/back buffer currently, it should be
967 * usable for further buffers when doing ARB_draw_buffers support.
968 */
969 static uint32_t
970 gen4_update_renderbuffer_surface(struct brw_context *brw,
971 struct gl_renderbuffer *rb,
972 uint32_t flags, unsigned unit,
973 uint32_t surf_index)
974 {
975 struct gl_context *ctx = &brw->ctx;
976 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
977 struct intel_mipmap_tree *mt = irb->mt;
978 uint32_t *surf;
979 uint32_t tile_x, tile_y;
980 uint32_t format = 0;
981 uint32_t offset;
982 /* _NEW_BUFFERS */
983 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
984 /* BRW_NEW_FS_PROG_DATA */
985
986 assert(!(flags & INTEL_RENDERBUFFER_LAYERED));
987 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
988
989 if (rb->TexImage && !brw->has_surface_tile_offset) {
990 intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);
991
992 if (tile_x != 0 || tile_y != 0) {
993 /* Original gen4 hardware couldn't draw to a non-tile-aligned
994 * destination in a miptree unless you actually set up your renderbuffer
995 * as a miptree and used the fragile lod/array_index/etc. controls to
996 * select the image. So, instead, we just make a new single-level
997 * miptree and render into that.
998 */
999 intel_renderbuffer_move_to_temp(brw, irb, false);
1000 mt = irb->mt;
1001 }
1002 }
1003
1004 surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32, &offset);
1005
1006 format = brw->render_target_format[rb_format];
1007 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
1008 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
1009 __func__, _mesa_get_format_name(rb_format));
1010 }
1011
1012 surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
1013 format << BRW_SURFACE_FORMAT_SHIFT);
1014
1015 /* reloc */
1016 assert(mt->offset % mt->cpp == 0);
1017 surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
1018 mt->bo->offset64 + mt->offset);
1019
1020 surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
1021 (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
1022
1023 surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
1024 (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
1025
1026 surf[4] = brw_get_surface_num_multisamples(mt->num_samples);
1027
1028 assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
1029 /* Note that the low bits of these fields are missing, so
1030 * there's the possibility of getting in trouble.
1031 */
1032 assert(tile_x % 4 == 0);
1033 assert(tile_y % 2 == 0);
1034 surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
1035 (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
1036 (mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));
1037
1038 if (brw->gen < 6) {
1039 /* _NEW_COLOR */
1040 if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
1041 (ctx->Color.BlendEnabled & (1 << unit)))
1042 surf[0] |= BRW_SURFACE_BLEND_ENABLED;
1043
1044 if (!ctx->Color.ColorMask[unit][0])
1045 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
1046 if (!ctx->Color.ColorMask[unit][1])
1047 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
1048 if (!ctx->Color.ColorMask[unit][2])
1049 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;
1050
1051 /* As mentioned above, disable writes to the alpha component when the
1052 * renderbuffer is XRGB.
1053 */
1054 if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
1055 !ctx->Color.ColorMask[unit][3]) {
1056 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
1057 }
1058 }
1059
1060 drm_intel_bo_emit_reloc(brw->batch.bo,
1061 offset + 4,
1062 mt->bo,
1063 surf[1] - mt->bo->offset64,
1064 I915_GEM_DOMAIN_RENDER,
1065 I915_GEM_DOMAIN_RENDER);
1066
1067 return offset;
1068 }
1069
1070 /**
1071 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
1072 */
1073 void
1074 brw_update_renderbuffer_surfaces(struct brw_context *brw,
1075 const struct gl_framebuffer *fb,
1076 uint32_t render_target_start,
1077 uint32_t *surf_offset)
1078 {
1079 GLuint i;
1080 const unsigned int w = _mesa_geometric_width(fb);
1081 const unsigned int h = _mesa_geometric_height(fb);
1082 const unsigned int s = _mesa_geometric_samples(fb);
1083
1084 /* Update surfaces for drawing buffers */
1085 if (fb->_NumColorDrawBuffers >= 1) {
1086 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1087 const uint32_t surf_index = render_target_start + i;
1088 const int flags = (_mesa_geometric_layers(fb) > 0 ?
1089 INTEL_RENDERBUFFER_LAYERED : 0) |
1090 (brw->draw_aux_buffer_disabled[i] ?
1091 INTEL_AUX_BUFFER_DISABLED : 0);
1092
1093 if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
1094 surf_offset[surf_index] =
1095 brw->vtbl.update_renderbuffer_surface(
1096 brw, fb->_ColorDrawBuffers[i], flags, i, surf_index);
1097 } else {
1098 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1099 &surf_offset[surf_index]);
1100 }
1101 }
1102 } else {
1103 const uint32_t surf_index = render_target_start;
1104 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1105 &surf_offset[surf_index]);
1106 }
1107 }
1108
1109 static void
1110 update_renderbuffer_surfaces(struct brw_context *brw)
1111 {
1112 const struct gl_context *ctx = &brw->ctx;
1113
1114 /* BRW_NEW_FS_PROG_DATA */
1115 const struct brw_wm_prog_data *wm_prog_data =
1116 brw_wm_prog_data(brw->wm.base.prog_data);
1117
1118 /* _NEW_BUFFERS | _NEW_COLOR */
1119 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1120 brw_update_renderbuffer_surfaces(
1121 brw, fb,
1122 wm_prog_data->binding_table.render_target_start,
1123 brw->wm.base.surf_offset);
1124 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1125 }
1126
1127 const struct brw_tracked_state brw_renderbuffer_surfaces = {
1128 .dirty = {
1129 .mesa = _NEW_BUFFERS |
1130 _NEW_COLOR,
1131 .brw = BRW_NEW_BATCH |
1132 BRW_NEW_BLORP |
1133 BRW_NEW_FS_PROG_DATA,
1134 },
1135 .emit = update_renderbuffer_surfaces,
1136 };
1137
1138 const struct brw_tracked_state gen6_renderbuffer_surfaces = {
1139 .dirty = {
1140 .mesa = _NEW_BUFFERS,
1141 .brw = BRW_NEW_BATCH |
1142 BRW_NEW_BLORP,
1143 },
1144 .emit = update_renderbuffer_surfaces,
1145 };
1146
1147 static void
1148 update_renderbuffer_read_surfaces(struct brw_context *brw)
1149 {
1150 const struct gl_context *ctx = &brw->ctx;
1151
1152 /* BRW_NEW_FS_PROG_DATA */
1153 const struct brw_wm_prog_data *wm_prog_data =
1154 brw_wm_prog_data(brw->wm.base.prog_data);
1155
1156 /* BRW_NEW_FRAGMENT_PROGRAM */
1157 if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
1158 brw->fragment_program && brw->fragment_program->info.outputs_read) {
1159 /* _NEW_BUFFERS */
1160 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1161
1162 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
1163 struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
1164 const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
1165 const unsigned surf_index =
1166 wm_prog_data->binding_table.render_target_read_start + i;
1167 uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];
1168
1169 if (irb) {
1170 const unsigned format = brw->render_target_format[
1171 _mesa_get_render_format(ctx, intel_rb_format(irb))];
1172 assert(isl_format_supports_sampling(&brw->screen->devinfo,
1173 format));
1174
1175 /* Override the target of the texture if the render buffer is a
1176 * single slice of a 3D texture (since the minimum array element
1177 * field of the surface state structure is ignored by the sampler
1178 * unit for 3D textures on some hardware), or if the render buffer
1179 * is a 1D array (since shaders always provide the array index
1180 * coordinate at the Z component to avoid state-dependent
1181 * recompiles when changing the texture target of the
1182 * framebuffer).
1183 */
1184 const GLenum target =
1185 (irb->mt->target == GL_TEXTURE_3D &&
1186 irb->layer_count == 1) ? GL_TEXTURE_2D :
1187 irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
1188 irb->mt->target;
1189
1190 /* intel_renderbuffer::mt_layer is expressed in sample units for
1191 * the UMS and CMS multisample layouts, but
1192 * intel_renderbuffer::layer_count is expressed in units of whole
1193 * logical layers regardless of the multisample layout.
1194 */
1195 const unsigned mt_layer_unit =
1196 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
1197 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
1198 MAX2(irb->mt->num_samples, 1) : 1;
1199
1200 const struct isl_view view = {
1201 .format = format,
1202 .base_level = irb->mt_level - irb->mt->first_level,
1203 .levels = 1,
1204 .base_array_layer = irb->mt_layer / mt_layer_unit,
1205 .array_len = irb->layer_count,
1206 .swizzle = ISL_SWIZZLE_IDENTITY,
1207 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
1208 };
1209
1210 const int flags = brw->draw_aux_buffer_disabled[i] ?
1211 INTEL_AUX_BUFFER_DISABLED : 0;
1212 brw_emit_surface_state(brw, irb->mt, flags, target, view,
1213 tex_mocs[brw->gen],
1214 surf_offset, surf_index,
1215 I915_GEM_DOMAIN_SAMPLER, 0);
1216
1217 } else {
1218 brw->vtbl.emit_null_surface_state(
1219 brw, _mesa_geometric_width(fb), _mesa_geometric_height(fb),
1220 _mesa_geometric_samples(fb), surf_offset);
1221 }
1222 }
1223
1224 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1225 }
1226 }
1227
1228 const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
1229 .dirty = {
1230 .mesa = _NEW_BUFFERS,
1231 .brw = BRW_NEW_BATCH |
1232 BRW_NEW_FRAGMENT_PROGRAM |
1233 BRW_NEW_FS_PROG_DATA,
1234 },
1235 .emit = update_renderbuffer_read_surfaces,
1236 };
1237
1238 static void
1239 update_stage_texture_surfaces(struct brw_context *brw,
1240 const struct gl_program *prog,
1241 struct brw_stage_state *stage_state,
1242 bool for_gather, uint32_t plane)
1243 {
1244 if (!prog)
1245 return;
1246
1247 struct gl_context *ctx = &brw->ctx;
1248
1249 uint32_t *surf_offset = stage_state->surf_offset;
1250
1251 /* BRW_NEW_*_PROG_DATA */
1252 if (for_gather)
1253 surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
1254 else
1255 surf_offset += stage_state->prog_data->binding_table.plane_start[plane];
1256
1257 unsigned num_samplers = util_last_bit(prog->SamplersUsed);
1258 for (unsigned s = 0; s < num_samplers; s++) {
1259 surf_offset[s] = 0;
1260
1261 if (prog->SamplersUsed & (1 << s)) {
1262 const unsigned unit = prog->SamplerUnits[s];
1263
1264 /* _NEW_TEXTURE */
1265 if (ctx->Texture.Unit[unit]._Current) {
1266 brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather, plane);
1267 }
1268 }
1269 }
1270 }
1271
1272
1273 /**
1274 * Construct SURFACE_STATE objects for enabled textures.
1275 */
1276 static void
1277 brw_update_texture_surfaces(struct brw_context *brw)
1278 {
1279 /* BRW_NEW_VERTEX_PROGRAM */
1280 struct gl_program *vs = (struct gl_program *) brw->vertex_program;
1281
1282 /* BRW_NEW_TESS_PROGRAMS */
1283 struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
1284 struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;
1285
1286 /* BRW_NEW_GEOMETRY_PROGRAM */
1287 struct gl_program *gs = (struct gl_program *) brw->geometry_program;
1288
1289 /* BRW_NEW_FRAGMENT_PROGRAM */
1290 struct gl_program *fs = (struct gl_program *) brw->fragment_program;
1291
1292 /* _NEW_TEXTURE */
1293 update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
1294 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
1295 update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
1296 update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
1297 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);
1298
1299 /* Emit an alternate set of surface state for gather. This
1300 * allows the surface format to be overridden for only the
1301 * gather4 messages. */
1302 if (brw->gen < 8) {
1303 if (vs && vs->nir->info->uses_texture_gather)
1304 update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
1305 if (tcs && tcs->nir->info->uses_texture_gather)
1306 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
1307 if (tes && tes->nir->info->uses_texture_gather)
1308 update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
1309 if (gs && gs->nir->info->uses_texture_gather)
1310 update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
1311 if (fs && fs->nir->info->uses_texture_gather)
1312 update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
1313 }
1314
1315 if (fs) {
1316 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
1317 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
1318 }
1319
1320 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1321 }
1322
1323 const struct brw_tracked_state brw_texture_surfaces = {
1324 .dirty = {
1325 .mesa = _NEW_TEXTURE,
1326 .brw = BRW_NEW_BATCH |
1327 BRW_NEW_BLORP |
1328 BRW_NEW_FRAGMENT_PROGRAM |
1329 BRW_NEW_FS_PROG_DATA |
1330 BRW_NEW_GEOMETRY_PROGRAM |
1331 BRW_NEW_GS_PROG_DATA |
1332 BRW_NEW_TESS_PROGRAMS |
1333 BRW_NEW_TCS_PROG_DATA |
1334 BRW_NEW_TES_PROG_DATA |
1335 BRW_NEW_TEXTURE_BUFFER |
1336 BRW_NEW_VERTEX_PROGRAM |
1337 BRW_NEW_VS_PROG_DATA,
1338 },
1339 .emit = brw_update_texture_surfaces,
1340 };
1341
1342 static void
1343 brw_update_cs_texture_surfaces(struct brw_context *brw)
1344 {
1345 /* BRW_NEW_COMPUTE_PROGRAM */
1346 struct gl_program *cs = (struct gl_program *) brw->compute_program;
1347
1348 /* _NEW_TEXTURE */
1349 update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);
1350
1351 /* Emit an alternate set of surface state for gather. This
1352 * allows the surface format to be overridden for only the
1353 * gather4 messages.
1354 */
1355 if (brw->gen < 8) {
1356 if (cs && cs->nir->info->uses_texture_gather)
1357 update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
1358 }
1359
1360 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1361 }
1362
1363 const struct brw_tracked_state brw_cs_texture_surfaces = {
1364 .dirty = {
1365 .mesa = _NEW_TEXTURE,
1366 .brw = BRW_NEW_BATCH |
1367 BRW_NEW_BLORP |
1368 BRW_NEW_COMPUTE_PROGRAM,
1369 },
1370 .emit = brw_update_cs_texture_surfaces,
1371 };
1372
1373
1374 void
1375 brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
1376 struct brw_stage_state *stage_state,
1377 struct brw_stage_prog_data *prog_data)
1378 {
1379 struct gl_context *ctx = &brw->ctx;
1380
1381 if (!prog)
1382 return;
1383
1384 uint32_t *ubo_surf_offsets =
1385 &stage_state->surf_offset[prog_data->binding_table.ubo_start];
1386
1387 for (int i = 0; i < prog->info.num_ubos; i++) {
1388 struct gl_uniform_buffer_binding *binding =
1389 &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];
1390
1391 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1392 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
1393 } else {
1394 struct intel_buffer_object *intel_bo =
1395 intel_buffer_object(binding->BufferObject);
1396 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1397 if (!binding->AutomaticSize)
1398 size = MIN2(size, binding->Size);
1399 drm_intel_bo *bo =
1400 intel_bufferobj_buffer(brw, intel_bo,
1401 binding->Offset,
1402 size);
1403 brw_create_constant_surface(brw, bo, binding->Offset,
1404 size,
1405 &ubo_surf_offsets[i]);
1406 }
1407 }
1408
1409 uint32_t *ssbo_surf_offsets =
1410 &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
1411
1412 for (int i = 0; i < prog->info.num_ssbos; i++) {
1413 struct gl_shader_storage_buffer_binding *binding =
1414 &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];
1415
1416 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1417 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
1418 } else {
1419 struct intel_buffer_object *intel_bo =
1420 intel_buffer_object(binding->BufferObject);
1421 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1422 if (!binding->AutomaticSize)
1423 size = MIN2(size, binding->Size);
1424 drm_intel_bo *bo =
1425 intel_bufferobj_buffer(brw, intel_bo,
1426 binding->Offset,
1427 size);
1428 brw_create_buffer_surface(brw, bo, binding->Offset,
1429 size,
1430 &ssbo_surf_offsets[i]);
1431 }
1432 }
1433
1434 if (prog->info.num_ubos || prog->info.num_ssbos)
1435 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1436 }
1437
1438 static void
1439 brw_upload_wm_ubo_surfaces(struct brw_context *brw)
1440 {
1441 struct gl_context *ctx = &brw->ctx;
1442 /* _NEW_PROGRAM */
1443 struct gl_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1444
1445 /* BRW_NEW_FS_PROG_DATA */
1446 brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
1447 }
1448
1449 const struct brw_tracked_state brw_wm_ubo_surfaces = {
1450 .dirty = {
1451 .mesa = _NEW_PROGRAM,
1452 .brw = BRW_NEW_BATCH |
1453 BRW_NEW_BLORP |
1454 BRW_NEW_FS_PROG_DATA |
1455 BRW_NEW_UNIFORM_BUFFER,
1456 },
1457 .emit = brw_upload_wm_ubo_surfaces,
1458 };
1459
1460 static void
1461 brw_upload_cs_ubo_surfaces(struct brw_context *brw)
1462 {
1463 struct gl_context *ctx = &brw->ctx;
1464 /* _NEW_PROGRAM */
1465 struct gl_shader_program *prog =
1466 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1467
1468 if (!prog || !prog->_LinkedShaders[MESA_SHADER_COMPUTE])
1469 return;
1470
1471 /* BRW_NEW_CS_PROG_DATA */
1472 brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE]->Program,
1473 &brw->cs.base, brw->cs.base.prog_data);
1474 }
1475
1476 const struct brw_tracked_state brw_cs_ubo_surfaces = {
1477 .dirty = {
1478 .mesa = _NEW_PROGRAM,
1479 .brw = BRW_NEW_BATCH |
1480 BRW_NEW_BLORP |
1481 BRW_NEW_CS_PROG_DATA |
1482 BRW_NEW_UNIFORM_BUFFER,
1483 },
1484 .emit = brw_upload_cs_ubo_surfaces,
1485 };
1486
1487 void
1488 brw_upload_abo_surfaces(struct brw_context *brw,
1489 const struct gl_program *prog,
1490 struct brw_stage_state *stage_state,
1491 struct brw_stage_prog_data *prog_data)
1492 {
1493 struct gl_context *ctx = &brw->ctx;
1494 uint32_t *surf_offsets =
1495 &stage_state->surf_offset[prog_data->binding_table.abo_start];
1496
1497 if (prog->info.num_abos) {
1498 for (unsigned i = 0; i < prog->info.num_abos; i++) {
1499 struct gl_atomic_buffer_binding *binding =
1500 &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
1501 struct intel_buffer_object *intel_bo =
1502 intel_buffer_object(binding->BufferObject);
1503 drm_intel_bo *bo = intel_bufferobj_buffer(
1504 brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);
1505
1506 brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
1507 binding->Offset, BRW_SURFACEFORMAT_RAW,
1508 bo->size - binding->Offset, 1, true);
1509 }
1510
1511 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1512 }
1513 }
1514
1515 static void
1516 brw_upload_wm_abo_surfaces(struct brw_context *brw)
1517 {
1518 /* _NEW_PROGRAM */
1519 const struct gl_program *wm = brw->fragment_program;
1520
1521 if (wm) {
1522 /* BRW_NEW_FS_PROG_DATA */
1523 brw_upload_abo_surfaces(brw, wm, &brw->wm.base, brw->wm.base.prog_data);
1524 }
1525 }
1526
1527 const struct brw_tracked_state brw_wm_abo_surfaces = {
1528 .dirty = {
1529 .mesa = _NEW_PROGRAM,
1530 .brw = BRW_NEW_ATOMIC_BUFFER |
1531 BRW_NEW_BLORP |
1532 BRW_NEW_BATCH |
1533 BRW_NEW_FS_PROG_DATA,
1534 },
1535 .emit = brw_upload_wm_abo_surfaces,
1536 };
1537
1538 static void
1539 brw_upload_cs_abo_surfaces(struct brw_context *brw)
1540 {
1541 /* _NEW_PROGRAM */
1542 const struct gl_program *cp = brw->compute_program;
1543
1544 if (cp) {
1545 /* BRW_NEW_CS_PROG_DATA */
1546 brw_upload_abo_surfaces(brw, cp, &brw->cs.base, brw->cs.base.prog_data);
1547 }
1548 }
1549
1550 const struct brw_tracked_state brw_cs_abo_surfaces = {
1551 .dirty = {
1552 .mesa = _NEW_PROGRAM,
1553 .brw = BRW_NEW_ATOMIC_BUFFER |
1554 BRW_NEW_BLORP |
1555 BRW_NEW_BATCH |
1556 BRW_NEW_CS_PROG_DATA,
1557 },
1558 .emit = brw_upload_cs_abo_surfaces,
1559 };
1560
1561 static void
1562 brw_upload_cs_image_surfaces(struct brw_context *brw)
1563 {
1564 /* _NEW_PROGRAM */
1565 const struct gl_program *cp = brw->compute_program;
1566
1567 if (cp) {
1568 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1569 brw_upload_image_surfaces(brw, cp, &brw->cs.base,
1570 brw->cs.base.prog_data);
1571 }
1572 }
1573
1574 const struct brw_tracked_state brw_cs_image_surfaces = {
1575 .dirty = {
1576 .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
1577 .brw = BRW_NEW_BATCH |
1578 BRW_NEW_BLORP |
1579 BRW_NEW_CS_PROG_DATA |
1580 BRW_NEW_IMAGE_UNITS
1581 },
1582 .emit = brw_upload_cs_image_surfaces,
1583 };
1584
1585 static uint32_t
1586 get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
1587 {
1588 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1589 uint32_t hw_format = brw_format_for_mesa_format(format);
1590 if (access == GL_WRITE_ONLY) {
1591 return hw_format;
1592 } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
1593 /* Typed surface reads support a very limited subset of the shader
1594 * image formats. Translate it into the closest format the
1595 * hardware supports.
1596 */
1597 return isl_lower_storage_image_format(devinfo, hw_format);
1598 } else {
1599 /* The hardware doesn't actually support a typed format that we can use
1600 * so we have to fall back to untyped read/write messages.
1601 */
1602 return BRW_SURFACEFORMAT_RAW;
1603 }
1604 }
1605
1606 static void
1607 update_default_image_param(struct brw_context *brw,
1608 struct gl_image_unit *u,
1609 unsigned surface_idx,
1610 struct brw_image_param *param)
1611 {
1612 memset(param, 0, sizeof(*param));
1613 param->surface_idx = surface_idx;
1614 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
1615 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
1616 * detailed explanation of these parameters.
1617 */
1618 param->swizzling[0] = 0xff;
1619 param->swizzling[1] = 0xff;
1620 }
1621
1622 static void
1623 update_buffer_image_param(struct brw_context *brw,
1624 struct gl_image_unit *u,
1625 unsigned surface_idx,
1626 struct brw_image_param *param)
1627 {
1628 struct gl_buffer_object *obj = u->TexObj->BufferObject;
1629 const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
1630 update_default_image_param(brw, u, surface_idx, param);
1631
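   /* For buffer images the size is a texel count and the only stride that
    * matters is the texel size in bytes; the remaining dimensions stay at
    * the defaults set above.
    */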
1632 param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
1633 param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
1634 }
1635
1636 static void
1637 update_texture_image_param(struct brw_context *brw,
1638 struct gl_image_unit *u,
1639 unsigned surface_idx,
1640 struct brw_image_param *param)
1641 {
1642 struct intel_mipmap_tree *mt = intel_texture_object(u->TexObj)->mt;
1643
1644 update_default_image_param(brw, u, surface_idx, param);
1645
1646 param->size[0] = minify(mt->logical_width0, u->Level);
1647 param->size[1] = minify(mt->logical_height0, u->Level);
1648 param->size[2] = (!u->Layered ? 1 :
1649 u->TexObj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1650 u->TexObj->Target == GL_TEXTURE_3D ?
1651 minify(mt->logical_depth0, u->Level) :
1652 mt->logical_depth0);
1653
1654 intel_miptree_get_image_offset(mt, u->Level, u->_Layer,
1655 &param->offset[0],
1656 &param->offset[1]);
1657
1658 param->stride[0] = mt->cpp;
1659 param->stride[1] = mt->pitch / mt->cpp;
1660 param->stride[2] =
1661 brw_miptree_get_horizontal_slice_pitch(brw, mt, u->Level);
1662 param->stride[3] =
1663 brw_miptree_get_vertical_slice_pitch(brw, mt, u->Level);
1664
1665 if (mt->tiling == I915_TILING_X) {
1666 /* An X tile is a rectangular block of 512x8 bytes. */
1667 param->tiling[0] = _mesa_logbase2(512 / mt->cpp);
1668 param->tiling[1] = _mesa_logbase2(8);
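      /* For example, a hypothetical 4-byte-per-texel format would give
       * tiling[0] = log2(512 / 4) = 7 and tiling[1] = log2(8) = 3.
       */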
1669
1670 if (brw->has_swizzling) {
1671 /* Right shifts required to swizzle bits 9 and 10 of the memory
1672 * address with bit 6.
1673 */
1674 param->swizzling[0] = 3;
1675 param->swizzling[1] = 4;
1676 }
1677 } else if (mt->tiling == I915_TILING_Y) {
1678 /* The layout of a Y-tiled surface in memory isn't really fundamentally
1679 * different from the layout of an X-tiled surface: we simply pretend that
1680 * the surface is broken up into a number of smaller 16Bx32 tiles, each
1681 * one arranged in X-major order just as is the case for X-tiling.
1682 */
1683 param->tiling[0] = _mesa_logbase2(16 / mt->cpp);
1684 param->tiling[1] = _mesa_logbase2(32);
1685
1686 if (brw->has_swizzling) {
1687 /* Right shift required to swizzle bit 9 of the memory address with
1688 * bit 6.
1689 */
1690 param->swizzling[0] = 3;
1691 }
1692 }
1693
1694 /* 3D textures are arranged in 2D in memory with 2^lod slices per row. The
1695 * address calculation algorithm (emit_address_calculation() in
1696 * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
1697 * modulus equal to the LOD.
1698 */
1699 param->tiling[2] = (u->TexObj->Target == GL_TEXTURE_3D ? u->Level :
1700 0);
1701 }
1702
1703 static void
1704 update_image_surface(struct brw_context *brw,
1705 struct gl_image_unit *u,
1706 GLenum access,
1707 unsigned surface_idx,
1708 uint32_t *surf_offset,
1709 struct brw_image_param *param)
1710 {
1711 if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
1712 struct gl_texture_object *obj = u->TexObj;
1713 const unsigned format = get_image_format(brw, u->_ActualFormat, access);
1714
1715 if (obj->Target == GL_TEXTURE_BUFFER) {
1716 struct intel_buffer_object *intel_obj =
1717 intel_buffer_object(obj->BufferObject);
1718 const unsigned texel_size = (format == BRW_SURFACEFORMAT_RAW ? 1 :
1719 _mesa_get_format_bytes(u->_ActualFormat));
1720
1721 brw_emit_buffer_surface_state(
1722 brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
1723 format, intel_obj->Base.Size, texel_size,
1724 access != GL_READ_ONLY);
1725
1726 update_buffer_image_param(brw, u, surface_idx, param);
1727
1728 } else {
1729 struct intel_texture_object *intel_obj = intel_texture_object(obj);
1730 struct intel_mipmap_tree *mt = intel_obj->mt;
1731
1732 if (format == BRW_SURFACEFORMAT_RAW) {
1733 brw_emit_buffer_surface_state(
1734 brw, surf_offset, mt->bo, mt->offset,
1735 format, mt->bo->size - mt->offset, 1 /* pitch */,
1736 access != GL_READ_ONLY);
1737
1738 } else {
1739 const unsigned num_layers = (!u->Layered ? 1 :
1740 obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1741 mt->logical_depth0);
1742
1743 struct isl_view view = {
1744 .format = format,
1745 .base_level = obj->MinLevel + u->Level,
1746 .levels = 1,
1747 .base_array_layer = obj->MinLayer + u->_Layer,
1748 .array_len = num_layers,
1749 .swizzle = ISL_SWIZZLE_IDENTITY,
1750 .usage = ISL_SURF_USAGE_STORAGE_BIT,
1751 };
1752
1753 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
1754 const bool unresolved = intel_miptree_has_color_unresolved(
1755 mt, view.base_level, view.levels,
1756 view.base_array_layer, view.array_len);
1757 const int flags = unresolved ? 0 : INTEL_AUX_BUFFER_DISABLED;
1758 brw_emit_surface_state(brw, mt, flags, mt->target, view,
1759 tex_mocs[brw->gen],
1760 surf_offset, surf_index,
1761 I915_GEM_DOMAIN_SAMPLER,
1762 access == GL_READ_ONLY ? 0 :
1763 I915_GEM_DOMAIN_SAMPLER);
1764 }
1765
1766 update_texture_image_param(brw, u, surface_idx, param);
1767 }
1768
1769 } else {
1770 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
1771 update_default_image_param(brw, u, surface_idx, param);
1772 }
1773 }
1774
1775 void
1776 brw_upload_image_surfaces(struct brw_context *brw,
1777 const struct gl_program *prog,
1778 struct brw_stage_state *stage_state,
1779 struct brw_stage_prog_data *prog_data)
1780 {
1781 assert(prog);
1782 struct gl_context *ctx = &brw->ctx;
1783
1784 if (prog->info.num_images) {
1785 for (unsigned i = 0; i < prog->info.num_images; i++) {
1786 struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
1787 const unsigned surf_idx = prog_data->binding_table.image_start + i;
1788
1789 update_image_surface(brw, u, prog->sh.ImageAccess[i],
1790 surf_idx,
1791 &stage_state->surf_offset[surf_idx],
1792 &prog_data->image_param[i]);
1793 }
1794
1795 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1796 /* This may have changed the image metadata dependent on the context
1797 * image unit state and passed to the program as uniforms; make sure
1798 * that push and pull constants are re-uploaded.
1799 */
1800 brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
1801 }
1802 }
1803
1804 static void
1805 brw_upload_wm_image_surfaces(struct brw_context *brw)
1806 {
1807 /* BRW_NEW_FRAGMENT_PROGRAM */
1808 const struct gl_program *wm = brw->fragment_program;
1809
1810 if (wm) {
1811 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1812 brw_upload_image_surfaces(brw, wm, &brw->wm.base,
1813 brw->wm.base.prog_data);
1814 }
1815 }
1816
1817 const struct brw_tracked_state brw_wm_image_surfaces = {
1818 .dirty = {
1819 .mesa = _NEW_TEXTURE,
1820 .brw = BRW_NEW_BATCH |
1821 BRW_NEW_BLORP |
1822 BRW_NEW_FRAGMENT_PROGRAM |
1823 BRW_NEW_FS_PROG_DATA |
1824 BRW_NEW_IMAGE_UNITS
1825 },
1826 .emit = brw_upload_wm_image_surfaces,
1827 };
1828
1829 void
1830 gen4_init_vtable_surface_functions(struct brw_context *brw)
1831 {
1832 brw->vtbl.update_renderbuffer_surface = gen4_update_renderbuffer_surface;
1833 brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
1834 }
1835
1836 void
1837 gen6_init_vtable_surface_functions(struct brw_context *brw)
1838 {
1839 gen4_init_vtable_surface_functions(brw);
1840 brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
1841 }
1842
1843 static void
1844 brw_upload_cs_work_groups_surface(struct brw_context *brw)
1845 {
1846 struct gl_context *ctx = &brw->ctx;
1847 /* _NEW_PROGRAM */
1848 struct gl_shader_program *prog =
1849 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1850 /* BRW_NEW_CS_PROG_DATA */
1851 const struct brw_cs_prog_data *cs_prog_data =
1852 brw_cs_prog_data(brw->cs.base.prog_data);
1853
1854 if (prog && cs_prog_data->uses_num_work_groups) {
1855 const unsigned surf_idx =
1856 cs_prog_data->binding_table.work_groups_start;
1857 uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
1858 drm_intel_bo *bo;
1859 uint32_t bo_offset;
1860
1861 if (brw->compute.num_work_groups_bo == NULL) {
1862 bo = NULL;
1863 intel_upload_data(brw,
1864 (void *)brw->compute.num_work_groups,
1865 3 * sizeof(GLuint),
1866 sizeof(GLuint),
1867 &bo,
1868 &bo_offset);
1869 } else {
1870 bo = brw->compute.num_work_groups_bo;
1871 bo_offset = brw->compute.num_work_groups_offset;
1872 }
1873
1874 brw_emit_buffer_surface_state(brw, surf_offset,
1875 bo, bo_offset,
1876 BRW_SURFACEFORMAT_RAW,
1877 3 * sizeof(GLuint), 1, true);
1878 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1879 }
1880 }
1881
1882 const struct brw_tracked_state brw_cs_work_groups_surface = {
1883 .dirty = {
1884 .brw = BRW_NEW_BLORP |
1885 BRW_NEW_CS_PROG_DATA |
1886 BRW_NEW_CS_WORK_GROUPS
1887 },
1888 .emit = brw_upload_cs_work_groups_surface,
1889 };