mesa: use gl_program for CurrentProgram rather than gl_shader_program
[mesa.git] / src / mesa / drivers / dri / i965 / brw_wm_surface_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "compiler/nir/nir.h"
34 #include "main/context.h"
35 #include "main/blend.h"
36 #include "main/mtypes.h"
37 #include "main/samplerobj.h"
38 #include "main/shaderimage.h"
39 #include "main/teximage.h"
40 #include "program/prog_parameter.h"
41 #include "program/prog_instruction.h"
42 #include "main/framebuffer.h"
43 #include "main/shaderapi.h"
44
45 #include "isl/isl.h"
46
47 #include "intel_mipmap_tree.h"
48 #include "intel_batchbuffer.h"
49 #include "intel_tex.h"
50 #include "intel_fbo.h"
51 #include "intel_buffer_objects.h"
52
53 #include "brw_context.h"
54 #include "brw_state.h"
55 #include "brw_defines.h"
56 #include "brw_wm.h"
57
58 enum {
59 INTEL_RENDERBUFFER_LAYERED = 1 << 0,
60 INTEL_AUX_BUFFER_DISABLED = 1 << 1,
61 };
62
63 uint32_t tex_mocs[] = {
64 [7] = GEN7_MOCS_L3,
65 [8] = BDW_MOCS_WB,
66 [9] = SKL_MOCS_WB,
67 };
68
69 uint32_t rb_mocs[] = {
70 [7] = GEN7_MOCS_L3,
71 [8] = BDW_MOCS_PTE,
72 [9] = SKL_MOCS_PTE,
73 };
74
75 static void
76 brw_emit_surface_state(struct brw_context *brw,
77 struct intel_mipmap_tree *mt, uint32_t flags,
78 GLenum target, struct isl_view view,
79 uint32_t mocs, uint32_t *surf_offset, int surf_index,
80 unsigned read_domains, unsigned write_domains)
81 {
82 uint32_t tile_x = mt->level[0].slice[0].x_offset;
83 uint32_t tile_y = mt->level[0].slice[0].y_offset;
84 uint32_t offset = mt->offset;
85
86 struct isl_surf surf;
87 intel_miptree_get_isl_surf(brw, mt, &surf);
88
89 surf.dim = get_isl_surf_dim(target);
90
91 const enum isl_dim_layout dim_layout =
92 get_isl_dim_layout(&brw->screen->devinfo, mt->tiling, target);
93
94 if (surf.dim_layout != dim_layout) {
95       /* The layout of the specified texture target is not compatible with the
96        * actual layout of the miptree structure in memory -- you're entering
97        * dangerous territory. This can only work if you intend to access just
98        * a single level and slice of the texture, and the hardware supports
99        * the tile offset feature in order to allow non-tile-aligned base
100       * offsets, since we'll have to point the hardware at the first texel
101       * of the level instead of relying on the usual base level/layer
102       * controls.
103       */
104 assert(brw->has_surface_tile_offset);
105 assert(view.levels == 1 && view.array_len == 1);
106 assert(tile_x == 0 && tile_y == 0);
107
108 offset += intel_miptree_get_tile_offsets(mt, view.base_level,
109 view.base_array_layer,
110 &tile_x, &tile_y);
111
112 /* Minify the logical dimensions of the texture. */
113 const unsigned l = view.base_level - mt->first_level;
114 surf.logical_level0_px.width = minify(surf.logical_level0_px.width, l);
115 surf.logical_level0_px.height = surf.dim <= ISL_SURF_DIM_1D ? 1 :
116 minify(surf.logical_level0_px.height, l);
117 surf.logical_level0_px.depth = surf.dim <= ISL_SURF_DIM_2D ? 1 :
118 minify(surf.logical_level0_px.depth, l);
119
120 /* Only the base level and layer can be addressed with the overridden
121 * layout.
122 */
123 surf.logical_level0_px.array_len = 1;
124 surf.levels = 1;
125 surf.dim_layout = dim_layout;
126
127 /* The requested slice of the texture is now at the base level and
128 * layer.
129 */
130 view.base_level = 0;
131 view.base_array_layer = 0;
132 }
133
134 union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };
135
136 drm_intel_bo *aux_bo;
137 struct isl_surf *aux_surf = NULL, aux_surf_s;
138 uint64_t aux_offset = 0;
139 enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE;
140 if ((mt->mcs_buf || intel_miptree_sample_with_hiz(brw, mt)) &&
141 !(flags & INTEL_AUX_BUFFER_DISABLED)) {
142 intel_miptree_get_aux_isl_surf(brw, mt, &aux_surf_s, &aux_usage);
143 aux_surf = &aux_surf_s;
144
145 if (mt->mcs_buf) {
146 aux_bo = mt->mcs_buf->bo;
147 aux_offset = mt->mcs_buf->bo->offset64 + mt->mcs_buf->offset;
148 } else {
149 aux_bo = mt->hiz_buf->aux_base.bo;
150 aux_offset = mt->hiz_buf->aux_base.bo->offset64;
151 }
152
153 /* We only really need a clear color if we also have an auxiliary
154 * surface. Without one, it does nothing.
155 */
156 clear_color = intel_miptree_get_isl_clear_color(brw, mt);
157 }
158
159 void *state = __brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
160 brw->isl_dev.ss.size,
161 brw->isl_dev.ss.align,
162 surf_index, surf_offset);
163
164 isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
165 .address = mt->bo->offset64 + offset,
166 .aux_surf = aux_surf, .aux_usage = aux_usage,
167 .aux_address = aux_offset,
168 .mocs = mocs, .clear_color = clear_color,
169 .x_offset_sa = tile_x, .y_offset_sa = tile_y);
170
171 drm_intel_bo_emit_reloc(brw->batch.bo,
172 *surf_offset + brw->isl_dev.ss.addr_offset,
173 mt->bo, offset,
174 read_domains, write_domains);
175
176 if (aux_surf) {
177 /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
178 * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
179 * contain other control information. Since buffer addresses are always
180 * on 4k boundaries (and thus have their lower 12 bits zero), we can use
181 * an ordinary reloc to do the necessary address translation.
182 */
183 assert((aux_offset & 0xfff) == 0);
184 uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
185 drm_intel_bo_emit_reloc(brw->batch.bo,
186 *surf_offset + brw->isl_dev.ss.aux_addr_offset,
187 aux_bo, *aux_addr - aux_bo->offset64,
188 read_domains, write_domains);
189 }
190 }
191
192 uint32_t
193 brw_update_renderbuffer_surface(struct brw_context *brw,
194 struct gl_renderbuffer *rb,
195 uint32_t flags, unsigned unit /* unused */,
196 uint32_t surf_index)
197 {
198 struct gl_context *ctx = &brw->ctx;
199 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
200 struct intel_mipmap_tree *mt = irb->mt;
201
202 if (brw->gen < 9) {
203 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
204 }
205
206 assert(brw_render_target_supported(brw, rb));
207
208 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
209 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
210 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
211 __func__, _mesa_get_format_name(rb_format));
212 }
213
214 const unsigned layer_multiplier =
215 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
216 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
217 MAX2(irb->mt->num_samples, 1) : 1;
218
219 struct isl_view view = {
220 .format = brw->render_target_format[rb_format],
221 .base_level = irb->mt_level - irb->mt->first_level,
222 .levels = 1,
223 .base_array_layer = irb->mt_layer / layer_multiplier,
224 .array_len = MAX2(irb->layer_count, 1),
225 .swizzle = ISL_SWIZZLE_IDENTITY,
226 .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
227 };
228
229 uint32_t offset;
230 brw_emit_surface_state(brw, mt, flags, mt->target, view,
231 rb_mocs[brw->gen],
232 &offset, surf_index,
233 I915_GEM_DOMAIN_RENDER,
234 I915_GEM_DOMAIN_RENDER);
235 return offset;
236 }
237
238 GLuint
239 translate_tex_target(GLenum target)
240 {
241 switch (target) {
242 case GL_TEXTURE_1D:
243 case GL_TEXTURE_1D_ARRAY_EXT:
244 return BRW_SURFACE_1D;
245
246 case GL_TEXTURE_RECTANGLE_NV:
247 return BRW_SURFACE_2D;
248
249 case GL_TEXTURE_2D:
250 case GL_TEXTURE_2D_ARRAY_EXT:
251 case GL_TEXTURE_EXTERNAL_OES:
252 case GL_TEXTURE_2D_MULTISAMPLE:
253 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
254 return BRW_SURFACE_2D;
255
256 case GL_TEXTURE_3D:
257 return BRW_SURFACE_3D;
258
259 case GL_TEXTURE_CUBE_MAP:
260 case GL_TEXTURE_CUBE_MAP_ARRAY:
261 return BRW_SURFACE_CUBE;
262
263 default:
264 unreachable("not reached");
265 }
266 }
267
268 uint32_t
269 brw_get_surface_tiling_bits(uint32_t tiling)
270 {
271 switch (tiling) {
272 case I915_TILING_X:
273 return BRW_SURFACE_TILED;
274 case I915_TILING_Y:
275 return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
276 default:
277 return 0;
278 }
279 }
280
281
282 uint32_t
283 brw_get_surface_num_multisamples(unsigned num_samples)
284 {
285 if (num_samples > 1)
286 return BRW_SURFACE_MULTISAMPLECOUNT_4;
287 else
288 return BRW_SURFACE_MULTISAMPLECOUNT_1;
289 }
290
291 /**
292 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
293 * swizzling.
294 */
295 int
296 brw_get_texture_swizzle(const struct gl_context *ctx,
297 const struct gl_texture_object *t)
298 {
299 const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
300
301 int swizzles[SWIZZLE_NIL + 1] = {
302 SWIZZLE_X,
303 SWIZZLE_Y,
304 SWIZZLE_Z,
305 SWIZZLE_W,
306 SWIZZLE_ZERO,
307 SWIZZLE_ONE,
308 SWIZZLE_NIL
309 };
310
311 if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
312 img->_BaseFormat == GL_DEPTH_STENCIL) {
313 GLenum depth_mode = t->DepthMode;
314
315 /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
316 * with depth component data specified with a sized internal format.
317 * Otherwise, it's left at the old default, GL_LUMINANCE.
318 */
319 if (_mesa_is_gles3(ctx) &&
320 img->InternalFormat != GL_DEPTH_COMPONENT &&
321 img->InternalFormat != GL_DEPTH_STENCIL) {
322 depth_mode = GL_RED;
323 }
324
325 switch (depth_mode) {
326 case GL_ALPHA:
327 swizzles[0] = SWIZZLE_ZERO;
328 swizzles[1] = SWIZZLE_ZERO;
329 swizzles[2] = SWIZZLE_ZERO;
330 swizzles[3] = SWIZZLE_X;
331 break;
332 case GL_LUMINANCE:
333 swizzles[0] = SWIZZLE_X;
334 swizzles[1] = SWIZZLE_X;
335 swizzles[2] = SWIZZLE_X;
336 swizzles[3] = SWIZZLE_ONE;
337 break;
338 case GL_INTENSITY:
339 swizzles[0] = SWIZZLE_X;
340 swizzles[1] = SWIZZLE_X;
341 swizzles[2] = SWIZZLE_X;
342 swizzles[3] = SWIZZLE_X;
343 break;
344 case GL_RED:
345 swizzles[0] = SWIZZLE_X;
346 swizzles[1] = SWIZZLE_ZERO;
347 swizzles[2] = SWIZZLE_ZERO;
348 swizzles[3] = SWIZZLE_ONE;
349 break;
350 }
351 }
352
353 GLenum datatype = _mesa_get_format_datatype(img->TexFormat);
354
355 /* If the texture's format is alpha-only, force R, G, and B to
356 * 0.0. Similarly, if the texture's format has no alpha channel,
357 * force the alpha value read to 1.0. This allows for the
358 * implementation to use an RGBA texture for any of these formats
359 * without leaking any unexpected values.
360 */
361 switch (img->_BaseFormat) {
362 case GL_ALPHA:
363 swizzles[0] = SWIZZLE_ZERO;
364 swizzles[1] = SWIZZLE_ZERO;
365 swizzles[2] = SWIZZLE_ZERO;
366 break;
367 case GL_LUMINANCE:
368 if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
369 swizzles[0] = SWIZZLE_X;
370 swizzles[1] = SWIZZLE_X;
371 swizzles[2] = SWIZZLE_X;
372 swizzles[3] = SWIZZLE_ONE;
373 }
374 break;
375 case GL_LUMINANCE_ALPHA:
376 if (datatype == GL_SIGNED_NORMALIZED) {
377 swizzles[0] = SWIZZLE_X;
378 swizzles[1] = SWIZZLE_X;
379 swizzles[2] = SWIZZLE_X;
380 swizzles[3] = SWIZZLE_W;
381 }
382 break;
383 case GL_INTENSITY:
384 if (datatype == GL_SIGNED_NORMALIZED) {
385 swizzles[0] = SWIZZLE_X;
386 swizzles[1] = SWIZZLE_X;
387 swizzles[2] = SWIZZLE_X;
388 swizzles[3] = SWIZZLE_X;
389 }
390 break;
391 case GL_RED:
392 case GL_RG:
393 case GL_RGB:
394 if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0)
395 swizzles[3] = SWIZZLE_ONE;
396 break;
397 }
398
399 return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
400 swizzles[GET_SWZ(t->_Swizzle, 1)],
401 swizzles[GET_SWZ(t->_Swizzle, 2)],
402 swizzles[GET_SWZ(t->_Swizzle, 3)]);
403 }
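
/* Worked example (illustrative note, not part of the original file): for a
 * GL_DEPTH_COMPONENT24 texture with DepthMode GL_LUMINANCE and an identity
 * texture swizzle, the depth branch above yields swizzles[] = {X, X, X, ONE},
 * so the function returns MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X,
 * SWIZZLE_ONE): the depth value is replicated to RGB and alpha reads as 1.0.
 */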
404
405 /**
406  * Convert a swizzle enumeration (e.g. SWIZZLE_X) to one of the Gen7.5+
407  * "Shader Channel Select" enumerations (e.g. HSW_SCS_RED). The mappings are
408  *
409  *    SWIZZLE_X  SWIZZLE_Y  SWIZZLE_Z  SWIZZLE_W  SWIZZLE_ZERO  SWIZZLE_ONE
410  *        0          1          2          3            4            5
411  *        4          5          6          7            0            1
412  *     SCS_RED  SCS_GREEN   SCS_BLUE  SCS_ALPHA     SCS_ZERO      SCS_ONE
413 *
414 * which is simply adding 4 then modding by 8 (or anding with 7).
415 *
416 * We then may need to apply workarounds for textureGather hardware bugs.
417 */
418 static unsigned
419 swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
420 {
421 unsigned scs = (swizzle + 4) & 7;
422
423 return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
424 }
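
/* Illustrative sketch (not part of the original file): a quick spot-check of
 * the mapping above, assuming the full set of HSW_SCS_* enums is available
 * from brw_defines.h. Kept under #if 0 so it serves as documentation only.
 */
#if 0
static void
check_swizzle_to_scs_mapping(void)
{
   assert(swizzle_to_scs(SWIZZLE_X, false) == HSW_SCS_RED);
   assert(swizzle_to_scs(SWIZZLE_W, false) == HSW_SCS_ALPHA);
   assert(swizzle_to_scs(SWIZZLE_ZERO, false) == HSW_SCS_ZERO);
   assert(swizzle_to_scs(SWIZZLE_ONE, false) == HSW_SCS_ONE);
   /* The Haswell textureGather workaround remaps green to blue. */
   assert(swizzle_to_scs(SWIZZLE_Y, true) == HSW_SCS_BLUE);
}
#endif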
425
426 static unsigned
427 brw_find_matching_rb(const struct gl_framebuffer *fb,
428 const struct intel_mipmap_tree *mt)
429 {
430 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
431 const struct intel_renderbuffer *irb =
432 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
433
434 if (irb && irb->mt == mt)
435 return i;
436 }
437
438 return fb->_NumColorDrawBuffers;
439 }
440
441 static inline bool
442 brw_texture_view_sane(const struct brw_context *brw,
443 const struct intel_mipmap_tree *mt,
444 const struct isl_view *view)
445 {
446 /* There are special cases only for lossless compression. */
447 if (!intel_miptree_is_lossless_compressed(brw, mt))
448 return true;
449
450 if (isl_format_supports_lossless_compression(&brw->screen->devinfo,
451 view->format))
452 return true;
453
454 /* Logic elsewhere needs to take care to resolve the color buffer prior
455 * to sampling it as non-compressed.
456 */
457 if (intel_miptree_has_color_unresolved(mt, view->base_level, view->levels,
458 view->base_array_layer,
459 view->array_len))
460 return false;
461
462 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
463 const unsigned rb_index = brw_find_matching_rb(fb, mt);
464
465 if (rb_index == fb->_NumColorDrawBuffers)
466 return true;
467
468 /* Underlying surface is compressed but it is sampled using a format that
469 * the sampling engine doesn't support as compressed. Compression must be
470    * disabled for both the sampling engine and the data port in case the same
471    * surface is also used as a render target.
472 */
473 return brw->draw_aux_buffer_disabled[rb_index];
474 }
475
476 static bool
477 brw_disable_aux_surface(const struct brw_context *brw,
478 const struct intel_mipmap_tree *mt,
479 const struct isl_view *view)
480 {
481 /* Nothing to disable. */
482 if (!mt->mcs_buf)
483 return false;
484
485 const bool is_unresolved = intel_miptree_has_color_unresolved(
486 mt, view->base_level, view->levels,
487 view->base_array_layer, view->array_len);
488
489 /* There are special cases only for lossless compression. */
490 if (!intel_miptree_is_lossless_compressed(brw, mt))
491 return !is_unresolved;
492
493 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
494 const unsigned rb_index = brw_find_matching_rb(fb, mt);
495
496 /* If we are drawing into this with compression enabled, then we must also
497 * enable compression when texturing from it regardless of
498    * fast_clear_state. If we don't, then after the first draw call with
499 * this setup, there will be data in the CCS which won't get picked up by
500 * subsequent texturing operations as required by ARB_texture_barrier.
501 * Since we don't want to re-emit the binding table or do a resolve
502 * operation every draw call, the easiest thing to do is just enable
503 * compression on the texturing side. This is completely safe to do
504 * since, if compressed texturing weren't allowed, we would have disabled
505 * compression of render targets in whatever_that_function_is_called().
506 */
507 if (rb_index < fb->_NumColorDrawBuffers) {
508 if (brw->draw_aux_buffer_disabled[rb_index]) {
509 assert(!is_unresolved);
510 }
511
512 return brw->draw_aux_buffer_disabled[rb_index];
513 }
514
515 return !is_unresolved;
516 }
517
518 void
519 brw_update_texture_surface(struct gl_context *ctx,
520 unsigned unit,
521 uint32_t *surf_offset,
522 bool for_gather,
523 uint32_t plane)
524 {
525 struct brw_context *brw = brw_context(ctx);
526 struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
527
528 if (obj->Target == GL_TEXTURE_BUFFER) {
529 brw_update_buffer_texture_surface(ctx, unit, surf_offset);
530
531 } else {
532 struct intel_texture_object *intel_obj = intel_texture_object(obj);
533 struct intel_mipmap_tree *mt = intel_obj->mt;
534
535 if (plane > 0) {
536 if (mt->plane[plane - 1] == NULL)
537 return;
538 mt = mt->plane[plane - 1];
539 }
540
541 struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
542 /* If this is a view with restricted NumLayers, then our effective depth
543 * is not just the miptree depth.
544 */
545 const unsigned view_num_layers =
546 (obj->Immutable && obj->Target != GL_TEXTURE_3D) ? obj->NumLayers :
547 mt->logical_depth0;
548
549 /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
550 * texturing functions that return a float, as our code generation always
551 * selects the .x channel (which would always be 0).
552 */
553 struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
554 const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
555 (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
556 firstImage->_BaseFormat == GL_DEPTH_STENCIL);
557 const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
558 brw_get_texture_swizzle(&brw->ctx, obj));
559
560 mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
561 unsigned format = translate_tex_format(brw, mesa_fmt,
562 sampler->sRGBDecode);
563
564 /* Implement gen6 and gen7 gather work-around */
565 bool need_green_to_blue = false;
566 if (for_gather) {
567 if (brw->gen == 7 && (format == BRW_SURFACEFORMAT_R32G32_FLOAT ||
568 format == BRW_SURFACEFORMAT_R32G32_SINT ||
569 format == BRW_SURFACEFORMAT_R32G32_UINT)) {
570 format = BRW_SURFACEFORMAT_R32G32_FLOAT_LD;
571 need_green_to_blue = brw->is_haswell;
572 } else if (brw->gen == 6) {
573 /* Sandybridge's gather4 message is broken for integer formats.
574 * To work around this, we pretend the surface is UNORM for
575 * 8 or 16-bit formats, and emit shader instructions to recover
576 * the real INT/UINT value. For 32-bit formats, we pretend
577 * the surface is FLOAT, and simply reinterpret the resulting
578 * bits.
579 */
580 switch (format) {
581 case BRW_SURFACEFORMAT_R8_SINT:
582 case BRW_SURFACEFORMAT_R8_UINT:
583 format = BRW_SURFACEFORMAT_R8_UNORM;
584 break;
585
586 case BRW_SURFACEFORMAT_R16_SINT:
587 case BRW_SURFACEFORMAT_R16_UINT:
588 format = BRW_SURFACEFORMAT_R16_UNORM;
589 break;
590
591 case BRW_SURFACEFORMAT_R32_SINT:
592 case BRW_SURFACEFORMAT_R32_UINT:
593 format = BRW_SURFACEFORMAT_R32_FLOAT;
594 break;
595
596 default:
597 break;
598 }
599 }
600 }
601
602 if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
603 if (brw->gen <= 7) {
604 assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
605 mt = mt->r8stencil_mt;
606 } else {
607 mt = mt->stencil_mt;
608 }
609 format = BRW_SURFACEFORMAT_R8_UINT;
610 } else if (brw->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
611 assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
612 mt = mt->r8stencil_mt;
613 format = BRW_SURFACEFORMAT_R8_UINT;
614 }
615
616 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
617
618 struct isl_view view = {
619 .format = format,
620 .base_level = obj->MinLevel + obj->BaseLevel,
621 .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
622 .base_array_layer = obj->MinLayer,
623 .array_len = view_num_layers,
624 .swizzle = {
625 .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
626 .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
627 .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
628 .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
629 },
630 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
631 };
632
633 if (obj->Target == GL_TEXTURE_CUBE_MAP ||
634 obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
635 view.usage |= ISL_SURF_USAGE_CUBE_BIT;
636
637 assert(brw_texture_view_sane(brw, mt, &view));
638
639 const int flags = brw_disable_aux_surface(brw, mt, &view) ?
640 INTEL_AUX_BUFFER_DISABLED : 0;
641 brw_emit_surface_state(brw, mt, flags, mt->target, view,
642 tex_mocs[brw->gen],
643 surf_offset, surf_index,
644 I915_GEM_DOMAIN_SAMPLER, 0);
645 }
646 }
647
648 void
649 brw_emit_buffer_surface_state(struct brw_context *brw,
650 uint32_t *out_offset,
651 drm_intel_bo *bo,
652 unsigned buffer_offset,
653 unsigned surface_format,
654 unsigned buffer_size,
655 unsigned pitch,
656 bool rw)
657 {
658 uint32_t *dw = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
659 brw->isl_dev.ss.size,
660 brw->isl_dev.ss.align,
661 out_offset);
662
663 isl_buffer_fill_state(&brw->isl_dev, dw,
664 .address = (bo ? bo->offset64 : 0) + buffer_offset,
665 .size = buffer_size,
666 .format = surface_format,
667 .stride = pitch,
668 .mocs = tex_mocs[brw->gen]);
669
670 if (bo) {
671 drm_intel_bo_emit_reloc(brw->batch.bo,
672 *out_offset + brw->isl_dev.ss.addr_offset,
673 bo, buffer_offset,
674 I915_GEM_DOMAIN_SAMPLER,
675 (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
676 }
677 }
678
679 void
680 brw_update_buffer_texture_surface(struct gl_context *ctx,
681 unsigned unit,
682 uint32_t *surf_offset)
683 {
684 struct brw_context *brw = brw_context(ctx);
685 struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
686 struct intel_buffer_object *intel_obj =
687 intel_buffer_object(tObj->BufferObject);
688 uint32_t size = tObj->BufferSize;
689 drm_intel_bo *bo = NULL;
690 mesa_format format = tObj->_BufferObjectFormat;
691 uint32_t brw_format = brw_format_for_mesa_format(format);
692 int texel_size = _mesa_get_format_bytes(format);
693
694 if (intel_obj) {
695 size = MIN2(size, intel_obj->Base.Size);
696 bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size);
697 }
698
699 if (brw_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
700 _mesa_problem(NULL, "bad format %s for texture buffer\n",
701 _mesa_get_format_name(format));
702 }
703
704 brw_emit_buffer_surface_state(brw, surf_offset, bo,
705 tObj->BufferOffset,
706 brw_format,
707 size,
708 texel_size,
709 false /* rw */);
710 }
711
712 /**
713 * Create the constant buffer surface. Vertex/fragment shader constants will be
714 * read from this buffer with Data Port Read instructions/messages.
715 */
716 void
717 brw_create_constant_surface(struct brw_context *brw,
718 drm_intel_bo *bo,
719 uint32_t offset,
720 uint32_t size,
721 uint32_t *out_offset)
722 {
723 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
724 BRW_SURFACEFORMAT_R32G32B32A32_FLOAT,
725 size, 1, false);
726 }
727
728 /**
729 * Create the buffer surface. Shader buffer variables will be
730  * read from / written to this buffer with Data Port Read/Write
731 * instructions/messages.
732 */
733 void
734 brw_create_buffer_surface(struct brw_context *brw,
735 drm_intel_bo *bo,
736 uint32_t offset,
737 uint32_t size,
738 uint32_t *out_offset)
739 {
740 /* Use a raw surface so we can reuse existing untyped read/write/atomic
741 * messages. We need these specifically for the fragment shader since they
742  * include a pixel mask header that we need in order to ensure correct
743  * behavior with helper invocations, which cannot write to the buffer.
744 */
745 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
746 BRW_SURFACEFORMAT_RAW,
747 size, 1, true);
748 }
749
750 /**
751 * Set up a binding table entry for use by stream output logic (transform
752 * feedback).
753 *
754 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
755 */
756 void
757 brw_update_sol_surface(struct brw_context *brw,
758 struct gl_buffer_object *buffer_obj,
759 uint32_t *out_offset, unsigned num_vector_components,
760 unsigned stride_dwords, unsigned offset_dwords)
761 {
762 struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
763 uint32_t offset_bytes = 4 * offset_dwords;
764 drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
765 offset_bytes,
766 buffer_obj->Size - offset_bytes);
767 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
768 out_offset);
769 uint32_t pitch_minus_1 = 4*stride_dwords - 1;
770 size_t size_dwords = buffer_obj->Size / 4;
771 uint32_t buffer_size_minus_1, width, height, depth, surface_format;
772
773 /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
774 * too big to map using a single binding table entry?
775 */
776 assert((size_dwords - offset_dwords) / stride_dwords
777 <= BRW_MAX_NUM_BUFFER_ENTRIES);
778
779 if (size_dwords > offset_dwords + num_vector_components) {
780 /* There is room for at least 1 transform feedback output in the buffer.
781 * Compute the number of additional transform feedback outputs the
782 * buffer has room for.
783 */
784 buffer_size_minus_1 =
785 (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
786 } else {
787 /* There isn't even room for a single transform feedback output in the
788 * buffer. We can't configure the binding table entry to prevent output
789 * entirely; we'll have to rely on the geometry shader to detect
790 * overflow. But to minimize the damage in case of a bug, set up the
791 * binding table entry to just allow a single output.
792 */
793 buffer_size_minus_1 = 0;
794 }
795 width = buffer_size_minus_1 & 0x7f;
796 height = (buffer_size_minus_1 & 0xfff80) >> 7;
797 depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
798
799 switch (num_vector_components) {
800 case 1:
801 surface_format = BRW_SURFACEFORMAT_R32_FLOAT;
802 break;
803 case 2:
804 surface_format = BRW_SURFACEFORMAT_R32G32_FLOAT;
805 break;
806 case 3:
807 surface_format = BRW_SURFACEFORMAT_R32G32B32_FLOAT;
808 break;
809 case 4:
810 surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;
811 break;
812 default:
813 unreachable("Invalid vector size for transform feedback output");
814 }
815
816 surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
817 BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
818 surface_format << BRW_SURFACE_FORMAT_SHIFT |
819 BRW_SURFACE_RC_READ_WRITE;
820 surf[1] = bo->offset64 + offset_bytes; /* reloc */
821 surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
822 height << BRW_SURFACE_HEIGHT_SHIFT);
823 surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
824 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
825 surf[4] = 0;
826 surf[5] = 0;
827
828 /* Emit relocation to surface contents. */
829 drm_intel_bo_emit_reloc(brw->batch.bo,
830 *out_offset + 4,
831 bo, offset_bytes,
832 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
833 }
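
/* Worked example (illustrative note, not part of the original file): with
 * stride_dwords = 4, offset_dwords = 0, num_vector_components = 4 and a 64kB
 * buffer (size_dwords = 16384), buffer_size_minus_1 = (16384 - 0 - 4) / 4 =
 * 4095, which packs into the bitfields above as width = 127, height = 31 and
 * depth = 0, while pitch_minus_1 = 4 * 4 - 1 = 15.
 */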
834
835 /* Creates a new WM constant buffer reflecting the current fragment program's
836 * constants, if needed by the fragment program.
837 *
838 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
839 * state atom.
840 */
841 static void
842 brw_upload_wm_pull_constants(struct brw_context *brw)
843 {
844 struct brw_stage_state *stage_state = &brw->wm.base;
845 /* BRW_NEW_FRAGMENT_PROGRAM */
846 struct brw_program *fp = (struct brw_program *) brw->fragment_program;
847 /* BRW_NEW_FS_PROG_DATA */
848 struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;
849
850 _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
851 /* _NEW_PROGRAM_CONSTANTS */
852 brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
853 stage_state, prog_data);
854 }
855
856 const struct brw_tracked_state brw_wm_pull_constants = {
857 .dirty = {
858 .mesa = _NEW_PROGRAM_CONSTANTS,
859 .brw = BRW_NEW_BATCH |
860 BRW_NEW_BLORP |
861 BRW_NEW_FRAGMENT_PROGRAM |
862 BRW_NEW_FS_PROG_DATA,
863 },
864 .emit = brw_upload_wm_pull_constants,
865 };
866
867 /**
868 * Creates a null renderbuffer surface.
869 *
870 * This is used when the shader doesn't write to any color output. An FB
871 * write to target 0 will still be emitted, because that's how the thread is
872 * terminated (and computed depth is returned), so we need to have the
873 * hardware discard the target 0 color output..
874 */
875 static void
876 brw_emit_null_surface_state(struct brw_context *brw,
877 unsigned width,
878 unsigned height,
879 unsigned samples,
880 uint32_t *out_offset)
881 {
882 /* From the Sandy bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
883 * Notes):
884 *
885 * A null surface will be used in instances where an actual surface is
886 * not bound. When a write message is generated to a null surface, no
887 * actual surface is written to. When a read message (including any
888 * sampling engine message) is generated to a null surface, the result
889 * is all zeros. Note that a null surface type is allowed to be used
890  *     with all messages, even if it is not specifically indicated as
891 * supported. All of the remaining fields in surface state are ignored
892 * for null surfaces, with the following exceptions:
893 *
894 * - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
895 * depth buffer’s corresponding state for all render target surfaces,
896 * including null.
897 *
898 * - Surface Format must be R8G8B8A8_UNORM.
899 */
900 unsigned surface_type = BRW_SURFACE_NULL;
901 drm_intel_bo *bo = NULL;
902 unsigned pitch_minus_1 = 0;
903 uint32_t multisampling_state = 0;
904 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
905 out_offset);
906
907 if (samples > 1) {
908       /* On Gen6, null render targets seem to cause GPU hangs when
909        * multisampling. So work around this problem by rendering into a dummy
910        * color buffer.
911 *
912 * To decrease the amount of memory needed by the workaround buffer, we
913 * set its pitch to 128 bytes (the width of a Y tile). This means that
914 * the amount of memory needed for the workaround buffer is
915 * (width_in_tiles + height_in_tiles - 1) tiles.
916 *
917 * Note that since the workaround buffer will be interpreted by the
918 * hardware as an interleaved multisampled buffer, we need to compute
919 * width_in_tiles and height_in_tiles by dividing the width and height
920 * by 16 rather than the normal Y-tile size of 32.
921 */
922 unsigned width_in_tiles = ALIGN(width, 16) / 16;
923 unsigned height_in_tiles = ALIGN(height, 16) / 16;
924 unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
925 brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
926 size_needed);
927 bo = brw->wm.multisampled_null_render_target_bo;
928 surface_type = BRW_SURFACE_2D;
929 pitch_minus_1 = 127;
930 multisampling_state = brw_get_surface_num_multisamples(samples);
931 }
932
933 surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
934 BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
935 if (brw->gen < 6) {
936 surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
937 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
938 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
939 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
940 }
941 surf[1] = bo ? bo->offset64 : 0;
942 surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
943 (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
944
945 /* From Sandy bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
946 * Notes):
947 *
948 * If Surface Type is SURFTYPE_NULL, this field must be TRUE
949 */
950 surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
951 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
952 surf[4] = multisampling_state;
953 surf[5] = 0;
954
955 if (bo) {
956 drm_intel_bo_emit_reloc(brw->batch.bo,
957 *out_offset + 4,
958 bo, 0,
959 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
960 }
961 }
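
/* Worked example (illustrative note, not part of the original file): for a
 * multisampled 1920x1080 framebuffer, the Gen6 workaround above computes
 * width_in_tiles = ALIGN(1920, 16) / 16 = 120 and height_in_tiles =
 * ALIGN(1080, 16) / 16 = 68, so the dummy render target only needs
 * (120 + 68 - 1) * 4096 = 765952 bytes (~748kB) of scratch space.
 */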
962
963 /**
964 * Sets up a surface state structure to point at the given region.
965 * While it is only used for the front/back buffer currently, it should be
966  * usable for further buffers when doing ARB_draw_buffers support.
967 */
968 static uint32_t
969 gen4_update_renderbuffer_surface(struct brw_context *brw,
970 struct gl_renderbuffer *rb,
971 uint32_t flags, unsigned unit,
972 uint32_t surf_index)
973 {
974 struct gl_context *ctx = &brw->ctx;
975 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
976 struct intel_mipmap_tree *mt = irb->mt;
977 uint32_t *surf;
978 uint32_t tile_x, tile_y;
979 uint32_t format = 0;
980 uint32_t offset;
981 /* _NEW_BUFFERS */
982 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
983 /* BRW_NEW_FS_PROG_DATA */
984
985 assert(!(flags & INTEL_RENDERBUFFER_LAYERED));
986 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
987
988 if (rb->TexImage && !brw->has_surface_tile_offset) {
989 intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);
990
991 if (tile_x != 0 || tile_y != 0) {
992 /* Original gen4 hardware couldn't draw to a non-tile-aligned
993        * destination in a miptree unless you actually set up your renderbuffer
994 * as a miptree and used the fragile lod/array_index/etc. controls to
995 * select the image. So, instead, we just make a new single-level
996 * miptree and render into that.
997 */
998 intel_renderbuffer_move_to_temp(brw, irb, false);
999 mt = irb->mt;
1000 }
1001 }
1002
1003 surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32, &offset);
1004
1005 format = brw->render_target_format[rb_format];
1006 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
1007 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
1008 __func__, _mesa_get_format_name(rb_format));
1009 }
1010
1011 surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
1012 format << BRW_SURFACE_FORMAT_SHIFT);
1013
1014 /* reloc */
1015 assert(mt->offset % mt->cpp == 0);
1016 surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
1017 mt->bo->offset64 + mt->offset);
1018
1019 surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
1020 (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
1021
1022 surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
1023 (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
1024
1025 surf[4] = brw_get_surface_num_multisamples(mt->num_samples);
1026
1027 assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
1028 /* Note that the low bits of these fields are missing, so
1029 * there's the possibility of getting in trouble.
1030 */
1031 assert(tile_x % 4 == 0);
1032 assert(tile_y % 2 == 0);
1033 surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
1034 (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
1035 (mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));
1036
1037 if (brw->gen < 6) {
1038 /* _NEW_COLOR */
1039 if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
1040 (ctx->Color.BlendEnabled & (1 << unit)))
1041 surf[0] |= BRW_SURFACE_BLEND_ENABLED;
1042
1043 if (!ctx->Color.ColorMask[unit][0])
1044 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
1045 if (!ctx->Color.ColorMask[unit][1])
1046 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
1047 if (!ctx->Color.ColorMask[unit][2])
1048 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;
1049
1050 /* As mentioned above, disable writes to the alpha component when the
1051 * renderbuffer is XRGB.
1052 */
1053 if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
1054 !ctx->Color.ColorMask[unit][3]) {
1055 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
1056 }
1057 }
1058
1059 drm_intel_bo_emit_reloc(brw->batch.bo,
1060 offset + 4,
1061 mt->bo,
1062 surf[1] - mt->bo->offset64,
1063 I915_GEM_DOMAIN_RENDER,
1064 I915_GEM_DOMAIN_RENDER);
1065
1066 return offset;
1067 }
1068
1069 /**
1070 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
1071 */
1072 void
1073 brw_update_renderbuffer_surfaces(struct brw_context *brw,
1074 const struct gl_framebuffer *fb,
1075 uint32_t render_target_start,
1076 uint32_t *surf_offset)
1077 {
1078 GLuint i;
1079 const unsigned int w = _mesa_geometric_width(fb);
1080 const unsigned int h = _mesa_geometric_height(fb);
1081 const unsigned int s = _mesa_geometric_samples(fb);
1082
1083 /* Update surfaces for drawing buffers */
1084 if (fb->_NumColorDrawBuffers >= 1) {
1085 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1086 const uint32_t surf_index = render_target_start + i;
1087 const int flags = (_mesa_geometric_layers(fb) > 0 ?
1088 INTEL_RENDERBUFFER_LAYERED : 0) |
1089 (brw->draw_aux_buffer_disabled[i] ?
1090 INTEL_AUX_BUFFER_DISABLED : 0);
1091
1092 if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
1093 surf_offset[surf_index] =
1094 brw->vtbl.update_renderbuffer_surface(
1095 brw, fb->_ColorDrawBuffers[i], flags, i, surf_index);
1096 } else {
1097 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1098 &surf_offset[surf_index]);
1099 }
1100 }
1101 } else {
1102 const uint32_t surf_index = render_target_start;
1103 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1104 &surf_offset[surf_index]);
1105 }
1106 }
1107
1108 static void
1109 update_renderbuffer_surfaces(struct brw_context *brw)
1110 {
1111 const struct gl_context *ctx = &brw->ctx;
1112
1113 /* BRW_NEW_FS_PROG_DATA */
1114 const struct brw_wm_prog_data *wm_prog_data =
1115 brw_wm_prog_data(brw->wm.base.prog_data);
1116
1117 /* _NEW_BUFFERS | _NEW_COLOR */
1118 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1119 brw_update_renderbuffer_surfaces(
1120 brw, fb,
1121 wm_prog_data->binding_table.render_target_start,
1122 brw->wm.base.surf_offset);
1123 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1124 }
1125
1126 const struct brw_tracked_state brw_renderbuffer_surfaces = {
1127 .dirty = {
1128 .mesa = _NEW_BUFFERS |
1129 _NEW_COLOR,
1130 .brw = BRW_NEW_BATCH |
1131 BRW_NEW_BLORP |
1132 BRW_NEW_FS_PROG_DATA,
1133 },
1134 .emit = update_renderbuffer_surfaces,
1135 };
1136
1137 const struct brw_tracked_state gen6_renderbuffer_surfaces = {
1138 .dirty = {
1139 .mesa = _NEW_BUFFERS,
1140 .brw = BRW_NEW_BATCH |
1141 BRW_NEW_BLORP,
1142 },
1143 .emit = update_renderbuffer_surfaces,
1144 };
1145
1146 static void
1147 update_renderbuffer_read_surfaces(struct brw_context *brw)
1148 {
1149 const struct gl_context *ctx = &brw->ctx;
1150
1151 /* BRW_NEW_FS_PROG_DATA */
1152 const struct brw_wm_prog_data *wm_prog_data =
1153 brw_wm_prog_data(brw->wm.base.prog_data);
1154
1155 /* BRW_NEW_FRAGMENT_PROGRAM */
1156 if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
1157 brw->fragment_program && brw->fragment_program->info.outputs_read) {
1158 /* _NEW_BUFFERS */
1159 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1160
1161 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
1162 struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
1163 const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
1164 const unsigned surf_index =
1165 wm_prog_data->binding_table.render_target_read_start + i;
1166 uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];
1167
1168 if (irb) {
1169 const unsigned format = brw->render_target_format[
1170 _mesa_get_render_format(ctx, intel_rb_format(irb))];
1171 assert(isl_format_supports_sampling(&brw->screen->devinfo,
1172 format));
1173
1174 /* Override the target of the texture if the render buffer is a
1175 * single slice of a 3D texture (since the minimum array element
1176 * field of the surface state structure is ignored by the sampler
1177 * unit for 3D textures on some hardware), or if the render buffer
1178 * is a 1D array (since shaders always provide the array index
1179 * coordinate at the Z component to avoid state-dependent
1180 * recompiles when changing the texture target of the
1181 * framebuffer).
1182 */
1183 const GLenum target =
1184 (irb->mt->target == GL_TEXTURE_3D &&
1185 irb->layer_count == 1) ? GL_TEXTURE_2D :
1186 irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
1187 irb->mt->target;
1188
1189 /* intel_renderbuffer::mt_layer is expressed in sample units for
1190 * the UMS and CMS multisample layouts, but
1191 * intel_renderbuffer::layer_count is expressed in units of whole
1192 * logical layers regardless of the multisample layout.
1193 */
1194 const unsigned mt_layer_unit =
1195 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
1196 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
1197 MAX2(irb->mt->num_samples, 1) : 1;
1198
1199 const struct isl_view view = {
1200 .format = format,
1201 .base_level = irb->mt_level - irb->mt->first_level,
1202 .levels = 1,
1203 .base_array_layer = irb->mt_layer / mt_layer_unit,
1204 .array_len = irb->layer_count,
1205 .swizzle = ISL_SWIZZLE_IDENTITY,
1206 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
1207 };
1208
1209 const int flags = brw->draw_aux_buffer_disabled[i] ?
1210 INTEL_AUX_BUFFER_DISABLED : 0;
1211 brw_emit_surface_state(brw, irb->mt, flags, target, view,
1212 tex_mocs[brw->gen],
1213 surf_offset, surf_index,
1214 I915_GEM_DOMAIN_SAMPLER, 0);
1215
1216 } else {
1217 brw->vtbl.emit_null_surface_state(
1218 brw, _mesa_geometric_width(fb), _mesa_geometric_height(fb),
1219 _mesa_geometric_samples(fb), surf_offset);
1220 }
1221 }
1222
1223 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1224 }
1225 }
1226
1227 const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
1228 .dirty = {
1229 .mesa = _NEW_BUFFERS,
1230 .brw = BRW_NEW_BATCH |
1231 BRW_NEW_FRAGMENT_PROGRAM |
1232 BRW_NEW_FS_PROG_DATA,
1233 },
1234 .emit = update_renderbuffer_read_surfaces,
1235 };
1236
1237 static void
1238 update_stage_texture_surfaces(struct brw_context *brw,
1239 const struct gl_program *prog,
1240 struct brw_stage_state *stage_state,
1241 bool for_gather, uint32_t plane)
1242 {
1243 if (!prog)
1244 return;
1245
1246 struct gl_context *ctx = &brw->ctx;
1247
1248 uint32_t *surf_offset = stage_state->surf_offset;
1249
1250 /* BRW_NEW_*_PROG_DATA */
1251 if (for_gather)
1252 surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
1253 else
1254 surf_offset += stage_state->prog_data->binding_table.plane_start[plane];
1255
1256 unsigned num_samplers = util_last_bit(prog->SamplersUsed);
1257 for (unsigned s = 0; s < num_samplers; s++) {
1258 surf_offset[s] = 0;
1259
1260 if (prog->SamplersUsed & (1 << s)) {
1261 const unsigned unit = prog->SamplerUnits[s];
1262
1263 /* _NEW_TEXTURE */
1264 if (ctx->Texture.Unit[unit]._Current) {
1265 brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather, plane);
1266 }
1267 }
1268 }
1269 }
1270
1271
1272 /**
1273 * Construct SURFACE_STATE objects for enabled textures.
1274 */
1275 static void
1276 brw_update_texture_surfaces(struct brw_context *brw)
1277 {
1278 /* BRW_NEW_VERTEX_PROGRAM */
1279 struct gl_program *vs = (struct gl_program *) brw->vertex_program;
1280
1281 /* BRW_NEW_TESS_PROGRAMS */
1282 struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
1283 struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;
1284
1285 /* BRW_NEW_GEOMETRY_PROGRAM */
1286 struct gl_program *gs = (struct gl_program *) brw->geometry_program;
1287
1288 /* BRW_NEW_FRAGMENT_PROGRAM */
1289 struct gl_program *fs = (struct gl_program *) brw->fragment_program;
1290
1291 /* _NEW_TEXTURE */
1292 update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
1293 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
1294 update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
1295 update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
1296 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);
1297
1298    /* Emit an alternate set of surface state for gather. This
1299     * allows the surface format to be overridden for only the
1300     * gather4 messages. */
1301 if (brw->gen < 8) {
1302 if (vs && vs->nir->info->uses_texture_gather)
1303 update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
1304 if (tcs && tcs->nir->info->uses_texture_gather)
1305 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
1306 if (tes && tes->nir->info->uses_texture_gather)
1307 update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
1308 if (gs && gs->nir->info->uses_texture_gather)
1309 update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
1310 if (fs && fs->nir->info->uses_texture_gather)
1311 update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
1312 }
1313
1314 if (fs) {
1315 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
1316 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
1317 }
1318
1319 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1320 }
1321
1322 const struct brw_tracked_state brw_texture_surfaces = {
1323 .dirty = {
1324 .mesa = _NEW_TEXTURE,
1325 .brw = BRW_NEW_BATCH |
1326 BRW_NEW_BLORP |
1327 BRW_NEW_FRAGMENT_PROGRAM |
1328 BRW_NEW_FS_PROG_DATA |
1329 BRW_NEW_GEOMETRY_PROGRAM |
1330 BRW_NEW_GS_PROG_DATA |
1331 BRW_NEW_TESS_PROGRAMS |
1332 BRW_NEW_TCS_PROG_DATA |
1333 BRW_NEW_TES_PROG_DATA |
1334 BRW_NEW_TEXTURE_BUFFER |
1335 BRW_NEW_VERTEX_PROGRAM |
1336 BRW_NEW_VS_PROG_DATA,
1337 },
1338 .emit = brw_update_texture_surfaces,
1339 };
1340
1341 static void
1342 brw_update_cs_texture_surfaces(struct brw_context *brw)
1343 {
1344 /* BRW_NEW_COMPUTE_PROGRAM */
1345 struct gl_program *cs = (struct gl_program *) brw->compute_program;
1346
1347 /* _NEW_TEXTURE */
1348 update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);
1349
1350    /* Emit an alternate set of surface state for gather. This
1351     * allows the surface format to be overridden for only the
1352     * gather4 messages.
1353     */
1354 if (brw->gen < 8) {
1355 if (cs && cs->nir->info->uses_texture_gather)
1356 update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
1357 }
1358
1359 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1360 }
1361
1362 const struct brw_tracked_state brw_cs_texture_surfaces = {
1363 .dirty = {
1364 .mesa = _NEW_TEXTURE,
1365 .brw = BRW_NEW_BATCH |
1366 BRW_NEW_BLORP |
1367 BRW_NEW_COMPUTE_PROGRAM,
1368 },
1369 .emit = brw_update_cs_texture_surfaces,
1370 };
1371
1372
1373 void
1374 brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
1375 struct brw_stage_state *stage_state,
1376 struct brw_stage_prog_data *prog_data)
1377 {
1378 struct gl_context *ctx = &brw->ctx;
1379
1380 if (!prog)
1381 return;
1382
1383 uint32_t *ubo_surf_offsets =
1384 &stage_state->surf_offset[prog_data->binding_table.ubo_start];
1385
1386 for (int i = 0; i < prog->info.num_ubos; i++) {
1387 struct gl_uniform_buffer_binding *binding =
1388 &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];
1389
1390 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1391 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
1392 } else {
1393 struct intel_buffer_object *intel_bo =
1394 intel_buffer_object(binding->BufferObject);
1395 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1396 if (!binding->AutomaticSize)
1397 size = MIN2(size, binding->Size);
1398 drm_intel_bo *bo =
1399 intel_bufferobj_buffer(brw, intel_bo,
1400 binding->Offset,
1401 size);
1402 brw_create_constant_surface(brw, bo, binding->Offset,
1403 size,
1404 &ubo_surf_offsets[i]);
1405 }
1406 }
1407
1408 uint32_t *ssbo_surf_offsets =
1409 &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
1410
1411 for (int i = 0; i < prog->info.num_ssbos; i++) {
1412 struct gl_shader_storage_buffer_binding *binding =
1413 &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];
1414
1415 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1416 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
1417 } else {
1418 struct intel_buffer_object *intel_bo =
1419 intel_buffer_object(binding->BufferObject);
1420 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1421 if (!binding->AutomaticSize)
1422 size = MIN2(size, binding->Size);
1423 drm_intel_bo *bo =
1424 intel_bufferobj_buffer(brw, intel_bo,
1425 binding->Offset,
1426 size);
1427 brw_create_buffer_surface(brw, bo, binding->Offset,
1428 size,
1429 &ssbo_surf_offsets[i]);
1430 }
1431 }
1432
1433 if (prog->info.num_ubos || prog->info.num_ssbos)
1434 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1435 }
1436
1437 static void
1438 brw_upload_wm_ubo_surfaces(struct brw_context *brw)
1439 {
1440 struct gl_context *ctx = &brw->ctx;
1441 /* _NEW_PROGRAM */
1442 struct gl_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1443
1444 /* BRW_NEW_FS_PROG_DATA */
1445 brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
1446 }
1447
1448 const struct brw_tracked_state brw_wm_ubo_surfaces = {
1449 .dirty = {
1450 .mesa = _NEW_PROGRAM,
1451 .brw = BRW_NEW_BATCH |
1452 BRW_NEW_BLORP |
1453 BRW_NEW_FS_PROG_DATA |
1454 BRW_NEW_UNIFORM_BUFFER,
1455 },
1456 .emit = brw_upload_wm_ubo_surfaces,
1457 };
1458
1459 static void
1460 brw_upload_cs_ubo_surfaces(struct brw_context *brw)
1461 {
1462 struct gl_context *ctx = &brw->ctx;
1463 /* _NEW_PROGRAM */
1464 struct gl_program *prog =
1465 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1466
1467 /* BRW_NEW_CS_PROG_DATA */
1468 brw_upload_ubo_surfaces(brw, prog, &brw->cs.base, brw->cs.base.prog_data);
1469 }
1470
1471 const struct brw_tracked_state brw_cs_ubo_surfaces = {
1472 .dirty = {
1473 .mesa = _NEW_PROGRAM,
1474 .brw = BRW_NEW_BATCH |
1475 BRW_NEW_BLORP |
1476 BRW_NEW_CS_PROG_DATA |
1477 BRW_NEW_UNIFORM_BUFFER,
1478 },
1479 .emit = brw_upload_cs_ubo_surfaces,
1480 };
1481
1482 void
1483 brw_upload_abo_surfaces(struct brw_context *brw,
1484 const struct gl_program *prog,
1485 struct brw_stage_state *stage_state,
1486 struct brw_stage_prog_data *prog_data)
1487 {
1488 struct gl_context *ctx = &brw->ctx;
1489 uint32_t *surf_offsets =
1490 &stage_state->surf_offset[prog_data->binding_table.abo_start];
1491
1492 if (prog->info.num_abos) {
1493 for (unsigned i = 0; i < prog->info.num_abos; i++) {
1494 struct gl_atomic_buffer_binding *binding =
1495 &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
1496 struct intel_buffer_object *intel_bo =
1497 intel_buffer_object(binding->BufferObject);
1498 drm_intel_bo *bo = intel_bufferobj_buffer(
1499 brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);
1500
1501 brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
1502 binding->Offset, BRW_SURFACEFORMAT_RAW,
1503 bo->size - binding->Offset, 1, true);
1504 }
1505
1506 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1507 }
1508 }
1509
1510 static void
1511 brw_upload_wm_abo_surfaces(struct brw_context *brw)
1512 {
1513 /* _NEW_PROGRAM */
1514 const struct gl_program *wm = brw->fragment_program;
1515
1516 if (wm) {
1517 /* BRW_NEW_FS_PROG_DATA */
1518 brw_upload_abo_surfaces(brw, wm, &brw->wm.base, brw->wm.base.prog_data);
1519 }
1520 }
1521
1522 const struct brw_tracked_state brw_wm_abo_surfaces = {
1523 .dirty = {
1524 .mesa = _NEW_PROGRAM,
1525 .brw = BRW_NEW_ATOMIC_BUFFER |
1526 BRW_NEW_BLORP |
1527 BRW_NEW_BATCH |
1528 BRW_NEW_FS_PROG_DATA,
1529 },
1530 .emit = brw_upload_wm_abo_surfaces,
1531 };
1532
1533 static void
1534 brw_upload_cs_abo_surfaces(struct brw_context *brw)
1535 {
1536 /* _NEW_PROGRAM */
1537 const struct gl_program *cp = brw->compute_program;
1538
1539 if (cp) {
1540 /* BRW_NEW_CS_PROG_DATA */
1541 brw_upload_abo_surfaces(brw, cp, &brw->cs.base, brw->cs.base.prog_data);
1542 }
1543 }
1544
1545 const struct brw_tracked_state brw_cs_abo_surfaces = {
1546 .dirty = {
1547 .mesa = _NEW_PROGRAM,
1548 .brw = BRW_NEW_ATOMIC_BUFFER |
1549 BRW_NEW_BLORP |
1550 BRW_NEW_BATCH |
1551 BRW_NEW_CS_PROG_DATA,
1552 },
1553 .emit = brw_upload_cs_abo_surfaces,
1554 };
1555
1556 static void
1557 brw_upload_cs_image_surfaces(struct brw_context *brw)
1558 {
1559 /* _NEW_PROGRAM */
1560 const struct gl_program *cp = brw->compute_program;
1561
1562 if (cp) {
1563 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1564 brw_upload_image_surfaces(brw, cp, &brw->cs.base,
1565 brw->cs.base.prog_data);
1566 }
1567 }
1568
1569 const struct brw_tracked_state brw_cs_image_surfaces = {
1570 .dirty = {
1571 .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
1572 .brw = BRW_NEW_BATCH |
1573 BRW_NEW_BLORP |
1574 BRW_NEW_CS_PROG_DATA |
1575 BRW_NEW_IMAGE_UNITS
1576 },
1577 .emit = brw_upload_cs_image_surfaces,
1578 };
1579
1580 static uint32_t
1581 get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
1582 {
1583 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1584 uint32_t hw_format = brw_format_for_mesa_format(format);
1585 if (access == GL_WRITE_ONLY) {
1586 return hw_format;
1587 } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
1588 /* Typed surface reads support a very limited subset of the shader
1589 * image formats. Translate it into the closest format the
1590 * hardware supports.
1591 */
1592 return isl_lower_storage_image_format(devinfo, hw_format);
1593 } else {
1594 /* The hardware doesn't actually support a typed format that we can use
1595 * so we have to fall back to untyped read/write messages.
1596 */
1597 return BRW_SURFACEFORMAT_RAW;
1598 }
1599 }
1600
1601 static void
1602 update_default_image_param(struct brw_context *brw,
1603 struct gl_image_unit *u,
1604 unsigned surface_idx,
1605 struct brw_image_param *param)
1606 {
1607 memset(param, 0, sizeof(*param));
1608 param->surface_idx = surface_idx;
1609 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
1610 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
1611 * detailed explanation of these parameters.
1612 */
1613 param->swizzling[0] = 0xff;
1614 param->swizzling[1] = 0xff;
1615 }
1616
1617 static void
1618 update_buffer_image_param(struct brw_context *brw,
1619 struct gl_image_unit *u,
1620 unsigned surface_idx,
1621 struct brw_image_param *param)
1622 {
1623 struct gl_buffer_object *obj = u->TexObj->BufferObject;
1624 const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
1625 update_default_image_param(brw, u, surface_idx, param);
1626
1627 param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
1628 param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
1629 }
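
/* Worked example (illustrative note, not part of the original file): a buffer
 * image with _ActualFormat MESA_FORMAT_RGBA_FLOAT32 (16 bytes per texel) and
 * an effective size of 4096 bytes ends up with param->size[0] = 4096 / 16 =
 * 256 texels and param->stride[0] = 16.
 */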
1630
1631 static void
1632 update_texture_image_param(struct brw_context *brw,
1633 struct gl_image_unit *u,
1634 unsigned surface_idx,
1635 struct brw_image_param *param)
1636 {
1637 struct intel_mipmap_tree *mt = intel_texture_object(u->TexObj)->mt;
1638
1639 update_default_image_param(brw, u, surface_idx, param);
1640
1641 param->size[0] = minify(mt->logical_width0, u->Level);
1642 param->size[1] = minify(mt->logical_height0, u->Level);
1643 param->size[2] = (!u->Layered ? 1 :
1644 u->TexObj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1645 u->TexObj->Target == GL_TEXTURE_3D ?
1646 minify(mt->logical_depth0, u->Level) :
1647 mt->logical_depth0);
1648
1649 intel_miptree_get_image_offset(mt, u->Level, u->_Layer,
1650 &param->offset[0],
1651 &param->offset[1]);
1652
1653 param->stride[0] = mt->cpp;
1654 param->stride[1] = mt->pitch / mt->cpp;
1655 param->stride[2] =
1656 brw_miptree_get_horizontal_slice_pitch(brw, mt, u->Level);
1657 param->stride[3] =
1658 brw_miptree_get_vertical_slice_pitch(brw, mt, u->Level);
1659
1660 if (mt->tiling == I915_TILING_X) {
1661 /* An X tile is a rectangular block of 512x8 bytes. */
1662 param->tiling[0] = _mesa_logbase2(512 / mt->cpp);
1663 param->tiling[1] = _mesa_logbase2(8);
1664
1665 if (brw->has_swizzling) {
1666 /* Right shifts required to swizzle bits 9 and 10 of the memory
1667 * address with bit 6.
1668 */
1669 param->swizzling[0] = 3;
1670 param->swizzling[1] = 4;
1671 }
1672 } else if (mt->tiling == I915_TILING_Y) {
1673 /* The layout of a Y-tiled surface in memory isn't really fundamentally
1674     * different from the layout of an X-tiled surface; we simply pretend that
1675     * the surface is broken up into a number of smaller 16Bx32 tiles, each
1676     * one arranged in X-major order just as is the case for X-tiling.
1677 */
1678 param->tiling[0] = _mesa_logbase2(16 / mt->cpp);
1679 param->tiling[1] = _mesa_logbase2(32);
1680
1681 if (brw->has_swizzling) {
1682 /* Right shift required to swizzle bit 9 of the memory address with
1683 * bit 6.
1684 */
1685 param->swizzling[0] = 3;
1686 }
1687 }
1688
1689 /* 3D textures are arranged in 2D in memory with 2^lod slices per row. The
1690 * address calculation algorithm (emit_address_calculation() in
1691 * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
1692 * modulus equal to the LOD.
1693 */
1694 param->tiling[2] = (u->TexObj->Target == GL_TEXTURE_3D ? u->Level :
1695 0);
1696 }
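
/* Worked example (illustrative note, not part of the original file): for a
 * 32bpp (cpp = 4) X-tiled surface the code above yields tiling[0] =
 * log2(512 / 4) = 7 and tiling[1] = log2(8) = 3, while the same format
 * Y-tiled yields tiling[0] = log2(16 / 4) = 2 and tiling[1] = log2(32) = 5.
 */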
1697
1698 static void
1699 update_image_surface(struct brw_context *brw,
1700 struct gl_image_unit *u,
1701 GLenum access,
1702 unsigned surface_idx,
1703 uint32_t *surf_offset,
1704 struct brw_image_param *param)
1705 {
1706 if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
1707 struct gl_texture_object *obj = u->TexObj;
1708 const unsigned format = get_image_format(brw, u->_ActualFormat, access);
1709
1710 if (obj->Target == GL_TEXTURE_BUFFER) {
1711 struct intel_buffer_object *intel_obj =
1712 intel_buffer_object(obj->BufferObject);
1713 const unsigned texel_size = (format == BRW_SURFACEFORMAT_RAW ? 1 :
1714 _mesa_get_format_bytes(u->_ActualFormat));
1715
1716 brw_emit_buffer_surface_state(
1717 brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
1718 format, intel_obj->Base.Size, texel_size,
1719 access != GL_READ_ONLY);
1720
1721 update_buffer_image_param(brw, u, surface_idx, param);
1722
1723 } else {
1724 struct intel_texture_object *intel_obj = intel_texture_object(obj);
1725 struct intel_mipmap_tree *mt = intel_obj->mt;
1726
1727 if (format == BRW_SURFACEFORMAT_RAW) {
1728 brw_emit_buffer_surface_state(
1729 brw, surf_offset, mt->bo, mt->offset,
1730 format, mt->bo->size - mt->offset, 1 /* pitch */,
1731 access != GL_READ_ONLY);
1732
1733 } else {
1734 const unsigned num_layers = (!u->Layered ? 1 :
1735 obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1736 mt->logical_depth0);
1737
1738 struct isl_view view = {
1739 .format = format,
1740 .base_level = obj->MinLevel + u->Level,
1741 .levels = 1,
1742 .base_array_layer = obj->MinLayer + u->_Layer,
1743 .array_len = num_layers,
1744 .swizzle = ISL_SWIZZLE_IDENTITY,
1745 .usage = ISL_SURF_USAGE_STORAGE_BIT,
1746 };
1747
1748 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
1749 const bool unresolved = intel_miptree_has_color_unresolved(
1750 mt, view.base_level, view.levels,
1751 view.base_array_layer, view.array_len);
1752 const int flags = unresolved ? 0 : INTEL_AUX_BUFFER_DISABLED;
1753 brw_emit_surface_state(brw, mt, flags, mt->target, view,
1754 tex_mocs[brw->gen],
1755 surf_offset, surf_index,
1756 I915_GEM_DOMAIN_SAMPLER,
1757 access == GL_READ_ONLY ? 0 :
1758 I915_GEM_DOMAIN_SAMPLER);
1759 }
1760
1761 update_texture_image_param(brw, u, surface_idx, param);
1762 }
1763
1764 } else {
1765 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
1766 update_default_image_param(brw, u, surface_idx, param);
1767 }
1768 }
1769
1770 void
1771 brw_upload_image_surfaces(struct brw_context *brw,
1772 const struct gl_program *prog,
1773 struct brw_stage_state *stage_state,
1774 struct brw_stage_prog_data *prog_data)
1775 {
1776 assert(prog);
1777 struct gl_context *ctx = &brw->ctx;
1778
1779 if (prog->info.num_images) {
1780 for (unsigned i = 0; i < prog->info.num_images; i++) {
1781 struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
1782 const unsigned surf_idx = prog_data->binding_table.image_start + i;
1783
1784 update_image_surface(brw, u, prog->sh.ImageAccess[i],
1785 surf_idx,
1786 &stage_state->surf_offset[surf_idx],
1787 &prog_data->image_param[i]);
1788 }
1789
1790 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1791 /* This may have changed the image metadata dependent on the context
1792     * image unit state and passed to the program as uniforms, so make sure
1793 * that push and pull constants are reuploaded.
1794 */
1795 brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
1796 }
1797 }
1798
1799 static void
1800 brw_upload_wm_image_surfaces(struct brw_context *brw)
1801 {
1802 /* BRW_NEW_FRAGMENT_PROGRAM */
1803 const struct gl_program *wm = brw->fragment_program;
1804
1805 if (wm) {
1806 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1807 brw_upload_image_surfaces(brw, wm, &brw->wm.base,
1808 brw->wm.base.prog_data);
1809 }
1810 }
1811
1812 const struct brw_tracked_state brw_wm_image_surfaces = {
1813 .dirty = {
1814 .mesa = _NEW_TEXTURE,
1815 .brw = BRW_NEW_BATCH |
1816 BRW_NEW_BLORP |
1817 BRW_NEW_FRAGMENT_PROGRAM |
1818 BRW_NEW_FS_PROG_DATA |
1819 BRW_NEW_IMAGE_UNITS
1820 },
1821 .emit = brw_upload_wm_image_surfaces,
1822 };
1823
1824 void
1825 gen4_init_vtable_surface_functions(struct brw_context *brw)
1826 {
1827 brw->vtbl.update_renderbuffer_surface = gen4_update_renderbuffer_surface;
1828 brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
1829 }
1830
1831 void
1832 gen6_init_vtable_surface_functions(struct brw_context *brw)
1833 {
1834 gen4_init_vtable_surface_functions(brw);
1835 brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
1836 }
1837
1838 static void
1839 brw_upload_cs_work_groups_surface(struct brw_context *brw)
1840 {
1841 struct gl_context *ctx = &brw->ctx;
1842 /* _NEW_PROGRAM */
1843 struct gl_program *prog =
1844 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1845 /* BRW_NEW_CS_PROG_DATA */
1846 const struct brw_cs_prog_data *cs_prog_data =
1847 brw_cs_prog_data(brw->cs.base.prog_data);
1848
1849 if (prog && cs_prog_data->uses_num_work_groups) {
1850 const unsigned surf_idx =
1851 cs_prog_data->binding_table.work_groups_start;
1852 uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
1853 drm_intel_bo *bo;
1854 uint32_t bo_offset;
1855
1856 if (brw->compute.num_work_groups_bo == NULL) {
1857 bo = NULL;
1858 intel_upload_data(brw,
1859 (void *)brw->compute.num_work_groups,
1860 3 * sizeof(GLuint),
1861 sizeof(GLuint),
1862 &bo,
1863 &bo_offset);
1864 } else {
1865 bo = brw->compute.num_work_groups_bo;
1866 bo_offset = brw->compute.num_work_groups_offset;
1867 }
1868
1869 brw_emit_buffer_surface_state(brw, surf_offset,
1870 bo, bo_offset,
1871 BRW_SURFACEFORMAT_RAW,
1872 3 * sizeof(GLuint), 1, true);
1873 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1874 }
1875 }
1876
1877 const struct brw_tracked_state brw_cs_work_groups_surface = {
1878 .dirty = {
1879 .brw = BRW_NEW_BLORP |
1880 BRW_NEW_CS_PROG_DATA |
1881 BRW_NEW_CS_WORK_GROUPS
1882 },
1883 .emit = brw_upload_cs_work_groups_surface,
1884 };