i965: Add missing BRW_CS_PROG_DATA to CS work group surface atom.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_wm_surface_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "main/context.h"
34 #include "main/blend.h"
35 #include "main/mtypes.h"
36 #include "main/samplerobj.h"
37 #include "main/shaderimage.h"
38 #include "main/teximage.h"
39 #include "program/prog_parameter.h"
40 #include "program/prog_instruction.h"
41 #include "main/framebuffer.h"
42 #include "main/shaderapi.h"
43
44 #include "isl/isl.h"
45
46 #include "intel_mipmap_tree.h"
47 #include "intel_batchbuffer.h"
48 #include "intel_tex.h"
49 #include "intel_fbo.h"
50 #include "intel_buffer_objects.h"
51
52 #include "brw_context.h"
53 #include "brw_state.h"
54 #include "brw_defines.h"
55 #include "brw_wm.h"
56
57 enum {
58 INTEL_RENDERBUFFER_LAYERED = 1 << 0,
59 INTEL_AUX_BUFFER_DISABLED = 1 << 1,
60 };
61
62 struct surface_state_info {
63 unsigned num_dwords;
64 unsigned ss_align; /* Required alignment of RENDER_SURFACE_STATE in bytes */
65 unsigned reloc_dw;
66 unsigned aux_reloc_dw;
67 unsigned tex_mocs;
68 unsigned rb_mocs;
69 };
70
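/* Per-generation RENDER_SURFACE_STATE layout info, indexed by brw->gen.
 * Fields omitted from an initializer (e.g. the MOCS values on gen4-6) are
 * simply left zero.
 */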
71 static const struct surface_state_info surface_state_infos[] = {
72 [4] = {6, 32, 1, 0},
73 [5] = {6, 32, 1, 0},
74 [6] = {6, 32, 1, 0},
75 [7] = {8, 32, 1, 6, GEN7_MOCS_L3, GEN7_MOCS_L3},
76 [8] = {13, 64, 8, 10, BDW_MOCS_WB, BDW_MOCS_PTE},
77 [9] = {16, 64, 8, 10, SKL_MOCS_WB, SKL_MOCS_PTE},
78 };
79
80 static void
81 brw_emit_surface_state(struct brw_context *brw,
82 struct intel_mipmap_tree *mt, uint32_t flags,
83 GLenum target, struct isl_view view,
84 uint32_t mocs, uint32_t *surf_offset, int surf_index,
85 unsigned read_domains, unsigned write_domains)
86 {
87 const struct surface_state_info ss_info = surface_state_infos[brw->gen];
88 uint32_t tile_x = 0, tile_y = 0;
89 uint32_t offset = mt->offset;
90
91 struct isl_surf surf;
92 intel_miptree_get_isl_surf(brw, mt, &surf);
93
94 surf.dim = get_isl_surf_dim(target);
95
96 const enum isl_dim_layout dim_layout =
97 get_isl_dim_layout(&brw->screen->devinfo, mt->tiling, target);
98
99 if (surf.dim_layout != dim_layout) {
100 /* The layout of the specified texture target is not compatible with the
101  * actual layout of the miptree structure in memory -- you're entering
102  * dangerous territory: this can only possibly work if you only intend
103 * to access a single level and slice of the texture, and the hardware
104 * supports the tile offset feature in order to allow non-tile-aligned
105 * base offsets, since we'll have to point the hardware to the first
106 * texel of the level instead of relying on the usual base level/layer
107 * controls.
108 */
109 assert(brw->has_surface_tile_offset);
110 assert(view.levels == 1 && view.array_len == 1);
111
112 offset += intel_miptree_get_tile_offsets(mt, view.base_level,
113 view.base_array_layer,
114 &tile_x, &tile_y);
115
116 /* Minify the logical dimensions of the texture. */
117 const unsigned l = view.base_level - mt->first_level;
118 surf.logical_level0_px.width = minify(surf.logical_level0_px.width, l);
119 surf.logical_level0_px.height = surf.dim <= ISL_SURF_DIM_1D ? 1 :
120 minify(surf.logical_level0_px.height, l);
121 surf.logical_level0_px.depth = surf.dim <= ISL_SURF_DIM_2D ? 1 :
122 minify(surf.logical_level0_px.depth, l);
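/* e.g. with l == 2, a 64x64x1 level-0 extent becomes 16x16x1. */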
123
124 /* Only the base level and layer can be addressed with the overridden
125 * layout.
126 */
127 surf.logical_level0_px.array_len = 1;
128 surf.levels = 1;
129 surf.dim_layout = dim_layout;
130
131 /* The requested slice of the texture is now at the base level and
132 * layer.
133 */
134 view.base_level = 0;
135 view.base_array_layer = 0;
136 }
137
138 union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };
139
140 struct isl_surf *aux_surf = NULL, aux_surf_s;
141 uint64_t aux_offset = 0;
142 enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE;
143 if (mt->mcs_mt && !(flags & INTEL_AUX_BUFFER_DISABLED)) {
144 intel_miptree_get_aux_isl_surf(brw, mt, &aux_surf_s, &aux_usage);
145 aux_surf = &aux_surf_s;
146 assert(mt->mcs_mt->offset == 0);
147 aux_offset = mt->mcs_mt->bo->offset64;
148
149 /* We only really need a clear color if we also have an auxiliary
150  * surface. Without one, it does nothing.
151 */
152 clear_color = intel_miptree_get_isl_clear_color(brw, mt);
153 }
154
155 uint32_t *dw = __brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
156 ss_info.num_dwords * 4, ss_info.ss_align,
157 surf_index, surf_offset);
158
159 isl_surf_fill_state(&brw->isl_dev, dw, .surf = &surf, .view = &view,
160 .address = mt->bo->offset64 + offset,
161 .aux_surf = aux_surf, .aux_usage = aux_usage,
162 .aux_address = aux_offset,
163 .mocs = mocs, .clear_color = clear_color,
164 .x_offset_sa = tile_x, .y_offset_sa = tile_y);
165
166 drm_intel_bo_emit_reloc(brw->batch.bo,
167 *surf_offset + 4 * ss_info.reloc_dw,
168 mt->bo, offset,
169 read_domains, write_domains);
170
171 if (aux_surf) {
172 /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
173 * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
174 * contain other control information. Since buffer addresses are always
175 * on 4k boundaries (and thus have their lower 12 bits zero), we can use
176 * an ordinary reloc to do the necessary address translation.
177 */
178 assert((aux_offset & 0xfff) == 0);
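/* The reloc delta is the control bits currently in the low 12 bits of the
 * dword, so the relocated value becomes (MCS address | control bits).
 */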
179 drm_intel_bo_emit_reloc(brw->batch.bo,
180 *surf_offset + 4 * ss_info.aux_reloc_dw,
181 mt->mcs_mt->bo, dw[ss_info.aux_reloc_dw] & 0xfff,
182 read_domains, write_domains);
183 }
184 }
185
186 uint32_t
187 brw_update_renderbuffer_surface(struct brw_context *brw,
188 struct gl_renderbuffer *rb,
189 uint32_t flags, unsigned unit /* unused */,
190 uint32_t surf_index)
191 {
192 struct gl_context *ctx = &brw->ctx;
193 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
194 struct intel_mipmap_tree *mt = irb->mt;
195
196 if (brw->gen < 9) {
197 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
198 }
199
200 assert(brw_render_target_supported(brw, rb));
201 intel_miptree_used_for_rendering(mt);
202
203 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
204 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
205 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
206 __func__, _mesa_get_format_name(rb_format));
207 }
208
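/* mt_layer is expressed in sample units for the UMS and CMS multisample
 * layouts, so divide it back down to whole logical layers below.
 */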
209 const unsigned layer_multiplier =
210 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
211 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
212 MAX2(irb->mt->num_samples, 1) : 1;
213
214 struct isl_view view = {
215 .format = brw->render_target_format[rb_format],
216 .base_level = irb->mt_level - irb->mt->first_level,
217 .levels = 1,
218 .base_array_layer = irb->mt_layer / layer_multiplier,
219 .array_len = MAX2(irb->layer_count, 1),
220 .swizzle = ISL_SWIZZLE_IDENTITY,
221 .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
222 };
223
224 uint32_t offset;
225 brw_emit_surface_state(brw, mt, flags, mt->target, view,
226 surface_state_infos[brw->gen].rb_mocs,
227 &offset, surf_index,
228 I915_GEM_DOMAIN_RENDER,
229 I915_GEM_DOMAIN_RENDER);
230 return offset;
231 }
232
233 GLuint
234 translate_tex_target(GLenum target)
235 {
236 switch (target) {
237 case GL_TEXTURE_1D:
238 case GL_TEXTURE_1D_ARRAY_EXT:
239 return BRW_SURFACE_1D;
240
241 case GL_TEXTURE_RECTANGLE_NV:
242 return BRW_SURFACE_2D;
243
244 case GL_TEXTURE_2D:
245 case GL_TEXTURE_2D_ARRAY_EXT:
246 case GL_TEXTURE_EXTERNAL_OES:
247 case GL_TEXTURE_2D_MULTISAMPLE:
248 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
249 return BRW_SURFACE_2D;
250
251 case GL_TEXTURE_3D:
252 return BRW_SURFACE_3D;
253
254 case GL_TEXTURE_CUBE_MAP:
255 case GL_TEXTURE_CUBE_MAP_ARRAY:
256 return BRW_SURFACE_CUBE;
257
258 default:
259 unreachable("not reached");
260 }
261 }
262
263 uint32_t
264 brw_get_surface_tiling_bits(uint32_t tiling)
265 {
266 switch (tiling) {
267 case I915_TILING_X:
268 return BRW_SURFACE_TILED;
269 case I915_TILING_Y:
270 return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
271 default:
272 return 0;
273 }
274 }
275
276
277 uint32_t
278 brw_get_surface_num_multisamples(unsigned num_samples)
279 {
280 if (num_samples > 1)
281 return BRW_SURFACE_MULTISAMPLECOUNT_4;
282 else
283 return BRW_SURFACE_MULTISAMPLECOUNT_1;
284 }
285
286 /**
287 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
288 * swizzling.
289 */
290 int
291 brw_get_texture_swizzle(const struct gl_context *ctx,
292 const struct gl_texture_object *t)
293 {
294 const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
295
296 int swizzles[SWIZZLE_NIL + 1] = {
297 SWIZZLE_X,
298 SWIZZLE_Y,
299 SWIZZLE_Z,
300 SWIZZLE_W,
301 SWIZZLE_ZERO,
302 SWIZZLE_ONE,
303 SWIZZLE_NIL
304 };
305
306 if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
307 img->_BaseFormat == GL_DEPTH_STENCIL) {
308 GLenum depth_mode = t->DepthMode;
309
310 /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
311 * with depth component data specified with a sized internal format.
312 * Otherwise, it's left at the old default, GL_LUMINANCE.
313 */
314 if (_mesa_is_gles3(ctx) &&
315 img->InternalFormat != GL_DEPTH_COMPONENT &&
316 img->InternalFormat != GL_DEPTH_STENCIL) {
317 depth_mode = GL_RED;
318 }
319
320 switch (depth_mode) {
321 case GL_ALPHA:
322 swizzles[0] = SWIZZLE_ZERO;
323 swizzles[1] = SWIZZLE_ZERO;
324 swizzles[2] = SWIZZLE_ZERO;
325 swizzles[3] = SWIZZLE_X;
326 break;
327 case GL_LUMINANCE:
328 swizzles[0] = SWIZZLE_X;
329 swizzles[1] = SWIZZLE_X;
330 swizzles[2] = SWIZZLE_X;
331 swizzles[3] = SWIZZLE_ONE;
332 break;
333 case GL_INTENSITY:
334 swizzles[0] = SWIZZLE_X;
335 swizzles[1] = SWIZZLE_X;
336 swizzles[2] = SWIZZLE_X;
337 swizzles[3] = SWIZZLE_X;
338 break;
339 case GL_RED:
340 swizzles[0] = SWIZZLE_X;
341 swizzles[1] = SWIZZLE_ZERO;
342 swizzles[2] = SWIZZLE_ZERO;
343 swizzles[3] = SWIZZLE_ONE;
344 break;
345 }
346 }
347
348 GLenum datatype = _mesa_get_format_datatype(img->TexFormat);
349
350 /* If the texture's format is alpha-only, force R, G, and B to
351 * 0.0. Similarly, if the texture's format has no alpha channel,
352 * force the alpha value read to 1.0. This allows for the
353 * implementation to use an RGBA texture for any of these formats
354 * without leaking any unexpected values.
355 */
356 switch (img->_BaseFormat) {
357 case GL_ALPHA:
358 swizzles[0] = SWIZZLE_ZERO;
359 swizzles[1] = SWIZZLE_ZERO;
360 swizzles[2] = SWIZZLE_ZERO;
361 break;
362 case GL_LUMINANCE:
363 if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
364 swizzles[0] = SWIZZLE_X;
365 swizzles[1] = SWIZZLE_X;
366 swizzles[2] = SWIZZLE_X;
367 swizzles[3] = SWIZZLE_ONE;
368 }
369 break;
370 case GL_LUMINANCE_ALPHA:
371 if (datatype == GL_SIGNED_NORMALIZED) {
372 swizzles[0] = SWIZZLE_X;
373 swizzles[1] = SWIZZLE_X;
374 swizzles[2] = SWIZZLE_X;
375 swizzles[3] = SWIZZLE_W;
376 }
377 break;
378 case GL_INTENSITY:
379 if (datatype == GL_SIGNED_NORMALIZED) {
380 swizzles[0] = SWIZZLE_X;
381 swizzles[1] = SWIZZLE_X;
382 swizzles[2] = SWIZZLE_X;
383 swizzles[3] = SWIZZLE_X;
384 }
385 break;
386 case GL_RED:
387 case GL_RG:
388 case GL_RGB:
389 if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0)
390 swizzles[3] = SWIZZLE_ONE;
391 break;
392 }
393
394 return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
395 swizzles[GET_SWZ(t->_Swizzle, 1)],
396 swizzles[GET_SWZ(t->_Swizzle, 2)],
397 swizzles[GET_SWZ(t->_Swizzle, 3)]);
398 }
399
400 /**
401  * Convert a swizzle enumeration (e.g. SWIZZLE_X) to one of the Gen7.5+
402  * "Shader Channel Select" enumerations (e.g. HSW_SCS_RED). The mappings are
403 *
404 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
405 * 0 1 2 3 4 5
406 * 4 5 6 7 0 1
407 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
408 *
409 * which is simply adding 4 then modding by 8 (or anding with 7).
410 *
411 * We then may need to apply workarounds for textureGather hardware bugs.
412 */
413 static unsigned
414 swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
415 {
416 unsigned scs = (swizzle + 4) & 7;
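/* e.g. SWIZZLE_X (0) -> HSW_SCS_RED (4), SWIZZLE_ZERO (4) -> HSW_SCS_ZERO (0). */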
417
418 return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
419 }
420
421 static unsigned
422 brw_find_matching_rb(const struct gl_framebuffer *fb,
423 const struct intel_mipmap_tree *mt)
424 {
425 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
426 const struct intel_renderbuffer *irb =
427 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
428
429 if (irb && irb->mt == mt)
430 return i;
431 }
432
433 return fb->_NumColorDrawBuffers;
434 }
435
436 static inline bool
437 brw_texture_view_sane(const struct brw_context *brw,
438 const struct intel_mipmap_tree *mt, unsigned format)
439 {
440 /* There are special cases only for lossless compression. */
441 if (!intel_miptree_is_lossless_compressed(brw, mt))
442 return true;
443
444 if (isl_format_supports_lossless_compression(&brw->screen->devinfo,
445 format))
446 return true;
447
448 /* Logic elsewhere needs to take care to resolve the color buffer prior
449 * to sampling it as non-compressed.
450 */
451 if (mt->fast_clear_state != INTEL_FAST_CLEAR_STATE_RESOLVED)
452 return false;
453
454 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
455 const unsigned rb_index = brw_find_matching_rb(fb, mt);
456
457 if (rb_index == fb->_NumColorDrawBuffers)
458 return true;
459
460 /* Underlying surface is compressed but it is sampled using a format that
461 * the sampling engine doesn't support as compressed. Compression must be
462  * disabled for both the sampling engine and the data port in case the same
463  * surface is also used as a render target.
464 */
465 return brw->draw_aux_buffer_disabled[rb_index];
466 }
467
468 static bool
469 brw_disable_aux_surface(const struct brw_context *brw,
470 const struct intel_mipmap_tree *mt)
471 {
472 /* Nothing to disable. */
473 if (!mt->mcs_mt)
474 return false;
475
476 /* There are special cases only for lossless compression. */
477 if (!intel_miptree_is_lossless_compressed(brw, mt))
478 return mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_RESOLVED;
479
480 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
481 const unsigned rb_index = brw_find_matching_rb(fb, mt);
482
483 /* If we are drawing into this with compression enabled, then we must also
484 * enable compression when texturing from it regardless of
485  * fast_clear_state. If we don't, then after the first draw call with
486 * this setup, there will be data in the CCS which won't get picked up by
487 * subsequent texturing operations as required by ARB_texture_barrier.
488 * Since we don't want to re-emit the binding table or do a resolve
489 * operation every draw call, the easiest thing to do is just enable
490 * compression on the texturing side. This is completely safe to do
491 * since, if compressed texturing weren't allowed, we would have disabled
492 * compression of render targets in whatever_that_function_is_called().
493 */
494 if (rb_index < fb->_NumColorDrawBuffers) {
495 if (brw->draw_aux_buffer_disabled[rb_index]) {
496 assert(mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_RESOLVED);
497 }
498
499 return brw->draw_aux_buffer_disabled[rb_index];
500 }
501
502 return mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_RESOLVED;
503 }
504
505 void
506 brw_update_texture_surface(struct gl_context *ctx,
507 unsigned unit,
508 uint32_t *surf_offset,
509 bool for_gather,
510 uint32_t plane)
511 {
512 struct brw_context *brw = brw_context(ctx);
513 struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
514
515 if (obj->Target == GL_TEXTURE_BUFFER) {
516 brw_update_buffer_texture_surface(ctx, unit, surf_offset);
517
518 } else {
519 struct intel_texture_object *intel_obj = intel_texture_object(obj);
520 struct intel_mipmap_tree *mt = intel_obj->mt;
521
522 if (plane > 0) {
523 if (mt->plane[plane - 1] == NULL)
524 return;
525 mt = mt->plane[plane - 1];
526 }
527
528 struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
529 /* If this is a view with restricted NumLayers, then our effective depth
530 * is not just the miptree depth.
531 */
532 const unsigned view_num_layers =
533 (obj->Immutable && obj->Target != GL_TEXTURE_3D) ? obj->NumLayers :
534 mt->logical_depth0;
535
536 /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
537 * texturing functions that return a float, as our code generation always
538 * selects the .x channel (which would always be 0).
539 */
540 struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
541 const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
542 (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
543 firstImage->_BaseFormat == GL_DEPTH_STENCIL);
544 const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
545 brw_get_texture_swizzle(&brw->ctx, obj));
546
547 mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
548 unsigned format = translate_tex_format(brw, mesa_fmt,
549 sampler->sRGBDecode);
550
551 /* Implement gen6 and gen7 gather work-around */
552 bool need_green_to_blue = false;
553 if (for_gather) {
554 if (brw->gen == 7 && format == BRW_SURFACEFORMAT_R32G32_FLOAT) {
555 format = BRW_SURFACEFORMAT_R32G32_FLOAT_LD;
556 need_green_to_blue = brw->is_haswell;
557 } else if (brw->gen == 6) {
558 /* Sandybridge's gather4 message is broken for integer formats.
559 * To work around this, we pretend the surface is UNORM for
560 * 8 or 16-bit formats, and emit shader instructions to recover
561 * the real INT/UINT value. For 32-bit formats, we pretend
562 * the surface is FLOAT, and simply reinterpret the resulting
563 * bits.
564 */
565 switch (format) {
566 case BRW_SURFACEFORMAT_R8_SINT:
567 case BRW_SURFACEFORMAT_R8_UINT:
568 format = BRW_SURFACEFORMAT_R8_UNORM;
569 break;
570
571 case BRW_SURFACEFORMAT_R16_SINT:
572 case BRW_SURFACEFORMAT_R16_UINT:
573 format = BRW_SURFACEFORMAT_R16_UNORM;
574 break;
575
576 case BRW_SURFACEFORMAT_R32_SINT:
577 case BRW_SURFACEFORMAT_R32_UINT:
578 format = BRW_SURFACEFORMAT_R32_FLOAT;
579 break;
580
581 default:
582 break;
583 }
584 }
585 }
586
587 if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
588 if (brw->gen <= 7) {
589 assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
590 mt = mt->r8stencil_mt;
591 } else {
592 mt = mt->stencil_mt;
593 }
594 format = BRW_SURFACEFORMAT_R8_UINT;
595 } else if (brw->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
596 assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
597 mt = mt->r8stencil_mt;
598 format = BRW_SURFACEFORMAT_R8_UINT;
599 }
600
601 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
602
603 struct isl_view view = {
604 .format = format,
605 .base_level = obj->MinLevel + obj->BaseLevel,
606 .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
607 .base_array_layer = obj->MinLayer,
608 .array_len = view_num_layers,
609 .swizzle = {
610 .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
611 .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
612 .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
613 .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
614 },
615 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
616 };
617
618 if (obj->Target == GL_TEXTURE_CUBE_MAP ||
619 obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
620 view.usage |= ISL_SURF_USAGE_CUBE_BIT;
621
622 assert(brw_texture_view_sane(brw, mt, format));
623
624 const int flags =
625 brw_disable_aux_surface(brw, mt) ? INTEL_AUX_BUFFER_DISABLED : 0;
626 brw_emit_surface_state(brw, mt, flags, mt->target, view,
627 surface_state_infos[brw->gen].tex_mocs,
628 surf_offset, surf_index,
629 I915_GEM_DOMAIN_SAMPLER, 0);
630 }
631 }
632
633 void
634 brw_emit_buffer_surface_state(struct brw_context *brw,
635 uint32_t *out_offset,
636 drm_intel_bo *bo,
637 unsigned buffer_offset,
638 unsigned surface_format,
639 unsigned buffer_size,
640 unsigned pitch,
641 bool rw)
642 {
643 const struct surface_state_info ss_info = surface_state_infos[brw->gen];
644
645 uint32_t *dw = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
646 ss_info.num_dwords * 4, ss_info.ss_align,
647 out_offset);
648
649 isl_buffer_fill_state(&brw->isl_dev, dw,
650 .address = (bo ? bo->offset64 : 0) + buffer_offset,
651 .size = buffer_size,
652 .format = surface_format,
653 .stride = pitch,
654 .mocs = ss_info.tex_mocs);
655
656 if (bo) {
657 drm_intel_bo_emit_reloc(brw->batch.bo,
658 *out_offset + 4 * ss_info.reloc_dw,
659 bo, buffer_offset,
660 I915_GEM_DOMAIN_SAMPLER,
661 (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
662 }
663 }
664
665 void
666 brw_update_buffer_texture_surface(struct gl_context *ctx,
667 unsigned unit,
668 uint32_t *surf_offset)
669 {
670 struct brw_context *brw = brw_context(ctx);
671 struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
672 struct intel_buffer_object *intel_obj =
673 intel_buffer_object(tObj->BufferObject);
674 uint32_t size = tObj->BufferSize;
675 drm_intel_bo *bo = NULL;
676 mesa_format format = tObj->_BufferObjectFormat;
677 uint32_t brw_format = brw_format_for_mesa_format(format);
678 int texel_size = _mesa_get_format_bytes(format);
679
680 if (intel_obj) {
681 size = MIN2(size, intel_obj->Base.Size);
682 bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size);
683 }
684
685 if (brw_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
686 _mesa_problem(NULL, "bad format %s for texture buffer\n",
687 _mesa_get_format_name(format));
688 }
689
690 brw_emit_buffer_surface_state(brw, surf_offset, bo,
691 tObj->BufferOffset,
692 brw_format,
693 size,
694 texel_size,
695 false /* rw */);
696 }
697
698 /**
699 * Create the constant buffer surface. Vertex/fragment shader constants will be
700 * read from this buffer with Data Port Read instructions/messages.
701 */
702 void
703 brw_create_constant_surface(struct brw_context *brw,
704 drm_intel_bo *bo,
705 uint32_t offset,
706 uint32_t size,
707 uint32_t *out_offset)
708 {
709 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
710 BRW_SURFACEFORMAT_R32G32B32A32_FLOAT,
711 size, 1, false);
712 }
713
714 /**
715 * Create the buffer surface. Shader buffer variables will be
716  * read from / written to this buffer with Data Port Read/Write
717 * instructions/messages.
718 */
719 void
720 brw_create_buffer_surface(struct brw_context *brw,
721 drm_intel_bo *bo,
722 uint32_t offset,
723 uint32_t size,
724 uint32_t *out_offset)
725 {
726 /* Use a raw surface so we can reuse existing untyped read/write/atomic
727  * messages. We need these specifically for the fragment shader, since its
728  * messages include a pixel mask header that we need in order to ensure
729  * correct behavior with helper invocations, which cannot write to the buffer.
730 */
731 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
732 BRW_SURFACEFORMAT_RAW,
733 size, 1, true);
734 }
735
736 /**
737 * Set up a binding table entry for use by stream output logic (transform
738 * feedback).
739 *
740 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
741 */
742 void
743 brw_update_sol_surface(struct brw_context *brw,
744 struct gl_buffer_object *buffer_obj,
745 uint32_t *out_offset, unsigned num_vector_components,
746 unsigned stride_dwords, unsigned offset_dwords)
747 {
748 struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
749 uint32_t offset_bytes = 4 * offset_dwords;
750 drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
751 offset_bytes,
752 buffer_obj->Size - offset_bytes);
753 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
754 out_offset);
755 uint32_t pitch_minus_1 = 4*stride_dwords - 1;
756 size_t size_dwords = buffer_obj->Size / 4;
757 uint32_t buffer_size_minus_1, width, height, depth, surface_format;
758
759 /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
760 * too big to map using a single binding table entry?
761 */
762 assert((size_dwords - offset_dwords) / stride_dwords
763 <= BRW_MAX_NUM_BUFFER_ENTRIES);
764
765 if (size_dwords > offset_dwords + num_vector_components) {
766 /* There is room for at least 1 transform feedback output in the buffer.
767 * Compute the number of additional transform feedback outputs the
768 * buffer has room for.
769 */
770 buffer_size_minus_1 =
771 (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
772 } else {
773 /* There isn't even room for a single transform feedback output in the
774 * buffer. We can't configure the binding table entry to prevent output
775 * entirely; we'll have to rely on the geometry shader to detect
776 * overflow. But to minimize the damage in case of a bug, set up the
777 * binding table entry to just allow a single output.
778 */
779 buffer_size_minus_1 = 0;
780 }
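/* buffer_size_minus_1 is split across the usual width (7 bits), height
 * (13 bits) and depth (7 bits) surface-state fields, e.g. a value of
 * 1000 = 0x3e8 gives width = 0x68, height = 0x7 and depth = 0.
 */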
781 width = buffer_size_minus_1 & 0x7f;
782 height = (buffer_size_minus_1 & 0xfff80) >> 7;
783 depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
784
785 switch (num_vector_components) {
786 case 1:
787 surface_format = BRW_SURFACEFORMAT_R32_FLOAT;
788 break;
789 case 2:
790 surface_format = BRW_SURFACEFORMAT_R32G32_FLOAT;
791 break;
792 case 3:
793 surface_format = BRW_SURFACEFORMAT_R32G32B32_FLOAT;
794 break;
795 case 4:
796 surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;
797 break;
798 default:
799 unreachable("Invalid vector size for transform feedback output");
800 }
801
802 surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
803 BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
804 surface_format << BRW_SURFACE_FORMAT_SHIFT |
805 BRW_SURFACE_RC_READ_WRITE;
806 surf[1] = bo->offset64 + offset_bytes; /* reloc */
807 surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
808 height << BRW_SURFACE_HEIGHT_SHIFT);
809 surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
810 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
811 surf[4] = 0;
812 surf[5] = 0;
813
814 /* Emit relocation to surface contents. */
815 drm_intel_bo_emit_reloc(brw->batch.bo,
816 *out_offset + 4,
817 bo, offset_bytes,
818 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
819 }
820
821 /* Creates a new WM constant buffer reflecting the current fragment program's
822 * constants, if needed by the fragment program.
823 *
824 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
825 * state atom.
826 */
827 static void
828 brw_upload_wm_pull_constants(struct brw_context *brw)
829 {
830 struct brw_stage_state *stage_state = &brw->wm.base;
831 /* BRW_NEW_FRAGMENT_PROGRAM */
832 struct brw_fragment_program *fp =
833 (struct brw_fragment_program *) brw->fragment_program;
834 /* BRW_NEW_FS_PROG_DATA */
835 struct brw_stage_prog_data *prog_data = &brw->wm.prog_data->base;
836
837 _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
838 /* _NEW_PROGRAM_CONSTANTS */
839 brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program.Base,
840 stage_state, prog_data);
841 }
842
843 const struct brw_tracked_state brw_wm_pull_constants = {
844 .dirty = {
845 .mesa = _NEW_PROGRAM_CONSTANTS,
846 .brw = BRW_NEW_BATCH |
847 BRW_NEW_BLORP |
848 BRW_NEW_FRAGMENT_PROGRAM |
849 BRW_NEW_FS_PROG_DATA,
850 },
851 .emit = brw_upload_wm_pull_constants,
852 };
853
854 /**
855 * Creates a null renderbuffer surface.
856 *
857 * This is used when the shader doesn't write to any color output. An FB
858 * write to target 0 will still be emitted, because that's how the thread is
859 * terminated (and computed depth is returned), so we need to have the
860  * hardware discard the target 0 color output.
861 */
862 static void
863 brw_emit_null_surface_state(struct brw_context *brw,
864 unsigned width,
865 unsigned height,
866 unsigned samples,
867 uint32_t *out_offset)
868 {
869    /* From the Sandy Bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
870 * Notes):
871 *
872 * A null surface will be used in instances where an actual surface is
873 * not bound. When a write message is generated to a null surface, no
874 * actual surface is written to. When a read message (including any
875 * sampling engine message) is generated to a null surface, the result
876 * is all zeros. Note that a null surface type is allowed to be used
877     *     with all messages, even if it is not specifically indicated as
878 * supported. All of the remaining fields in surface state are ignored
879 * for null surfaces, with the following exceptions:
880 *
881 * - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
882 * depth buffer’s corresponding state for all render target surfaces,
883 * including null.
884 *
885 * - Surface Format must be R8G8B8A8_UNORM.
886 */
887 unsigned surface_type = BRW_SURFACE_NULL;
888 drm_intel_bo *bo = NULL;
889 unsigned pitch_minus_1 = 0;
890 uint32_t multisampling_state = 0;
891 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
892 out_offset);
893
894 if (samples > 1) {
895 /* On Gen6, null render targets seem to cause GPU hangs when
896        * multisampling. So work around this problem by rendering into a dummy
897 * color buffer.
898 *
899 * To decrease the amount of memory needed by the workaround buffer, we
900 * set its pitch to 128 bytes (the width of a Y tile). This means that
901 * the amount of memory needed for the workaround buffer is
902 * (width_in_tiles + height_in_tiles - 1) tiles.
903 *
904 * Note that since the workaround buffer will be interpreted by the
905 * hardware as an interleaved multisampled buffer, we need to compute
906 * width_in_tiles and height_in_tiles by dividing the width and height
907 * by 16 rather than the normal Y-tile size of 32.
908 */
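/* e.g. a 1920x1080 target needs (120 + 68 - 1) * 4096 bytes, i.e. about
 * 748 KB of workaround buffer.
 */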
909 unsigned width_in_tiles = ALIGN(width, 16) / 16;
910 unsigned height_in_tiles = ALIGN(height, 16) / 16;
911 unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
912 brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
913 size_needed);
914 bo = brw->wm.multisampled_null_render_target_bo;
915 surface_type = BRW_SURFACE_2D;
916 pitch_minus_1 = 127;
917 multisampling_state = brw_get_surface_num_multisamples(samples);
918 }
919
920 surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
921 BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
922 if (brw->gen < 6) {
923 surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
924 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
925 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
926 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
927 }
928 surf[1] = bo ? bo->offset64 : 0;
929 surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
930 (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
931
932    /* From the Sandy Bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
933 * Notes):
934 *
935 * If Surface Type is SURFTYPE_NULL, this field must be TRUE
936 */
937 surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
938 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
939 surf[4] = multisampling_state;
940 surf[5] = 0;
941
942 if (bo) {
943 drm_intel_bo_emit_reloc(brw->batch.bo,
944 *out_offset + 4,
945 bo, 0,
946 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
947 }
948 }
949
950 /**
951 * Sets up a surface state structure to point at the given region.
952 * While it is only used for the front/back buffer currently, it should be
953  * usable for further buffers when doing ARB_draw_buffers support.
954 */
955 static uint32_t
956 gen4_update_renderbuffer_surface(struct brw_context *brw,
957 struct gl_renderbuffer *rb,
958 uint32_t flags, unsigned unit,
959 uint32_t surf_index)
960 {
961 struct gl_context *ctx = &brw->ctx;
962 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
963 struct intel_mipmap_tree *mt = irb->mt;
964 uint32_t *surf;
965 uint32_t tile_x, tile_y;
966 uint32_t format = 0;
967 uint32_t offset;
968 /* _NEW_BUFFERS */
969 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
970 /* BRW_NEW_FS_PROG_DATA */
971
972 assert(!(flags & INTEL_RENDERBUFFER_LAYERED));
973 assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
974
975 if (rb->TexImage && !brw->has_surface_tile_offset) {
976 intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);
977
978 if (tile_x != 0 || tile_y != 0) {
979 /* Original gen4 hardware couldn't draw to a non-tile-aligned
980        * destination in a miptree unless you actually set up your renderbuffer
981 * as a miptree and used the fragile lod/array_index/etc. controls to
982 * select the image. So, instead, we just make a new single-level
983 * miptree and render into that.
984 */
985 intel_renderbuffer_move_to_temp(brw, irb, false);
986 mt = irb->mt;
987 }
988 }
989
990 intel_miptree_used_for_rendering(irb->mt);
991
992 surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32, &offset);
993
994 format = brw->render_target_format[rb_format];
995 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
996 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
997 __func__, _mesa_get_format_name(rb_format));
998 }
999
1000 surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
1001 format << BRW_SURFACE_FORMAT_SHIFT);
1002
1003 /* reloc */
1004 assert(mt->offset % mt->cpp == 0);
1005 surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
1006 mt->bo->offset64 + mt->offset);
1007
1008 surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
1009 (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
1010
1011 surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
1012 (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
1013
1014 surf[4] = brw_get_surface_num_multisamples(mt->num_samples);
1015
1016 assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
1017 /* Note that the low bits of these fields are missing, so
1018 * there's the possibility of getting in trouble.
1019 */
1020 assert(tile_x % 4 == 0);
1021 assert(tile_y % 2 == 0);
1022 surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
1023 (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
1024 (mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));
1025
1026 if (brw->gen < 6) {
1027 /* _NEW_COLOR */
1028 if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
1029 (ctx->Color.BlendEnabled & (1 << unit)))
1030 surf[0] |= BRW_SURFACE_BLEND_ENABLED;
1031
1032 if (!ctx->Color.ColorMask[unit][0])
1033 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
1034 if (!ctx->Color.ColorMask[unit][1])
1035 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
1036 if (!ctx->Color.ColorMask[unit][2])
1037 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;
1038
1039 /* As mentioned above, disable writes to the alpha component when the
1040 * renderbuffer is XRGB.
1041 */
1042 if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
1043 !ctx->Color.ColorMask[unit][3]) {
1044 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
1045 }
1046 }
1047
1048 drm_intel_bo_emit_reloc(brw->batch.bo,
1049 offset + 4,
1050 mt->bo,
1051 surf[1] - mt->bo->offset64,
1052 I915_GEM_DOMAIN_RENDER,
1053 I915_GEM_DOMAIN_RENDER);
1054
1055 return offset;
1056 }
1057
1058 /**
1059 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
1060 */
1061 void
1062 brw_update_renderbuffer_surfaces(struct brw_context *brw,
1063 const struct gl_framebuffer *fb,
1064 uint32_t render_target_start,
1065 uint32_t *surf_offset)
1066 {
1067 GLuint i;
1068 const unsigned int w = _mesa_geometric_width(fb);
1069 const unsigned int h = _mesa_geometric_height(fb);
1070 const unsigned int s = _mesa_geometric_samples(fb);
1071
1072 /* Update surfaces for drawing buffers */
1073 if (fb->_NumColorDrawBuffers >= 1) {
1074 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
1075 const uint32_t surf_index = render_target_start + i;
1076 const int flags = (_mesa_geometric_layers(fb) > 0 ?
1077 INTEL_RENDERBUFFER_LAYERED : 0) |
1078 (brw->draw_aux_buffer_disabled[i] ?
1079 INTEL_AUX_BUFFER_DISABLED : 0);
1080
1081 if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
1082 surf_offset[surf_index] =
1083 brw->vtbl.update_renderbuffer_surface(
1084 brw, fb->_ColorDrawBuffers[i], flags, i, surf_index);
1085 } else {
1086 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1087 &surf_offset[surf_index]);
1088 }
1089 }
1090 } else {
1091 const uint32_t surf_index = render_target_start;
1092 brw->vtbl.emit_null_surface_state(brw, w, h, s,
1093 &surf_offset[surf_index]);
1094 }
1095 }
1096
1097 static void
1098 update_renderbuffer_surfaces(struct brw_context *brw)
1099 {
1100 const struct gl_context *ctx = &brw->ctx;
1101
1102 /* _NEW_BUFFERS | _NEW_COLOR */
1103 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1104 brw_update_renderbuffer_surfaces(
1105 brw, fb,
1106 brw->wm.prog_data->binding_table.render_target_start,
1107 brw->wm.base.surf_offset);
1108 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1109 }
1110
1111 const struct brw_tracked_state brw_renderbuffer_surfaces = {
1112 .dirty = {
1113 .mesa = _NEW_BUFFERS |
1114 _NEW_COLOR,
1115 .brw = BRW_NEW_BATCH |
1116 BRW_NEW_BLORP |
1117 BRW_NEW_FS_PROG_DATA,
1118 },
1119 .emit = update_renderbuffer_surfaces,
1120 };
1121
1122 const struct brw_tracked_state gen6_renderbuffer_surfaces = {
1123 .dirty = {
1124 .mesa = _NEW_BUFFERS,
1125 .brw = BRW_NEW_BATCH |
1126 BRW_NEW_BLORP,
1127 },
1128 .emit = update_renderbuffer_surfaces,
1129 };
1130
1131 static void
1132 update_renderbuffer_read_surfaces(struct brw_context *brw)
1133 {
1134 const struct gl_context *ctx = &brw->ctx;
1135
1136 /* BRW_NEW_FRAGMENT_PROGRAM */
1137 if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
1138 brw->fragment_program &&
1139 brw->fragment_program->Base.OutputsRead) {
1140 /* _NEW_BUFFERS */
1141 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1142
1143 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
1144 struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
1145 const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
1146 /* BRW_NEW_FS_PROG_DATA */
1147 const unsigned surf_index =
1148 brw->wm.prog_data->binding_table.render_target_read_start + i;
1149 uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];
1150
1151 if (irb) {
1152 const unsigned format = brw->render_target_format[
1153 _mesa_get_render_format(ctx, intel_rb_format(irb))];
1154 assert(isl_format_supports_sampling(&brw->screen->devinfo,
1155 format));
1156
1157 /* Override the target of the texture if the render buffer is a
1158 * single slice of a 3D texture (since the minimum array element
1159 * field of the surface state structure is ignored by the sampler
1160 * unit for 3D textures on some hardware), or if the render buffer
1161 * is a 1D array (since shaders always provide the array index
1162 * coordinate at the Z component to avoid state-dependent
1163 * recompiles when changing the texture target of the
1164 * framebuffer).
1165 */
1166 const GLenum target =
1167 (irb->mt->target == GL_TEXTURE_3D &&
1168 irb->layer_count == 1) ? GL_TEXTURE_2D :
1169 irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
1170 irb->mt->target;
1171
1172 /* intel_renderbuffer::mt_layer is expressed in sample units for
1173 * the UMS and CMS multisample layouts, but
1174 * intel_renderbuffer::layer_count is expressed in units of whole
1175 * logical layers regardless of the multisample layout.
1176 */
1177 const unsigned mt_layer_unit =
1178 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
1179 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
1180 MAX2(irb->mt->num_samples, 1) : 1;
1181
1182 const struct isl_view view = {
1183 .format = format,
1184 .base_level = irb->mt_level - irb->mt->first_level,
1185 .levels = 1,
1186 .base_array_layer = irb->mt_layer / mt_layer_unit,
1187 .array_len = irb->layer_count,
1188 .swizzle = ISL_SWIZZLE_IDENTITY,
1189 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
1190 };
1191
1192 const int flags = brw->draw_aux_buffer_disabled[i] ?
1193 INTEL_AUX_BUFFER_DISABLED : 0;
1194 brw_emit_surface_state(brw, irb->mt, flags, target, view,
1195 surface_state_infos[brw->gen].tex_mocs,
1196 surf_offset, surf_index,
1197 I915_GEM_DOMAIN_SAMPLER, 0);
1198
1199 } else {
1200 brw->vtbl.emit_null_surface_state(
1201 brw, _mesa_geometric_width(fb), _mesa_geometric_height(fb),
1202 _mesa_geometric_samples(fb), surf_offset);
1203 }
1204 }
1205
1206 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1207 }
1208 }
1209
1210 const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
1211 .dirty = {
1212 .mesa = _NEW_BUFFERS,
1213 .brw = BRW_NEW_BATCH |
1214 BRW_NEW_FRAGMENT_PROGRAM |
1215 BRW_NEW_FS_PROG_DATA,
1216 },
1217 .emit = update_renderbuffer_read_surfaces,
1218 };
1219
1220 static void
1221 update_stage_texture_surfaces(struct brw_context *brw,
1222 const struct gl_program *prog,
1223 struct brw_stage_state *stage_state,
1224 bool for_gather, uint32_t plane)
1225 {
1226 if (!prog)
1227 return;
1228
1229 struct gl_context *ctx = &brw->ctx;
1230
1231 uint32_t *surf_offset = stage_state->surf_offset;
1232
1233 /* BRW_NEW_*_PROG_DATA */
1234 if (for_gather)
1235 surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
1236 else
1237 surf_offset += stage_state->prog_data->binding_table.plane_start[plane];
1238
1239 unsigned num_samplers = util_last_bit(prog->SamplersUsed);
1240 for (unsigned s = 0; s < num_samplers; s++) {
1241 surf_offset[s] = 0;
1242
1243 if (prog->SamplersUsed & (1 << s)) {
1244 const unsigned unit = prog->SamplerUnits[s];
1245
1246 /* _NEW_TEXTURE */
1247 if (ctx->Texture.Unit[unit]._Current) {
1248 brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather, plane);
1249 }
1250 }
1251 }
1252 }
1253
1254
1255 /**
1256 * Construct SURFACE_STATE objects for enabled textures.
1257 */
1258 static void
1259 brw_update_texture_surfaces(struct brw_context *brw)
1260 {
1261 /* BRW_NEW_VERTEX_PROGRAM */
1262 struct gl_program *vs = (struct gl_program *) brw->vertex_program;
1263
1264 /* BRW_NEW_TESS_PROGRAMS */
1265 struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
1266 struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;
1267
1268 /* BRW_NEW_GEOMETRY_PROGRAM */
1269 struct gl_program *gs = (struct gl_program *) brw->geometry_program;
1270
1271 /* BRW_NEW_FRAGMENT_PROGRAM */
1272 struct gl_program *fs = (struct gl_program *) brw->fragment_program;
1273
1274 /* _NEW_TEXTURE */
1275 update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
1276 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
1277 update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
1278 update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
1279 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);
1280
1281    /* Emit an alternate set of surface state for gather. This
1282     * allows the surface format to be overridden for only the
1283     * gather4 messages. */
1284 if (brw->gen < 8) {
1285 if (vs && vs->UsesGather)
1286 update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
1287 if (tcs && tcs->UsesGather)
1288 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
1289 if (tes && tes->UsesGather)
1290 update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
1291 if (gs && gs->UsesGather)
1292 update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
1293 if (fs && fs->UsesGather)
1294 update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
1295 }
1296
1297 if (fs) {
1298 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
1299 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
1300 }
1301
1302 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1303 }
1304
1305 const struct brw_tracked_state brw_texture_surfaces = {
1306 .dirty = {
1307 .mesa = _NEW_TEXTURE,
1308 .brw = BRW_NEW_BATCH |
1309 BRW_NEW_BLORP |
1310 BRW_NEW_FRAGMENT_PROGRAM |
1311 BRW_NEW_FS_PROG_DATA |
1312 BRW_NEW_GEOMETRY_PROGRAM |
1313 BRW_NEW_GS_PROG_DATA |
1314 BRW_NEW_TESS_PROGRAMS |
1315 BRW_NEW_TCS_PROG_DATA |
1316 BRW_NEW_TES_PROG_DATA |
1317 BRW_NEW_TEXTURE_BUFFER |
1318 BRW_NEW_VERTEX_PROGRAM |
1319 BRW_NEW_VS_PROG_DATA,
1320 },
1321 .emit = brw_update_texture_surfaces,
1322 };
1323
1324 static void
1325 brw_update_cs_texture_surfaces(struct brw_context *brw)
1326 {
1327 /* BRW_NEW_COMPUTE_PROGRAM */
1328 struct gl_program *cs = (struct gl_program *) brw->compute_program;
1329
1330 /* _NEW_TEXTURE */
1331 update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);
1332
1333    /* Emit an alternate set of surface state for gather. This
1334     * allows the surface format to be overridden for only the
1335     * gather4 messages.
1336 */
1337 if (brw->gen < 8) {
1338 if (cs && cs->UsesGather)
1339 update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
1340 }
1341
1342 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1343 }
1344
1345 const struct brw_tracked_state brw_cs_texture_surfaces = {
1346 .dirty = {
1347 .mesa = _NEW_TEXTURE,
1348 .brw = BRW_NEW_BATCH |
1349 BRW_NEW_BLORP |
1350 BRW_NEW_COMPUTE_PROGRAM,
1351 },
1352 .emit = brw_update_cs_texture_surfaces,
1353 };
1354
1355
1356 void
1357 brw_upload_ubo_surfaces(struct brw_context *brw,
1358 struct gl_linked_shader *shader,
1359 struct brw_stage_state *stage_state,
1360 struct brw_stage_prog_data *prog_data)
1361 {
1362 struct gl_context *ctx = &brw->ctx;
1363
1364 if (!shader)
1365 return;
1366
1367 uint32_t *ubo_surf_offsets =
1368 &stage_state->surf_offset[prog_data->binding_table.ubo_start];
1369
1370 for (int i = 0; i < shader->NumUniformBlocks; i++) {
1371 struct gl_uniform_buffer_binding *binding =
1372 &ctx->UniformBufferBindings[shader->UniformBlocks[i]->Binding];
1373
1374 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1375 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
1376 } else {
1377 struct intel_buffer_object *intel_bo =
1378 intel_buffer_object(binding->BufferObject);
1379 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1380 if (!binding->AutomaticSize)
1381 size = MIN2(size, binding->Size);
1382 drm_intel_bo *bo =
1383 intel_bufferobj_buffer(brw, intel_bo,
1384 binding->Offset,
1385 size);
1386 brw_create_constant_surface(brw, bo, binding->Offset,
1387 size,
1388 &ubo_surf_offsets[i]);
1389 }
1390 }
1391
1392 uint32_t *ssbo_surf_offsets =
1393 &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
1394
1395 for (int i = 0; i < shader->NumShaderStorageBlocks; i++) {
1396 struct gl_shader_storage_buffer_binding *binding =
1397 &ctx->ShaderStorageBufferBindings[shader->ShaderStorageBlocks[i]->Binding];
1398
1399 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1400 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
1401 } else {
1402 struct intel_buffer_object *intel_bo =
1403 intel_buffer_object(binding->BufferObject);
1404 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1405 if (!binding->AutomaticSize)
1406 size = MIN2(size, binding->Size);
1407 drm_intel_bo *bo =
1408 intel_bufferobj_buffer(brw, intel_bo,
1409 binding->Offset,
1410 size);
1411 brw_create_buffer_surface(brw, bo, binding->Offset,
1412 size,
1413 &ssbo_surf_offsets[i]);
1414 }
1415 }
1416
1417 if (shader->NumUniformBlocks || shader->NumShaderStorageBlocks)
1418 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1419 }
1420
1421 static void
1422 brw_upload_wm_ubo_surfaces(struct brw_context *brw)
1423 {
1424 struct gl_context *ctx = &brw->ctx;
1425 /* _NEW_PROGRAM */
1426 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1427
1428 if (!prog)
1429 return;
1430
1431 /* BRW_NEW_FS_PROG_DATA */
1432 brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1433 &brw->wm.base, &brw->wm.prog_data->base);
1434 }
1435
1436 const struct brw_tracked_state brw_wm_ubo_surfaces = {
1437 .dirty = {
1438 .mesa = _NEW_PROGRAM,
1439 .brw = BRW_NEW_BATCH |
1440 BRW_NEW_BLORP |
1441 BRW_NEW_FS_PROG_DATA |
1442 BRW_NEW_UNIFORM_BUFFER,
1443 },
1444 .emit = brw_upload_wm_ubo_surfaces,
1445 };
1446
1447 static void
1448 brw_upload_cs_ubo_surfaces(struct brw_context *brw)
1449 {
1450 struct gl_context *ctx = &brw->ctx;
1451 /* _NEW_PROGRAM */
1452 struct gl_shader_program *prog =
1453 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1454
1455 if (!prog)
1456 return;
1457
1458 /* BRW_NEW_CS_PROG_DATA */
1459 brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1460 &brw->cs.base, &brw->cs.prog_data->base);
1461 }
1462
1463 const struct brw_tracked_state brw_cs_ubo_surfaces = {
1464 .dirty = {
1465 .mesa = _NEW_PROGRAM,
1466 .brw = BRW_NEW_BATCH |
1467 BRW_NEW_BLORP |
1468 BRW_NEW_CS_PROG_DATA |
1469 BRW_NEW_UNIFORM_BUFFER,
1470 },
1471 .emit = brw_upload_cs_ubo_surfaces,
1472 };
1473
1474 void
1475 brw_upload_abo_surfaces(struct brw_context *brw,
1476 struct gl_linked_shader *shader,
1477 struct brw_stage_state *stage_state,
1478 struct brw_stage_prog_data *prog_data)
1479 {
1480 struct gl_context *ctx = &brw->ctx;
1481 uint32_t *surf_offsets =
1482 &stage_state->surf_offset[prog_data->binding_table.abo_start];
1483
1484 if (shader && shader->NumAtomicBuffers) {
1485 for (unsigned i = 0; i < shader->NumAtomicBuffers; i++) {
1486 struct gl_atomic_buffer_binding *binding =
1487 &ctx->AtomicBufferBindings[shader->AtomicBuffers[i]->Binding];
1488 struct intel_buffer_object *intel_bo =
1489 intel_buffer_object(binding->BufferObject);
1490 drm_intel_bo *bo = intel_bufferobj_buffer(
1491 brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);
1492
1493 brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
1494 binding->Offset, BRW_SURFACEFORMAT_RAW,
1495 bo->size - binding->Offset, 1, true);
1496 }
1497
1498 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1499 }
1500 }
1501
1502 static void
1503 brw_upload_wm_abo_surfaces(struct brw_context *brw)
1504 {
1505 struct gl_context *ctx = &brw->ctx;
1506 /* _NEW_PROGRAM */
1507 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1508
1509 if (prog) {
1510 /* BRW_NEW_FS_PROG_DATA */
1511 brw_upload_abo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1512 &brw->wm.base, &brw->wm.prog_data->base);
1513 }
1514 }
1515
1516 const struct brw_tracked_state brw_wm_abo_surfaces = {
1517 .dirty = {
1518 .mesa = _NEW_PROGRAM,
1519 .brw = BRW_NEW_ATOMIC_BUFFER |
1520 BRW_NEW_BLORP |
1521 BRW_NEW_BATCH |
1522 BRW_NEW_FS_PROG_DATA,
1523 },
1524 .emit = brw_upload_wm_abo_surfaces,
1525 };
1526
1527 static void
1528 brw_upload_cs_abo_surfaces(struct brw_context *brw)
1529 {
1530 struct gl_context *ctx = &brw->ctx;
1531 /* _NEW_PROGRAM */
1532 struct gl_shader_program *prog =
1533 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1534
1535 if (prog) {
1536 /* BRW_NEW_CS_PROG_DATA */
1537 brw_upload_abo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1538 &brw->cs.base, &brw->cs.prog_data->base);
1539 }
1540 }
1541
1542 const struct brw_tracked_state brw_cs_abo_surfaces = {
1543 .dirty = {
1544 .mesa = _NEW_PROGRAM,
1545 .brw = BRW_NEW_ATOMIC_BUFFER |
1546 BRW_NEW_BLORP |
1547 BRW_NEW_BATCH |
1548 BRW_NEW_CS_PROG_DATA,
1549 },
1550 .emit = brw_upload_cs_abo_surfaces,
1551 };
1552
1553 static void
1554 brw_upload_cs_image_surfaces(struct brw_context *brw)
1555 {
1556 struct gl_context *ctx = &brw->ctx;
1557 /* _NEW_PROGRAM */
1558 struct gl_shader_program *prog =
1559 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1560
1561 if (prog) {
1562 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1563 brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1564 &brw->cs.base, &brw->cs.prog_data->base);
1565 }
1566 }
1567
1568 const struct brw_tracked_state brw_cs_image_surfaces = {
1569 .dirty = {
1570 .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
1571 .brw = BRW_NEW_BATCH |
1572 BRW_NEW_BLORP |
1573 BRW_NEW_CS_PROG_DATA |
1574 BRW_NEW_IMAGE_UNITS
1575 },
1576 .emit = brw_upload_cs_image_surfaces,
1577 };
1578
1579 static uint32_t
1580 get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
1581 {
1582 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1583 uint32_t hw_format = brw_format_for_mesa_format(format);
1584 if (access == GL_WRITE_ONLY) {
1585 return hw_format;
1586 } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
1587 /* Typed surface reads support a very limited subset of the shader
1588     * image formats. Translate the format into the closest one the
1589 * hardware supports.
1590 */
1591 return isl_lower_storage_image_format(devinfo, hw_format);
1592 } else {
1593      /* The hardware doesn't actually support a typed format that we can use,
1594 * so we have to fall back to untyped read/write messages.
1595 */
1596 return BRW_SURFACEFORMAT_RAW;
1597 }
1598 }
1599
1600 static void
1601 update_default_image_param(struct brw_context *brw,
1602 struct gl_image_unit *u,
1603 unsigned surface_idx,
1604 struct brw_image_param *param)
1605 {
1606 memset(param, 0, sizeof(*param));
1607 param->surface_idx = surface_idx;
1608 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
1609 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
1610 * detailed explanation of these parameters.
1611 */
1612 param->swizzling[0] = 0xff;
1613 param->swizzling[1] = 0xff;
1614 }
1615
1616 static void
1617 update_buffer_image_param(struct brw_context *brw,
1618 struct gl_image_unit *u,
1619 unsigned surface_idx,
1620 struct brw_image_param *param)
1621 {
1622 struct gl_buffer_object *obj = u->TexObj->BufferObject;
1623 const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
1624 update_default_image_param(brw, u, surface_idx, param);
1625
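/* e.g. a 4096-byte buffer of RGBA8 texels yields size[0] = 1024 texels
 * with a 4-byte stride.
 */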
1626 param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
1627 param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
1628 }
1629
1630 static void
1631 update_texture_image_param(struct brw_context *brw,
1632 struct gl_image_unit *u,
1633 unsigned surface_idx,
1634 struct brw_image_param *param)
1635 {
1636 struct intel_mipmap_tree *mt = intel_texture_object(u->TexObj)->mt;
1637
1638 update_default_image_param(brw, u, surface_idx, param);
1639
1640 param->size[0] = minify(mt->logical_width0, u->Level);
1641 param->size[1] = minify(mt->logical_height0, u->Level);
1642 param->size[2] = (!u->Layered ? 1 :
1643 u->TexObj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1644 u->TexObj->Target == GL_TEXTURE_3D ?
1645 minify(mt->logical_depth0, u->Level) :
1646 mt->logical_depth0);
1647
1648 intel_miptree_get_image_offset(mt, u->Level, u->_Layer,
1649 &param->offset[0],
1650 &param->offset[1]);
1651
1652 param->stride[0] = mt->cpp;
1653 param->stride[1] = mt->pitch / mt->cpp;
1654 param->stride[2] =
1655 brw_miptree_get_horizontal_slice_pitch(brw, mt, u->Level);
1656 param->stride[3] =
1657 brw_miptree_get_vertical_slice_pitch(brw, mt, u->Level);
1658
1659 if (mt->tiling == I915_TILING_X) {
1660 /* An X tile is a rectangular block of 512x8 bytes. */
1661 param->tiling[0] = _mesa_logbase2(512 / mt->cpp);
1662 param->tiling[1] = _mesa_logbase2(8);
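/* e.g. for a 4 Bpp format this gives tiling[0] = log2(128) = 7 and
 * tiling[1] = 3, i.e. 128x8 pixels per X tile.
 */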
1663
1664 if (brw->has_swizzling) {
1665 /* Right shifts required to swizzle bits 9 and 10 of the memory
1666 * address with bit 6.
1667 */
1668 param->swizzling[0] = 3;
1669 param->swizzling[1] = 4;
1670 }
1671 } else if (mt->tiling == I915_TILING_Y) {
1672 /* The layout of a Y-tiled surface in memory isn't really fundamentally
1673       * different from the layout of an X-tiled surface; we simply pretend that
1674       * the surface is broken up into a number of smaller 16Bx32 tiles, each
1675       * one arranged in X-major order, just as is the case for X-tiling.
1676 */
1677 param->tiling[0] = _mesa_logbase2(16 / mt->cpp);
1678 param->tiling[1] = _mesa_logbase2(32);
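/* e.g. for a 4 Bpp format this gives tiling[0] = log2(4) = 2 and
 * tiling[1] = 5, i.e. 4x32 pixels per 16Bx32 sub-tile.
 */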
1679
1680 if (brw->has_swizzling) {
1681 /* Right shift required to swizzle bit 9 of the memory address with
1682 * bit 6.
1683 */
1684 param->swizzling[0] = 3;
1685 }
1686 }
1687
1688 /* 3D textures are arranged in 2D in memory with 2^lod slices per row. The
1689 * address calculation algorithm (emit_address_calculation() in
1690 * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
1691 * modulus equal to the LOD.
1692 */
1693 param->tiling[2] = (u->TexObj->Target == GL_TEXTURE_3D ? u->Level :
1694 0);
1695 }
1696
1697 static void
1698 update_image_surface(struct brw_context *brw,
1699 struct gl_image_unit *u,
1700 GLenum access,
1701 unsigned surface_idx,
1702 uint32_t *surf_offset,
1703 struct brw_image_param *param)
1704 {
1705 if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
1706 struct gl_texture_object *obj = u->TexObj;
1707 const unsigned format = get_image_format(brw, u->_ActualFormat, access);
1708
1709 if (obj->Target == GL_TEXTURE_BUFFER) {
1710 struct intel_buffer_object *intel_obj =
1711 intel_buffer_object(obj->BufferObject);
1712 const unsigned texel_size = (format == BRW_SURFACEFORMAT_RAW ? 1 :
1713 _mesa_get_format_bytes(u->_ActualFormat));
1714
1715 brw_emit_buffer_surface_state(
1716 brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
1717 format, intel_obj->Base.Size, texel_size,
1718 access != GL_READ_ONLY);
1719
1720 update_buffer_image_param(brw, u, surface_idx, param);
1721
1722 } else {
1723 struct intel_texture_object *intel_obj = intel_texture_object(obj);
1724 struct intel_mipmap_tree *mt = intel_obj->mt;
1725
1726 if (format == BRW_SURFACEFORMAT_RAW) {
1727 brw_emit_buffer_surface_state(
1728 brw, surf_offset, mt->bo, mt->offset,
1729 format, mt->bo->size - mt->offset, 1 /* pitch */,
1730 access != GL_READ_ONLY);
1731
1732 } else {
1733 const unsigned num_layers = (!u->Layered ? 1 :
1734 obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1735 mt->logical_depth0);
1736
1737 struct isl_view view = {
1738 .format = format,
1739 .base_level = obj->MinLevel + u->Level,
1740 .levels = 1,
1741 .base_array_layer = obj->MinLayer + u->_Layer,
1742 .array_len = num_layers,
1743 .swizzle = ISL_SWIZZLE_IDENTITY,
1744 .usage = ISL_SURF_USAGE_STORAGE_BIT,
1745 };
1746
1747 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
1748 const int flags =
1749 mt->fast_clear_state == INTEL_FAST_CLEAR_STATE_RESOLVED ?
1750 INTEL_AUX_BUFFER_DISABLED : 0;
1751 brw_emit_surface_state(brw, mt, flags, mt->target, view,
1752 surface_state_infos[brw->gen].tex_mocs,
1753 surf_offset, surf_index,
1754 I915_GEM_DOMAIN_SAMPLER,
1755 access == GL_READ_ONLY ? 0 :
1756 I915_GEM_DOMAIN_SAMPLER);
1757 }
1758
1759 update_texture_image_param(brw, u, surface_idx, param);
1760 }
1761
1762 } else {
1763 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
1764 update_default_image_param(brw, u, surface_idx, param);
1765 }
1766 }
1767
1768 void
1769 brw_upload_image_surfaces(struct brw_context *brw,
1770 struct gl_linked_shader *shader,
1771 struct brw_stage_state *stage_state,
1772 struct brw_stage_prog_data *prog_data)
1773 {
1774 struct gl_context *ctx = &brw->ctx;
1775
1776 if (shader && shader->NumImages) {
1777 for (unsigned i = 0; i < shader->NumImages; i++) {
1778 struct gl_image_unit *u = &ctx->ImageUnits[shader->ImageUnits[i]];
1779 const unsigned surf_idx = prog_data->binding_table.image_start + i;
1780
1781 update_image_surface(brw, u, shader->ImageAccess[i],
1782 surf_idx,
1783 &stage_state->surf_offset[surf_idx],
1784 &prog_data->image_param[i]);
1785 }
1786
1787 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1788 /* This may have changed the image metadata dependent on the context
1789       * image unit state and passed to the program as uniforms; make sure
1790 * that push and pull constants are reuploaded.
1791 */
1792 brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
1793 }
1794 }
1795
1796 static void
1797 brw_upload_wm_image_surfaces(struct brw_context *brw)
1798 {
1799 struct gl_context *ctx = &brw->ctx;
1800 /* BRW_NEW_FRAGMENT_PROGRAM */
1801 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1802
1803 if (prog) {
1804 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1805 brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1806 &brw->wm.base, &brw->wm.prog_data->base);
1807 }
1808 }
1809
1810 const struct brw_tracked_state brw_wm_image_surfaces = {
1811 .dirty = {
1812 .mesa = _NEW_TEXTURE,
1813 .brw = BRW_NEW_BATCH |
1814 BRW_NEW_BLORP |
1815 BRW_NEW_FRAGMENT_PROGRAM |
1816 BRW_NEW_FS_PROG_DATA |
1817 BRW_NEW_IMAGE_UNITS
1818 },
1819 .emit = brw_upload_wm_image_surfaces,
1820 };
1821
1822 void
1823 gen4_init_vtable_surface_functions(struct brw_context *brw)
1824 {
1825 brw->vtbl.update_renderbuffer_surface = gen4_update_renderbuffer_surface;
1826 brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
1827 }
1828
1829 void
1830 gen6_init_vtable_surface_functions(struct brw_context *brw)
1831 {
1832 gen4_init_vtable_surface_functions(brw);
1833 brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
1834 }
1835
1836 static void
1837 brw_upload_cs_work_groups_surface(struct brw_context *brw)
1838 {
1839 struct gl_context *ctx = &brw->ctx;
1840 /* _NEW_PROGRAM */
1841 struct gl_shader_program *prog =
1842 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1843 /* BRW_NEW_CS_PROG_DATA */
1844 const struct brw_cs_prog_data *cs_prog_data = brw->cs.prog_data;
1845
1846 if (prog && cs_prog_data->uses_num_work_groups) {
1847 const unsigned surf_idx =
1848 cs_prog_data->binding_table.work_groups_start;
1849 uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
1850 drm_intel_bo *bo;
1851 uint32_t bo_offset;
1852
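/* For indirect dispatches the work-group counts already live in a BO;
 * for direct dispatches they only exist on the CPU, so upload them here.
 */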
1853 if (brw->compute.num_work_groups_bo == NULL) {
1854 bo = NULL;
1855 intel_upload_data(brw,
1856 (void *)brw->compute.num_work_groups,
1857 3 * sizeof(GLuint),
1858 sizeof(GLuint),
1859 &bo,
1860 &bo_offset);
1861 } else {
1862 bo = brw->compute.num_work_groups_bo;
1863 bo_offset = brw->compute.num_work_groups_offset;
1864 }
1865
1866 brw_emit_buffer_surface_state(brw, surf_offset,
1867 bo, bo_offset,
1868 BRW_SURFACEFORMAT_RAW,
1869 3 * sizeof(GLuint), 1, true);
1870 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1871 }
1872 }
1873
1874 const struct brw_tracked_state brw_cs_work_groups_surface = {
1875 .dirty = {
1876 .brw = BRW_NEW_BLORP |
1877 BRW_NEW_CS_PROG_DATA |
1878 BRW_NEW_CS_WORK_GROUPS
1879 },
1880 .emit = brw_upload_cs_work_groups_surface,
1881 };