c9383b82245ee5db911075c34dea0937bd2f7989
[mesa.git] / src / mesa / drivers / dri / i965 / brw_wm_surface_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "compiler/nir/nir.h"
34 #include "main/context.h"
35 #include "main/blend.h"
36 #include "main/mtypes.h"
37 #include "main/samplerobj.h"
38 #include "main/shaderimage.h"
39 #include "main/teximage.h"
40 #include "program/prog_parameter.h"
41 #include "program/prog_instruction.h"
42 #include "main/framebuffer.h"
43 #include "main/shaderapi.h"
44
45 #include "isl/isl.h"
46
47 #include "intel_mipmap_tree.h"
48 #include "intel_batchbuffer.h"
49 #include "intel_tex.h"
50 #include "intel_fbo.h"
51 #include "intel_buffer_objects.h"
52
53 #include "brw_context.h"
54 #include "brw_state.h"
55 #include "brw_defines.h"
56 #include "brw_wm.h"
57
/* MOCS (Memory Object Control State) values programmed into surface state
 * for sampled texture surfaces, indexed by devinfo->gen (see the
 * tex_mocs[devinfo->gen] uses below).  Gens below 7 never index this table.
 */
uint32_t tex_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_WB,
   [9] = SKL_MOCS_WB,
   [10] = CNL_MOCS_WB,
};
64
/* MOCS values for render-target surfaces, indexed by devinfo->gen (used as
 * rb_mocs[devinfo->gen] below).  On gen8+ these select PTE-based
 * cacheability rather than the write-back setting used for textures —
 * NOTE(review): presumably so scanout buffers honor the kernel's mapping;
 * confirm against the MOCS definitions in brw_defines.h.
 */
uint32_t rb_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_PTE,
   [9] = SKL_MOCS_PTE,
   [10] = CNL_MOCS_PTE,
};
71
/* Copy mt->surf into *surf and, when the dim layout implied by @target does
 * not match how the miptree was actually laid out in memory, override the
 * copy so it describes only the single level/layer selected by @view.  In
 * that case *offset/*tile_x/*tile_y are adjusted to address the first texel
 * of that slice directly, and view->base_level/base_array_layer are reset
 * to 0 to match the rewritten surface.
 */
static void
get_isl_surf(struct brw_context *brw, struct intel_mipmap_tree *mt,
             GLenum target, struct isl_view *view,
             uint32_t *tile_x, uint32_t *tile_y,
             uint32_t *offset, struct isl_surf *surf)
{
   *surf = mt->surf;

   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const enum isl_dim_layout dim_layout =
      get_isl_dim_layout(devinfo, mt->surf.tiling, target);

   /* Common case: the miptree's layout already matches the target. */
   if (surf->dim_layout == dim_layout)
      return;

   /* The layout of the specified texture target is not compatible with the
    * actual layout of the miptree structure in memory -- You're entering
    * dangerous territory, this can only possibly work if you only intended
    * to access a single level and slice of the texture, and the hardware
    * supports the tile offset feature in order to allow non-tile-aligned
    * base offsets, since we'll have to point the hardware to the first
    * texel of the level instead of relying on the usual base level/layer
    * controls.
    */
   assert(devinfo->has_surface_tile_offset);
   assert(view->levels == 1 && view->array_len == 1);
   assert(*tile_x == 0 && *tile_y == 0);

   *offset += intel_miptree_get_tile_offsets(mt, view->base_level,
                                             view->base_array_layer,
                                             tile_x, tile_y);

   /* Minify the logical dimensions of the texture. */
   const unsigned l = view->base_level - mt->first_level;
   surf->logical_level0_px.width = minify(surf->logical_level0_px.width, l);
   /* 1D surfaces have no height; 1D/2D surfaces have no depth. */
   surf->logical_level0_px.height = surf->dim <= ISL_SURF_DIM_1D ? 1 :
      minify(surf->logical_level0_px.height, l);
   surf->logical_level0_px.depth = surf->dim <= ISL_SURF_DIM_2D ? 1 :
      minify(surf->logical_level0_px.depth, l);

   /* Only the base level and layer can be addressed with the overridden
    * layout.
    */
   surf->logical_level0_px.array_len = 1;
   surf->levels = 1;
   surf->dim_layout = dim_layout;

   /* The requested slice of the texture is now at the base level and
    * layer.
    */
   view->base_level = 0;
   view->base_array_layer = 0;
}
125
126 static void
127 brw_emit_surface_state(struct brw_context *brw,
128 struct intel_mipmap_tree *mt,
129 GLenum target, struct isl_view view,
130 enum isl_aux_usage aux_usage,
131 uint32_t mocs, uint32_t *surf_offset, int surf_index,
132 unsigned reloc_flags)
133 {
134 uint32_t tile_x = mt->level[0].level_x;
135 uint32_t tile_y = mt->level[0].level_y;
136 uint32_t offset = mt->offset;
137
138 struct isl_surf surf;
139
140 get_isl_surf(brw, mt, target, &view, &tile_x, &tile_y, &offset, &surf);
141
142 union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };
143
144 struct brw_bo *aux_bo;
145 struct isl_surf *aux_surf = NULL;
146 uint64_t aux_offset = 0;
147 switch (aux_usage) {
148 case ISL_AUX_USAGE_MCS:
149 case ISL_AUX_USAGE_CCS_D:
150 case ISL_AUX_USAGE_CCS_E:
151 aux_surf = &mt->mcs_buf->surf;
152 aux_bo = mt->mcs_buf->bo;
153 aux_offset = mt->mcs_buf->offset;
154 break;
155
156 case ISL_AUX_USAGE_HIZ:
157 aux_surf = &mt->hiz_buf->surf;
158 aux_bo = mt->hiz_buf->bo;
159 aux_offset = 0;
160 break;
161
162 case ISL_AUX_USAGE_NONE:
163 break;
164 }
165
166 if (aux_usage != ISL_AUX_USAGE_NONE) {
167 /* We only really need a clear color if we also have an auxiliary
168 * surface. Without one, it does nothing.
169 */
170 clear_color = mt->fast_clear_color;
171 }
172
173 void *state = brw_state_batch(brw,
174 brw->isl_dev.ss.size,
175 brw->isl_dev.ss.align,
176 surf_offset);
177
178 isl_surf_fill_state(&brw->isl_dev, state, .surf = &mt->surf, .view = &view,
179 .address = brw_state_reloc(&brw->batch,
180 *surf_offset + brw->isl_dev.ss.addr_offset,
181 mt->bo, offset, reloc_flags),
182 .aux_surf = aux_surf, .aux_usage = aux_usage,
183 .aux_address = aux_offset,
184 .mocs = mocs, .clear_color = clear_color,
185 .x_offset_sa = tile_x, .y_offset_sa = tile_y);
186 if (aux_surf) {
187 /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
188 * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
189 * contain other control information. Since buffer addresses are always
190 * on 4k boundaries (and thus have their lower 12 bits zero), we can use
191 * an ordinary reloc to do the necessary address translation.
192 *
193 * FIXME: move to the point of assignment.
194 */
195 assert((aux_offset & 0xfff) == 0);
196 uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
197 *aux_addr = brw_state_reloc(&brw->batch,
198 *surf_offset +
199 brw->isl_dev.ss.aux_addr_offset,
200 aux_bo, *aux_addr,
201 reloc_flags);
202 }
203 }
204
205 static uint32_t
206 gen6_update_renderbuffer_surface(struct brw_context *brw,
207 struct gl_renderbuffer *rb,
208 unsigned unit,
209 uint32_t surf_index)
210 {
211 const struct gen_device_info *devinfo = &brw->screen->devinfo;
212 struct gl_context *ctx = &brw->ctx;
213 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
214 struct intel_mipmap_tree *mt = irb->mt;
215
216 enum isl_aux_usage aux_usage =
217 brw->draw_aux_buffer_disabled[unit] ? ISL_AUX_USAGE_NONE :
218 intel_miptree_render_aux_usage(brw, mt, ctx->Color.sRGBEnabled,
219 ctx->Color.BlendEnabled & (1 << unit));
220
221 assert(brw_render_target_supported(brw, rb));
222
223 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
224 if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
225 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
226 __func__, _mesa_get_format_name(rb_format));
227 }
228
229 struct isl_view view = {
230 .format = brw->mesa_to_isl_render_format[rb_format],
231 .base_level = irb->mt_level - irb->mt->first_level,
232 .levels = 1,
233 .base_array_layer = irb->mt_layer,
234 .array_len = MAX2(irb->layer_count, 1),
235 .swizzle = ISL_SWIZZLE_IDENTITY,
236 .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
237 };
238
239 uint32_t offset;
240 brw_emit_surface_state(brw, mt, mt->target, view, aux_usage,
241 rb_mocs[devinfo->gen],
242 &offset, surf_index,
243 RELOC_WRITE);
244 return offset;
245 }
246
247 GLuint
248 translate_tex_target(GLenum target)
249 {
250 switch (target) {
251 case GL_TEXTURE_1D:
252 case GL_TEXTURE_1D_ARRAY_EXT:
253 return BRW_SURFACE_1D;
254
255 case GL_TEXTURE_RECTANGLE_NV:
256 return BRW_SURFACE_2D;
257
258 case GL_TEXTURE_2D:
259 case GL_TEXTURE_2D_ARRAY_EXT:
260 case GL_TEXTURE_EXTERNAL_OES:
261 case GL_TEXTURE_2D_MULTISAMPLE:
262 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
263 return BRW_SURFACE_2D;
264
265 case GL_TEXTURE_3D:
266 return BRW_SURFACE_3D;
267
268 case GL_TEXTURE_CUBE_MAP:
269 case GL_TEXTURE_CUBE_MAP_ARRAY:
270 return BRW_SURFACE_CUBE;
271
272 default:
273 unreachable("not reached");
274 }
275 }
276
277 uint32_t
278 brw_get_surface_tiling_bits(enum isl_tiling tiling)
279 {
280 switch (tiling) {
281 case ISL_TILING_X:
282 return BRW_SURFACE_TILED;
283 case ISL_TILING_Y0:
284 return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
285 default:
286 return 0;
287 }
288 }
289
290
291 uint32_t
292 brw_get_surface_num_multisamples(unsigned num_samples)
293 {
294 if (num_samples > 1)
295 return BRW_SURFACE_MULTISAMPLECOUNT_4;
296 else
297 return BRW_SURFACE_MULTISAMPLECOUNT_1;
298 }
299
300 /**
301 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
302 * swizzling.
303 */
/* Compute the effective 4-channel swizzle for texture @t, combining
 * DEPTH_TEXTURE_MODE emulation, legacy alpha/luminance/intensity format
 * emulation, and the application's EXT_texture_swizzle setting.  Returns a
 * MAKE_SWIZZLE4() packed value.
 */
int
brw_get_texture_swizzle(const struct gl_context *ctx,
                        const struct gl_texture_object *t)
{
   const struct gl_texture_image *img = t->Image[0][t->BaseLevel];

   /* Start with the identity mapping; entry [SWIZZLE_NIL] exists so the
    * final table lookup below is always in bounds.
    */
   int swizzles[SWIZZLE_NIL + 1] = {
      SWIZZLE_X,
      SWIZZLE_Y,
      SWIZZLE_Z,
      SWIZZLE_W,
      SWIZZLE_ZERO,
      SWIZZLE_ONE,
      SWIZZLE_NIL
   };

   if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
       img->_BaseFormat == GL_DEPTH_STENCIL) {
      GLenum depth_mode = t->DepthMode;

      /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
       * with depth component data specified with a sized internal format.
       * Otherwise, it's left at the old default, GL_LUMINANCE.
       */
      if (_mesa_is_gles3(ctx) &&
          img->InternalFormat != GL_DEPTH_COMPONENT &&
          img->InternalFormat != GL_DEPTH_STENCIL) {
         depth_mode = GL_RED;
      }

      switch (depth_mode) {
      case GL_ALPHA:
         /* (0, 0, 0, D) */
         swizzles[0] = SWIZZLE_ZERO;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_LUMINANCE:
         /* (D, D, D, 1) */
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
         break;
      case GL_INTENSITY:
         /* (D, D, D, D) */
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_RED:
         /* (D, 0, 0, 1) */
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_ONE;
         break;
      }
   }

   GLenum datatype = _mesa_get_format_datatype(img->TexFormat);

   /* If the texture's format is alpha-only, force R, G, and B to
    * 0.0. Similarly, if the texture's format has no alpha channel,
    * force the alpha value read to 1.0. This allows for the
    * implementation to use an RGBA texture for any of these formats
    * without leaking any unexpected values.
    */
   switch (img->_BaseFormat) {
   case GL_ALPHA:
      swizzles[0] = SWIZZLE_ZERO;
      swizzles[1] = SWIZZLE_ZERO;
      swizzles[2] = SWIZZLE_ZERO;
      break;
   case GL_LUMINANCE:
      if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
      }
      break;
   case GL_LUMINANCE_ALPHA:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_W;
      }
      break;
   case GL_INTENSITY:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
      }
      break;
   case GL_RED:
   case GL_RG:
   case GL_RGB:
      /* NOTE(review): the DXT1 formats can encode a 1-bit alpha even though
       * their base format is RGB, hence the explicit alpha override here —
       * confirm against the format table.
       */
      if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0 ||
          img->TexFormat == MESA_FORMAT_RGB_DXT1 ||
          img->TexFormat == MESA_FORMAT_SRGB_DXT1)
         swizzles[3] = SWIZZLE_ONE;
      break;
   }

   /* Compose the application's swizzle with the emulation table above. */
   return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
                        swizzles[GET_SWZ(t->_Swizzle, 1)],
                        swizzles[GET_SWZ(t->_Swizzle, 2)],
                        swizzles[GET_SWZ(t->_Swizzle, 3)]);
}
415
416 /**
417 * Convert an swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
418 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED). The mappings are
419 *
420 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
421 * 0 1 2 3 4 5
422 * 4 5 6 7 0 1
423 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
424 *
425 * which is simply adding 4 then modding by 8 (or anding with 7).
426 *
427 * We then may need to apply workarounds for textureGather hardware bugs.
428 */
429 static unsigned
430 swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
431 {
432 unsigned scs = (swizzle + 4) & 7;
433
434 return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
435 }
436
437 static bool
438 brw_aux_surface_disabled(const struct brw_context *brw,
439 const struct intel_mipmap_tree *mt)
440 {
441 const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
442
443 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
444 const struct intel_renderbuffer *irb =
445 intel_renderbuffer(fb->_ColorDrawBuffers[i]);
446
447 if (irb && irb->mt == mt)
448 return brw->draw_aux_buffer_disabled[i];
449 }
450
451 return false;
452 }
453
/* Emit the SURFACE_STATE for the texture bound to sampler @unit, writing its
 * state-area offset to *surf_offset.  @for_gather selects the gen6/gen7
 * textureGather format workarounds; @plane selects a sub-plane of planar
 * (e.g. YUV external) miptrees.
 */
void
brw_update_texture_surface(struct gl_context *ctx,
                           unsigned unit,
                           uint32_t *surf_offset,
                           bool for_gather,
                           uint32_t plane)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;

   if (obj->Target == GL_TEXTURE_BUFFER) {
      /* Buffer textures take an entirely different (buffer) surface path. */
      brw_update_buffer_texture_surface(ctx, unit, surf_offset);

   } else {
      struct intel_texture_object *intel_obj = intel_texture_object(obj);
      struct intel_mipmap_tree *mt = intel_obj->mt;

      /* Select the requested sub-plane of a planar miptree; silently skip
       * emission if that plane doesn't exist.
       */
      if (plane > 0) {
         if (mt->plane[plane - 1] == NULL)
            return;
         mt = mt->plane[plane - 1];
      }

      struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
      /* If this is a view with restricted NumLayers, then our effective depth
       * is not just the miptree depth.
       */
      unsigned view_num_layers;
      if (obj->Immutable && obj->Target != GL_TEXTURE_3D) {
         view_num_layers = obj->NumLayers;
      } else {
         view_num_layers = mt->surf.dim == ISL_SURF_DIM_3D ?
                              mt->surf.logical_level0_px.depth :
                              mt->surf.logical_level0_px.array_len;
      }

      /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
       * texturing functions that return a float, as our code generation always
       * selects the .x channel (which would always be 0).
       */
      struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
      const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
         (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
          firstImage->_BaseFormat == GL_DEPTH_STENCIL);
      const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
                                brw_get_texture_swizzle(&brw->ctx, obj));

      mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
      enum isl_format format = translate_tex_format(brw, mesa_fmt,
                                                    sampler->sRGBDecode);

      /* Implement gen6 and gen7 gather work-around */
      bool need_green_to_blue = false;
      if (for_gather) {
         if (devinfo->gen == 7 && (format == ISL_FORMAT_R32G32_FLOAT ||
                                   format == ISL_FORMAT_R32G32_SINT ||
                                   format == ISL_FORMAT_R32G32_UINT)) {
            format = ISL_FORMAT_R32G32_FLOAT_LD;
            /* Haswell additionally needs green remapped to blue (see
             * swizzle_to_scs()).
             */
            need_green_to_blue = devinfo->is_haswell;
         } else if (devinfo->gen == 6) {
            /* Sandybridge's gather4 message is broken for integer formats.
             * To work around this, we pretend the surface is UNORM for
             * 8 or 16-bit formats, and emit shader instructions to recover
             * the real INT/UINT value. For 32-bit formats, we pretend
             * the surface is FLOAT, and simply reinterpret the resulting
             * bits.
             */
            switch (format) {
            case ISL_FORMAT_R8_SINT:
            case ISL_FORMAT_R8_UINT:
               format = ISL_FORMAT_R8_UNORM;
               break;

            case ISL_FORMAT_R16_SINT:
            case ISL_FORMAT_R16_UINT:
               format = ISL_FORMAT_R16_UNORM;
               break;

            case ISL_FORMAT_R32_SINT:
            case ISL_FORMAT_R32_UINT:
               format = ISL_FORMAT_R32_FLOAT;
               break;

            default:
               break;
            }
         }
      }

      /* Stencil sampling: on gen7 and earlier the hardware can't sample the
       * W-tiled stencil buffer directly, so use the shadow R8 copy.
       */
      if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
         if (devinfo->gen <= 7) {
            assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
            mt = mt->r8stencil_mt;
         } else {
            mt = mt->stencil_mt;
         }
         format = ISL_FORMAT_R8_UINT;
      } else if (devinfo->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
         assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
         mt = mt->r8stencil_mt;
         format = ISL_FORMAT_R8_UINT;
      }

      /* Recover the binding-table index from the offset pointer's position
       * within the WM stage's surf_offset array.
       */
      const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];

      struct isl_view view = {
         .format = format,
         .base_level = obj->MinLevel + obj->BaseLevel,
         .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
         .base_array_layer = obj->MinLayer,
         .array_len = view_num_layers,
         .swizzle = {
            .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
            .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
            .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
            .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
         },
         .usage = ISL_SURF_USAGE_TEXTURE_BIT,
      };

      if (obj->Target == GL_TEXTURE_CUBE_MAP ||
          obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
         view.usage |= ISL_SURF_USAGE_CUBE_BIT;

      enum isl_aux_usage aux_usage =
         intel_miptree_texture_aux_usage(brw, mt, format);

      if (brw_aux_surface_disabled(brw, mt))
         aux_usage = ISL_AUX_USAGE_NONE;

      brw_emit_surface_state(brw, mt, mt->target, view, aux_usage,
                             tex_mocs[devinfo->gen],
                             surf_offset, surf_index,
                             0);
   }
}
591
592 void
593 brw_emit_buffer_surface_state(struct brw_context *brw,
594 uint32_t *out_offset,
595 struct brw_bo *bo,
596 unsigned buffer_offset,
597 unsigned surface_format,
598 unsigned buffer_size,
599 unsigned pitch,
600 unsigned reloc_flags)
601 {
602 const struct gen_device_info *devinfo = &brw->screen->devinfo;
603 uint32_t *dw = brw_state_batch(brw,
604 brw->isl_dev.ss.size,
605 brw->isl_dev.ss.align,
606 out_offset);
607
608 isl_buffer_fill_state(&brw->isl_dev, dw,
609 .address = !bo ? buffer_offset :
610 brw_state_reloc(&brw->batch,
611 *out_offset + brw->isl_dev.ss.addr_offset,
612 bo, buffer_offset,
613 reloc_flags),
614 .size = buffer_size,
615 .format = surface_format,
616 .stride = pitch,
617 .mocs = tex_mocs[devinfo->gen]);
618 }
619
620 void
621 brw_update_buffer_texture_surface(struct gl_context *ctx,
622 unsigned unit,
623 uint32_t *surf_offset)
624 {
625 struct brw_context *brw = brw_context(ctx);
626 struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
627 struct intel_buffer_object *intel_obj =
628 intel_buffer_object(tObj->BufferObject);
629 uint32_t size = tObj->BufferSize;
630 struct brw_bo *bo = NULL;
631 mesa_format format = tObj->_BufferObjectFormat;
632 const enum isl_format isl_format = brw_isl_format_for_mesa_format(format);
633 int texel_size = _mesa_get_format_bytes(format);
634
635 if (intel_obj) {
636 size = MIN2(size, intel_obj->Base.Size);
637 bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size,
638 false);
639 }
640
641 /* The ARB_texture_buffer_specification says:
642 *
643 * "The number of texels in the buffer texture's texel array is given by
644 *
645 * floor(<buffer_size> / (<components> * sizeof(<base_type>)),
646 *
647 * where <buffer_size> is the size of the buffer object, in basic
648 * machine units and <components> and <base_type> are the element count
649 * and base data type for elements, as specified in Table X.1. The
650 * number of texels in the texel array is then clamped to the
651 * implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
652 *
653 * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
654 * so that when ISL divides by stride to obtain the number of texels, that
655 * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
656 */
657 size = MIN2(size, ctx->Const.MaxTextureBufferSize * (unsigned) texel_size);
658
659 if (isl_format == ISL_FORMAT_UNSUPPORTED) {
660 _mesa_problem(NULL, "bad format %s for texture buffer\n",
661 _mesa_get_format_name(format));
662 }
663
664 brw_emit_buffer_surface_state(brw, surf_offset, bo,
665 tObj->BufferOffset,
666 isl_format,
667 size,
668 texel_size,
669 0);
670 }
671
672 /**
673 * Create the constant buffer surface. Vertex/fragment shader constants will be
674 * read from this buffer with Data Port Read instructions/messages.
675 */
676 void
677 brw_create_constant_surface(struct brw_context *brw,
678 struct brw_bo *bo,
679 uint32_t offset,
680 uint32_t size,
681 uint32_t *out_offset)
682 {
683 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
684 ISL_FORMAT_R32G32B32A32_FLOAT,
685 size, 1, 0);
686 }
687
688 /**
689 * Create the buffer surface. Shader buffer variables will be
690 * read from / write to this buffer with Data Port Read/Write
691 * instructions/messages.
692 */
693 void
694 brw_create_buffer_surface(struct brw_context *brw,
695 struct brw_bo *bo,
696 uint32_t offset,
697 uint32_t size,
698 uint32_t *out_offset)
699 {
700 /* Use a raw surface so we can reuse existing untyped read/write/atomic
701 * messages. We need these specifically for the fragment shader since they
702 * include a pixel mask header that we need to ensure correct behavior
703 * with helper invocations, which cannot write to the buffer.
704 */
705 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
706 ISL_FORMAT_RAW,
707 size, 1, RELOC_WRITE);
708 }
709
710 /**
711 * Set up a binding table entry for use by stream output logic (transform
712 * feedback).
713 *
714 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
715 */
void
brw_update_sol_surface(struct brw_context *brw,
                       struct gl_buffer_object *buffer_obj,
                       uint32_t *out_offset, unsigned num_vector_components,
                       unsigned stride_dwords, unsigned offset_dwords)
{
   struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
   uint32_t offset_bytes = 4 * offset_dwords;
   struct brw_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
                                              offset_bytes,
                                              buffer_obj->Size - offset_bytes,
                                              true);
   /* Hand-packed 6-dword gen4-style SURFACE_STATE. */
   uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
   uint32_t pitch_minus_1 = 4*stride_dwords - 1;
   size_t size_dwords = buffer_obj->Size / 4;
   uint32_t buffer_size_minus_1, width, height, depth, surface_format;

   /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
    * too big to map using a single binding table entry?
    */
   assert((size_dwords - offset_dwords) / stride_dwords
          <= BRW_MAX_NUM_BUFFER_ENTRIES);

   if (size_dwords > offset_dwords + num_vector_components) {
      /* There is room for at least 1 transform feedback output in the buffer.
       * Compute the number of additional transform feedback outputs the
       * buffer has room for.
       */
      buffer_size_minus_1 =
         (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
   } else {
      /* There isn't even room for a single transform feedback output in the
       * buffer. We can't configure the binding table entry to prevent output
       * entirely; we'll have to rely on the geometry shader to detect
       * overflow. But to minimize the damage in case of a bug, set up the
       * binding table entry to just allow a single output.
       */
      buffer_size_minus_1 = 0;
   }
   /* Split the 27-bit entry count across the 7-bit width, 13-bit height and
    * 7-bit depth fields of the surface state.
    */
   width = buffer_size_minus_1 & 0x7f;
   height = (buffer_size_minus_1 & 0xfff80) >> 7;
   depth = (buffer_size_minus_1 & 0x7f00000) >> 20;

   switch (num_vector_components) {
   case 1:
      surface_format = ISL_FORMAT_R32_FLOAT;
      break;
   case 2:
      surface_format = ISL_FORMAT_R32G32_FLOAT;
      break;
   case 3:
      surface_format = ISL_FORMAT_R32G32B32_FLOAT;
      break;
   case 4:
      surface_format = ISL_FORMAT_R32G32B32A32_FLOAT;
      break;
   default:
      unreachable("Invalid vector size for transform feedback output");
   }

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
      BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
      surface_format << BRW_SURFACE_FORMAT_SHIFT |
      BRW_SURFACE_RC_READ_WRITE;
   /* reloc: the surface's base address, offset into the buffer object. */
   surf[1] = brw_state_reloc(&brw->batch,
                             *out_offset + 4, bo, offset_bytes, RELOC_WRITE);
   surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
              height << BRW_SURFACE_HEIGHT_SHIFT);
   surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = 0;
   surf[5] = 0;
}
789
790 /* Creates a new WM constant buffer reflecting the current fragment program's
791 * constants, if needed by the fragment program.
792 *
793 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
794 * state atom.
795 */
static void
brw_upload_wm_pull_constants(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->wm.base;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct brw_program *fp =
      (struct brw_program *) brw->programs[MESA_SHADER_FRAGMENT];

   /* BRW_NEW_FS_PROG_DATA */
   struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;

   /* Refresh subroutine-uniform indices before uploading constants. */
   _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
   /* _NEW_PROGRAM_CONSTANTS */
   brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
                             stage_state, prog_data);
}
812
/* State atom: re-upload the WM pull-constant buffer whenever program
 * constants, the batch, the fragment program, or its compiled data change.
 */
const struct brw_tracked_state brw_wm_pull_constants = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_pull_constants,
};
822
823 /**
824 * Creates a null renderbuffer surface.
825 *
826 * This is used when the shader doesn't write to any color output. An FB
827 * write to target 0 will still be emitted, because that's how the thread is
828 * terminated (and computed depth is returned), so we need to have the
829 * hardware discard the target 0 color output..
830 */
static void
emit_null_surface_state(struct brw_context *brw,
                        const struct gl_framebuffer *fb,
                        uint32_t *out_offset)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *surf = brw_state_batch(brw,
                                    brw->isl_dev.ss.size,
                                    brw->isl_dev.ss.align,
                                    out_offset);

   /* Use the fb dimensions or 1x1x1 */
   const unsigned width = fb ? _mesa_geometric_width(fb) : 1;
   const unsigned height = fb ? _mesa_geometric_height(fb) : 1;
   const unsigned samples = fb ? _mesa_geometric_samples(fb) : 1;

   /* Everything except multisampled gen6 can use a true null surface. */
   if (devinfo->gen != 6 || samples <= 1) {
      isl_null_fill_state(&brw->isl_dev, surf,
                          isl_extent3d(width, height, 1));
      return;
   }

   /* On Gen6, null render targets seem to cause GPU hangs when multisampling.
    * So work around this problem by rendering into dummy color buffer.
    *
    * To decrease the amount of memory needed by the workaround buffer, we
    * set its pitch to 128 bytes (the width of a Y tile).  This means that
    * the amount of memory needed for the workaround buffer is
    * (width_in_tiles + height_in_tiles - 1) tiles.
    *
    * Note that since the workaround buffer will be interpreted by the
    * hardware as an interleaved multisampled buffer, we need to compute
    * width_in_tiles and height_in_tiles by dividing the width and height
    * by 16 rather than the normal Y-tile size of 32.
    */
   unsigned width_in_tiles = ALIGN(width, 16) / 16;
   unsigned height_in_tiles = ALIGN(height, 16) / 16;
   unsigned pitch_minus_1 = 127;
   unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
   brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
                      size_needed);

   /* Hand-packed gen6 SURFACE_STATE pointing at the dummy buffer. */
   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              ISL_FORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
   surf[1] = brw_state_reloc(&brw->batch, *out_offset + 4,
                             brw->wm.multisampled_null_render_target_bo,
                             0, RELOC_WRITE);

   surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   /* From Sandy bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
    * Notes):
    *
    *   If Surface Type is SURFTYPE_NULL, this field must be TRUE
    */
   surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = BRW_SURFACE_MULTISAMPLECOUNT_4;
   surf[5] = 0;
}
892
893 /**
894 * Sets up a surface state structure to point at the given region.
895 * While it is only used for the front/back buffer currently, it should be
896 * usable for further buffers when doing ARB_draw_buffer support.
897 */
static uint32_t
gen4_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 unsigned unit,
                                 uint32_t surf_index)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;
   uint32_t *surf;
   uint32_t tile_x, tile_y;
   enum isl_format format;
   uint32_t offset;
   /* _NEW_BUFFERS */
   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   /* BRW_NEW_FS_PROG_DATA */

   if (rb->TexImage && !devinfo->has_surface_tile_offset) {
      intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);

      if (tile_x != 0 || tile_y != 0) {
         /* Original gen4 hardware couldn't draw to a non-tile-aligned
          * destination in a miptree unless you actually setup your renderbuffer
          * as a miptree and used the fragile lod/array_index/etc. controls to
          * select the image.  So, instead, we just make a new single-level
          * miptree and render into that.
          */
         intel_renderbuffer_move_to_temp(brw, irb, false);
         assert(irb->align_wa_mt);
         mt = irb->align_wa_mt;
      }
   }

   /* Hand-packed 6-dword gen4-style SURFACE_STATE. */
   surf = brw_state_batch(brw, 6 * 4, 32, &offset);

   format = brw->mesa_to_isl_render_format[rb_format];
   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              format << BRW_SURFACE_FORMAT_SHIFT);

   /* reloc */
   assert(mt->offset % mt->cpp == 0);
   surf[1] = brw_state_reloc(&brw->batch, offset + 4, mt->bo,
                             mt->offset +
                             intel_renderbuffer_get_tile_offsets(irb,
                                                                 &tile_x,
                                                                 &tile_y),
                             RELOC_WRITE);

   surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   surf[3] = (brw_get_surface_tiling_bits(mt->surf.tiling) |
              (mt->surf.row_pitch - 1) << BRW_SURFACE_PITCH_SHIFT);

   surf[4] = brw_get_surface_num_multisamples(mt->surf.samples);

   assert(devinfo->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
   /* Note that the low bits of these fields are missing, so
    * there's the possibility of getting in trouble.
    */
   assert(tile_x % 4 == 0);
   assert(tile_y % 2 == 0);
   surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
              (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
              (mt->surf.image_alignment_el.height == 4 ?
               BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));

   if (devinfo->gen < 6) {
      /* _NEW_COLOR */
      if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
          (ctx->Color.BlendEnabled & (1 << unit)))
         surf[0] |= BRW_SURFACE_BLEND_ENABLED;

      /* Per-channel write-disable bits from the GL color mask. */
      if (!ctx->Color.ColorMask[unit][0])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
      if (!ctx->Color.ColorMask[unit][1])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
      if (!ctx->Color.ColorMask[unit][2])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;

      /* As mentioned above, disable writes to the alpha component when the
       * renderbuffer is XRGB.
       */
      if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
          !ctx->Color.ColorMask[unit][3]) {
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
      }
   }

   return offset;
}
995
996 static void
997 update_renderbuffer_surfaces(struct brw_context *brw)
998 {
999 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1000 const struct gl_context *ctx = &brw->ctx;
1001
1002 /* _NEW_BUFFERS | _NEW_COLOR */
1003 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1004
1005 /* Render targets always start at binding table index 0. */
1006 const unsigned rt_start = 0;
1007
1008 uint32_t *surf_offsets = brw->wm.base.surf_offset;
1009
1010 /* Update surfaces for drawing buffers */
1011 if (fb->_NumColorDrawBuffers >= 1) {
1012 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
1013 struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
1014
1015 if (intel_renderbuffer(rb)) {
1016 surf_offsets[rt_start + i] = devinfo->gen >= 6 ?
1017 gen6_update_renderbuffer_surface(brw, rb, i, rt_start + i) :
1018 gen4_update_renderbuffer_surface(brw, rb, i, rt_start + i);
1019 } else {
1020 emit_null_surface_state(brw, fb, &surf_offsets[rt_start + i]);
1021 }
1022 }
1023 } else {
1024 emit_null_surface_state(brw, fb, &surf_offsets[rt_start]);
1025 }
1026
1027 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1028 }
1029
/* Pre-gen6 render-target surfaces.  _NEW_COLOR is required here because the
 * gen4 surface path bakes blend enable and the per-channel color write mask
 * into SURFACE_STATE (see the devinfo->gen < 6 branch in
 * gen4_update_renderbuffer_surface above).
 */
const struct brw_tracked_state brw_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR,
      .brw = BRW_NEW_BATCH,
   },
   .emit = update_renderbuffer_surfaces,
};
1038
/* Gen6+ render-target surfaces.  No _NEW_COLOR dependency (unlike the gen4
 * atom above); re-emits on aux-state changes instead.
 */
const struct brw_tracked_state gen6_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE,
   },
   .emit = update_renderbuffer_surfaces,
};
1047
/**
 * Emit texturing SURFACE_STATEs for the bound color draw buffers so the
 * fragment shader can read back render-target contents.  Only used when the
 * FS reads render targets and the non-coherent MESA_shader_framebuffer_fetch
 * path is not available.
 */
static void
update_renderbuffer_read_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const struct gl_context *ctx = &brw->ctx;

   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);

   if (wm_prog_data->has_render_target_reads &&
       !ctx->Extensions.MESA_shader_framebuffer_fetch) {
      /* _NEW_BUFFERS */
      const struct gl_framebuffer *fb = ctx->DrawBuffer;

      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
         const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
         /* Read surfaces live in their own binding-table segment. */
         const unsigned surf_index =
            wm_prog_data->binding_table.render_target_read_start + i;
         uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];

         if (irb) {
            const enum isl_format format = brw->mesa_to_isl_render_format[
               _mesa_get_render_format(ctx, intel_rb_format(irb))];
            assert(isl_format_supports_sampling(&brw->screen->devinfo,
                                                format));

            /* Override the target of the texture if the render buffer is a
             * single slice of a 3D texture (since the minimum array element
             * field of the surface state structure is ignored by the sampler
             * unit for 3D textures on some hardware), or if the render buffer
             * is a 1D array (since shaders always provide the array index
             * coordinate at the Z component to avoid state-dependent
             * recompiles when changing the texture target of the
             * framebuffer).
             */
            const GLenum target =
               (irb->mt->target == GL_TEXTURE_3D &&
                irb->layer_count == 1) ? GL_TEXTURE_2D :
               irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
               irb->mt->target;

            /* View a single LOD of the bound slice(s) as a texture. */
            const struct isl_view view = {
               .format = format,
               .base_level = irb->mt_level - irb->mt->first_level,
               .levels = 1,
               .base_array_layer = irb->mt_layer,
               .array_len = irb->layer_count,
               .swizzle = ISL_SWIZZLE_IDENTITY,
               .usage = ISL_SURF_USAGE_TEXTURE_BIT,
            };

            /* Sample without aux if aux has been disabled for this buffer
             * (e.g. because it is being written this frame).
             */
            enum isl_aux_usage aux_usage =
               intel_miptree_texture_aux_usage(brw, irb->mt, format);
            if (brw->draw_aux_buffer_disabled[i])
               aux_usage = ISL_AUX_USAGE_NONE;

            brw_emit_surface_state(brw, irb->mt, target, view, aux_usage,
                                   tex_mocs[devinfo->gen],
                                   surf_offset, surf_index,
                                   0);

         } else {
            emit_null_surface_state(brw, fb, surf_offset);
         }
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}
1119
/* Render-target read surfaces; depends on FS prog data for the
 * render_target_read_start binding-table segment.
 */
const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = update_renderbuffer_read_surfaces,
};
1129
1130 static void
1131 update_stage_texture_surfaces(struct brw_context *brw,
1132 const struct gl_program *prog,
1133 struct brw_stage_state *stage_state,
1134 bool for_gather, uint32_t plane)
1135 {
1136 if (!prog)
1137 return;
1138
1139 struct gl_context *ctx = &brw->ctx;
1140
1141 uint32_t *surf_offset = stage_state->surf_offset;
1142
1143 /* BRW_NEW_*_PROG_DATA */
1144 if (for_gather)
1145 surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
1146 else
1147 surf_offset += stage_state->prog_data->binding_table.plane_start[plane];
1148
1149 unsigned num_samplers = util_last_bit(prog->SamplersUsed);
1150 for (unsigned s = 0; s < num_samplers; s++) {
1151 surf_offset[s] = 0;
1152
1153 if (prog->SamplersUsed & (1 << s)) {
1154 const unsigned unit = prog->SamplerUnits[s];
1155
1156 /* _NEW_TEXTURE */
1157 if (ctx->Texture.Unit[unit]._Current) {
1158 brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather, plane);
1159 }
1160 }
1161 }
1162 }
1163
1164
1165 /**
1166 * Construct SURFACE_STATE objects for enabled textures.
1167 */
1168 static void
1169 brw_update_texture_surfaces(struct brw_context *brw)
1170 {
1171 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1172
1173 /* BRW_NEW_VERTEX_PROGRAM */
1174 struct gl_program *vs = brw->programs[MESA_SHADER_VERTEX];
1175
1176 /* BRW_NEW_TESS_PROGRAMS */
1177 struct gl_program *tcs = brw->programs[MESA_SHADER_TESS_CTRL];
1178 struct gl_program *tes = brw->programs[MESA_SHADER_TESS_EVAL];
1179
1180 /* BRW_NEW_GEOMETRY_PROGRAM */
1181 struct gl_program *gs = brw->programs[MESA_SHADER_GEOMETRY];
1182
1183 /* BRW_NEW_FRAGMENT_PROGRAM */
1184 struct gl_program *fs = brw->programs[MESA_SHADER_FRAGMENT];
1185
1186 /* _NEW_TEXTURE */
1187 update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
1188 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
1189 update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
1190 update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
1191 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);
1192
1193 /* emit alternate set of surface state for gather. this
1194 * allows the surface format to be overriden for only the
1195 * gather4 messages. */
1196 if (devinfo->gen < 8) {
1197 if (vs && vs->nir->info.uses_texture_gather)
1198 update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
1199 if (tcs && tcs->nir->info.uses_texture_gather)
1200 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
1201 if (tes && tes->nir->info.uses_texture_gather)
1202 update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
1203 if (gs && gs->nir->info.uses_texture_gather)
1204 update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
1205 if (fs && fs->nir->info.uses_texture_gather)
1206 update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
1207 }
1208
1209 if (fs) {
1210 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
1211 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
1212 }
1213
1214 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1215 }
1216
/* Texture surfaces for all graphics stages: depends on every stage's
 * program and prog data (binding-table layout) plus texture state.
 */
const struct brw_tracked_state brw_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_GEOMETRY_PROGRAM |
             BRW_NEW_GS_PROG_DATA |
             BRW_NEW_TESS_PROGRAMS |
             BRW_NEW_TCS_PROG_DATA |
             BRW_NEW_TES_PROG_DATA |
             BRW_NEW_TEXTURE_BUFFER |
             BRW_NEW_VERTEX_PROGRAM |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = brw_update_texture_surfaces,
};
1235
1236 static void
1237 brw_update_cs_texture_surfaces(struct brw_context *brw)
1238 {
1239 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1240
1241 /* BRW_NEW_COMPUTE_PROGRAM */
1242 struct gl_program *cs = brw->programs[MESA_SHADER_COMPUTE];
1243
1244 /* _NEW_TEXTURE */
1245 update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);
1246
1247 /* emit alternate set of surface state for gather. this
1248 * allows the surface format to be overriden for only the
1249 * gather4 messages.
1250 */
1251 if (devinfo->gen < 8) {
1252 if (cs && cs->nir->info.uses_texture_gather)
1253 update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
1254 }
1255
1256 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1257 }
1258
/* Compute-stage texture surfaces. */
const struct brw_tracked_state brw_cs_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_COMPUTE_PROGRAM |
             BRW_NEW_AUX_STATE,
   },
   .emit = brw_update_cs_texture_surfaces,
};
1268
1269
1270 void
1271 brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
1272 struct brw_stage_state *stage_state,
1273 struct brw_stage_prog_data *prog_data)
1274 {
1275 struct gl_context *ctx = &brw->ctx;
1276
1277 if (!prog)
1278 return;
1279
1280 uint32_t *ubo_surf_offsets =
1281 &stage_state->surf_offset[prog_data->binding_table.ubo_start];
1282
1283 for (int i = 0; i < prog->info.num_ubos; i++) {
1284 struct gl_buffer_binding *binding =
1285 &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];
1286
1287 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1288 emit_null_surface_state(brw, NULL, &ubo_surf_offsets[i]);
1289 } else {
1290 struct intel_buffer_object *intel_bo =
1291 intel_buffer_object(binding->BufferObject);
1292 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1293 if (!binding->AutomaticSize)
1294 size = MIN2(size, binding->Size);
1295 struct brw_bo *bo =
1296 intel_bufferobj_buffer(brw, intel_bo,
1297 binding->Offset,
1298 size, false);
1299 brw_create_constant_surface(brw, bo, binding->Offset,
1300 size,
1301 &ubo_surf_offsets[i]);
1302 }
1303 }
1304
1305 uint32_t *ssbo_surf_offsets =
1306 &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
1307
1308 for (int i = 0; i < prog->info.num_ssbos; i++) {
1309 struct gl_buffer_binding *binding =
1310 &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];
1311
1312 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1313 emit_null_surface_state(brw, NULL, &ssbo_surf_offsets[i]);
1314 } else {
1315 struct intel_buffer_object *intel_bo =
1316 intel_buffer_object(binding->BufferObject);
1317 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1318 if (!binding->AutomaticSize)
1319 size = MIN2(size, binding->Size);
1320 struct brw_bo *bo =
1321 intel_bufferobj_buffer(brw, intel_bo,
1322 binding->Offset,
1323 size, true);
1324 brw_create_buffer_surface(brw, bo, binding->Offset,
1325 size,
1326 &ssbo_surf_offsets[i]);
1327 }
1328 }
1329
1330 stage_state->push_constants_dirty = true;
1331
1332 if (prog->info.num_ubos || prog->info.num_ssbos)
1333 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1334 }
1335
1336 static void
1337 brw_upload_wm_ubo_surfaces(struct brw_context *brw)
1338 {
1339 struct gl_context *ctx = &brw->ctx;
1340 /* _NEW_PROGRAM */
1341 struct gl_program *prog = ctx->FragmentProgram._Current;
1342
1343 /* BRW_NEW_FS_PROG_DATA */
1344 brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
1345 }
1346
/* Fragment-stage UBO/SSBO surfaces. */
const struct brw_tracked_state brw_wm_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_wm_ubo_surfaces,
};
1356
1357 static void
1358 brw_upload_cs_ubo_surfaces(struct brw_context *brw)
1359 {
1360 struct gl_context *ctx = &brw->ctx;
1361 /* _NEW_PROGRAM */
1362 struct gl_program *prog =
1363 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1364
1365 /* BRW_NEW_CS_PROG_DATA */
1366 brw_upload_ubo_surfaces(brw, prog, &brw->cs.base, brw->cs.base.prog_data);
1367 }
1368
/* Compute-stage UBO/SSBO surfaces. */
const struct brw_tracked_state brw_cs_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_cs_ubo_surfaces,
};
1378
1379 void
1380 brw_upload_abo_surfaces(struct brw_context *brw,
1381 const struct gl_program *prog,
1382 struct brw_stage_state *stage_state,
1383 struct brw_stage_prog_data *prog_data)
1384 {
1385 struct gl_context *ctx = &brw->ctx;
1386 uint32_t *surf_offsets =
1387 &stage_state->surf_offset[prog_data->binding_table.abo_start];
1388
1389 if (prog->info.num_abos) {
1390 for (unsigned i = 0; i < prog->info.num_abos; i++) {
1391 struct gl_buffer_binding *binding =
1392 &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
1393 struct intel_buffer_object *intel_bo =
1394 intel_buffer_object(binding->BufferObject);
1395 struct brw_bo *bo =
1396 intel_bufferobj_buffer(brw, intel_bo, binding->Offset,
1397 intel_bo->Base.Size - binding->Offset,
1398 true);
1399
1400 brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
1401 binding->Offset, ISL_FORMAT_RAW,
1402 bo->size - binding->Offset, 1,
1403 RELOC_WRITE);
1404 }
1405
1406 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1407 }
1408 }
1409
1410 static void
1411 brw_upload_wm_abo_surfaces(struct brw_context *brw)
1412 {
1413 /* _NEW_PROGRAM */
1414 const struct gl_program *wm = brw->programs[MESA_SHADER_FRAGMENT];
1415
1416 if (wm) {
1417 /* BRW_NEW_FS_PROG_DATA */
1418 brw_upload_abo_surfaces(brw, wm, &brw->wm.base, brw->wm.base.prog_data);
1419 }
1420 }
1421
/* Fragment-stage atomic counter buffer surfaces. */
const struct brw_tracked_state brw_wm_abo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_ATOMIC_BUFFER |
             BRW_NEW_BATCH |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_abo_surfaces,
};
1431
1432 static void
1433 brw_upload_cs_abo_surfaces(struct brw_context *brw)
1434 {
1435 /* _NEW_PROGRAM */
1436 const struct gl_program *cp = brw->programs[MESA_SHADER_COMPUTE];
1437
1438 if (cp) {
1439 /* BRW_NEW_CS_PROG_DATA */
1440 brw_upload_abo_surfaces(brw, cp, &brw->cs.base, brw->cs.base.prog_data);
1441 }
1442 }
1443
/* Compute-stage atomic counter buffer surfaces. */
const struct brw_tracked_state brw_cs_abo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_ATOMIC_BUFFER |
             BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA,
   },
   .emit = brw_upload_cs_abo_surfaces,
};
1453
1454 static void
1455 brw_upload_cs_image_surfaces(struct brw_context *brw)
1456 {
1457 /* _NEW_PROGRAM */
1458 const struct gl_program *cp = brw->programs[MESA_SHADER_COMPUTE];
1459
1460 if (cp) {
1461 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1462 brw_upload_image_surfaces(brw, cp, &brw->cs.base,
1463 brw->cs.base.prog_data);
1464 }
1465 }
1466
/* Compute-stage shader image surfaces. */
const struct brw_tracked_state brw_cs_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_AUX_STATE |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_cs_image_surfaces,
};
1477
1478 static uint32_t
1479 get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
1480 {
1481 const struct gen_device_info *devinfo = &brw->screen->devinfo;
1482 enum isl_format hw_format = brw_isl_format_for_mesa_format(format);
1483 if (access == GL_WRITE_ONLY) {
1484 return hw_format;
1485 } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
1486 /* Typed surface reads support a very limited subset of the shader
1487 * image formats. Translate it into the closest format the
1488 * hardware supports.
1489 */
1490 return isl_lower_storage_image_format(devinfo, hw_format);
1491 } else {
1492 /* The hardware doesn't actually support a typed format that we can use
1493 * so we have to fall back to untyped read/write messages.
1494 */
1495 return ISL_FORMAT_RAW;
1496 }
1497 }
1498
/* Fill @param with safe defaults: zeroed sizes/strides/offsets, the given
 * binding-table index, and swizzling disabled.  Used as the base for the
 * more specific update_*_image_param() helpers and for invalid image units.
 * Note: @brw and @u are unused here; they are kept for signature symmetry
 * with the other update_*_image_param() helpers.
 */
static void
update_default_image_param(struct brw_context *brw,
                           struct gl_image_unit *u,
                           unsigned surface_idx,
                           struct brw_image_param *param)
{
   memset(param, 0, sizeof(*param));
   param->surface_idx = surface_idx;
   /* Set the swizzling shifts to all-ones to effectively disable swizzling --
    * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
    * detailed explanation of these parameters.
    */
   param->swizzling[0] = 0xff;
   param->swizzling[1] = 0xff;
}
1514
1515 static void
1516 update_buffer_image_param(struct brw_context *brw,
1517 struct gl_image_unit *u,
1518 unsigned surface_idx,
1519 struct brw_image_param *param)
1520 {
1521 struct gl_buffer_object *obj = u->TexObj->BufferObject;
1522 const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
1523 update_default_image_param(brw, u, surface_idx, param);
1524
1525 param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
1526 param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
1527 }
1528
1529 static unsigned
1530 get_image_num_layers(const struct intel_mipmap_tree *mt, GLenum target,
1531 unsigned level)
1532 {
1533 if (target == GL_TEXTURE_CUBE_MAP)
1534 return 6;
1535
1536 return target == GL_TEXTURE_3D ?
1537 minify(mt->surf.logical_level0_px.depth, level) :
1538 mt->surf.logical_level0_px.array_len;
1539 }
1540
/**
 * Emit the SURFACE_STATE and fill the brw_image_param for one shader image
 * unit.  Three paths: buffer textures, regular miptrees with a typed
 * format, and the RAW fallback (untyped messages against the raw BO) when
 * no typed format is usable.  Invalid units get a null surface and default
 * params.
 */
static void
update_image_surface(struct brw_context *brw,
                     struct gl_image_unit *u,
                     GLenum access,
                     unsigned surface_idx,
                     uint32_t *surf_offset,
                     struct brw_image_param *param)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
      struct gl_texture_object *obj = u->TexObj;
      const unsigned format = get_image_format(brw, u->_ActualFormat, access);

      if (obj->Target == GL_TEXTURE_BUFFER) {
         struct intel_buffer_object *intel_obj =
            intel_buffer_object(obj->BufferObject);
         /* RAW surfaces address individual bytes, so their "texel" is 1. */
         const unsigned texel_size = (format == ISL_FORMAT_RAW ? 1 :
                                      _mesa_get_format_bytes(u->_ActualFormat));

         brw_emit_buffer_surface_state(
            brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
            format, intel_obj->Base.Size, texel_size,
            access != GL_READ_ONLY ? RELOC_WRITE : 0);

         update_buffer_image_param(brw, u, surface_idx, param);

      } else {
         struct intel_texture_object *intel_obj = intel_texture_object(obj);
         struct intel_mipmap_tree *mt = intel_obj->mt;
         /* Non-layered bindings see exactly one layer. */
         const unsigned num_layers = u->Layered ?
            get_image_num_layers(mt, obj->Target, u->Level) : 1;

         /* MinLevel/MinLayer account for glTextureView() remapping. */
         struct isl_view view = {
            .format = format,
            .base_level = obj->MinLevel + u->Level,
            .levels = 1,
            .base_array_layer = obj->MinLayer + u->_Layer,
            .array_len = num_layers,
            .swizzle = ISL_SWIZZLE_IDENTITY,
            .usage = ISL_SURF_USAGE_STORAGE_BIT,
         };

         if (format == ISL_FORMAT_RAW) {
            /* Untyped fallback: expose the miptree's whole BO as raw data. */
            brw_emit_buffer_surface_state(
               brw, surf_offset, mt->bo, mt->offset,
               format, mt->bo->size - mt->offset, 1 /* pitch */,
               access != GL_READ_ONLY ? RELOC_WRITE : 0);

         } else {
            const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
            /* Images are emitted without aux; the bound range must already
             * be resolved.
             */
            assert(!intel_miptree_has_color_unresolved(mt,
                                                       view.base_level, 1,
                                                       view.base_array_layer,
                                                       view.array_len));
            brw_emit_surface_state(brw, mt, mt->target, view,
                                   ISL_AUX_USAGE_NONE, tex_mocs[devinfo->gen],
                                   surf_offset, surf_index,
                                   access == GL_READ_ONLY ? 0 : RELOC_WRITE);
         }

         isl_surf_fill_image_param(&brw->isl_dev, param, &mt->surf, &view);
         param->surface_idx = surface_idx;
      }

   } else {
      emit_null_surface_state(brw, NULL, surf_offset);
      update_default_image_param(brw, u, surface_idx, param);
   }
}
1611
1612 void
1613 brw_upload_image_surfaces(struct brw_context *brw,
1614 const struct gl_program *prog,
1615 struct brw_stage_state *stage_state,
1616 struct brw_stage_prog_data *prog_data)
1617 {
1618 assert(prog);
1619 struct gl_context *ctx = &brw->ctx;
1620
1621 if (prog->info.num_images) {
1622 for (unsigned i = 0; i < prog->info.num_images; i++) {
1623 struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
1624 const unsigned surf_idx = prog_data->binding_table.image_start + i;
1625
1626 update_image_surface(brw, u, prog->sh.ImageAccess[i],
1627 surf_idx,
1628 &stage_state->surf_offset[surf_idx],
1629 &prog_data->image_param[i]);
1630 }
1631
1632 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1633 /* This may have changed the image metadata dependent on the context
1634 * image unit state and passed to the program as uniforms, make sure
1635 * that push and pull constants are reuploaded.
1636 */
1637 brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
1638 }
1639 }
1640
1641 static void
1642 brw_upload_wm_image_surfaces(struct brw_context *brw)
1643 {
1644 /* BRW_NEW_FRAGMENT_PROGRAM */
1645 const struct gl_program *wm = brw->programs[MESA_SHADER_FRAGMENT];
1646
1647 if (wm) {
1648 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1649 brw_upload_image_surfaces(brw, wm, &brw->wm.base,
1650 brw->wm.base.prog_data);
1651 }
1652 }
1653
/* Fragment-stage shader image surfaces. */
const struct brw_tracked_state brw_wm_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_wm_image_surfaces,
};
1665
1666 static void
1667 brw_upload_cs_work_groups_surface(struct brw_context *brw)
1668 {
1669 struct gl_context *ctx = &brw->ctx;
1670 /* _NEW_PROGRAM */
1671 struct gl_program *prog =
1672 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1673 /* BRW_NEW_CS_PROG_DATA */
1674 const struct brw_cs_prog_data *cs_prog_data =
1675 brw_cs_prog_data(brw->cs.base.prog_data);
1676
1677 if (prog && cs_prog_data->uses_num_work_groups) {
1678 const unsigned surf_idx =
1679 cs_prog_data->binding_table.work_groups_start;
1680 uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
1681 struct brw_bo *bo;
1682 uint32_t bo_offset;
1683
1684 if (brw->compute.num_work_groups_bo == NULL) {
1685 bo = NULL;
1686 intel_upload_data(brw,
1687 (void *)brw->compute.num_work_groups,
1688 3 * sizeof(GLuint),
1689 sizeof(GLuint),
1690 &bo,
1691 &bo_offset);
1692 } else {
1693 bo = brw->compute.num_work_groups_bo;
1694 bo_offset = brw->compute.num_work_groups_offset;
1695 }
1696
1697 brw_emit_buffer_surface_state(brw, surf_offset,
1698 bo, bo_offset,
1699 ISL_FORMAT_RAW,
1700 3 * sizeof(GLuint), 1,
1701 RELOC_WRITE);
1702 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1703 }
1704 }
1705
/* gl_NumWorkGroups surface for compute dispatch. */
const struct brw_tracked_state brw_cs_work_groups_surface = {
   .dirty = {
      .brw = BRW_NEW_CS_PROG_DATA |
             BRW_NEW_CS_WORK_GROUPS
   },
   .emit = brw_upload_cs_work_groups_surface,
};