i965: Use PTE MOCS for all external buffers
src/mesa/drivers/dri/i965/brw_wm_surface_state.c
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "compiler/nir/nir.h"
#include "main/context.h"
#include "main/blend.h"
#include "main/mtypes.h"
#include "main/samplerobj.h"
#include "main/shaderimage.h"
#include "main/teximage.h"
#include "program/prog_parameter.h"
#include "program/prog_instruction.h"
#include "main/framebuffer.h"
#include "main/shaderapi.h"

#include "isl/isl.h"

#include "intel_mipmap_tree.h"
#include "intel_batchbuffer.h"
#include "intel_tex.h"
#include "intel_fbo.h"
#include "intel_buffer_objects.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "brw_wm.h"

static const uint32_t wb_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_WB,
   [9] = SKL_MOCS_WB,
   [10] = CNL_MOCS_WB,
};

static const uint32_t pte_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_PTE,
   [9] = SKL_MOCS_PTE,
   [10] = CNL_MOCS_PTE,
};

static uint32_t
get_tex_mocs(const struct gen_device_info *devinfo, struct brw_bo *bo)
{
   return (bo && bo->external ? pte_mocs : wb_mocs)[devinfo->gen];
}
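
/* Illustrative note (not driver code): on Gen9, for example, the lookup
 * above resolves as
 *
 *    get_tex_mocs(devinfo, external_bo) == pte_mocs[9] == SKL_MOCS_PTE
 *    get_tex_mocs(devinfo, internal_bo) == wb_mocs[9]  == SKL_MOCS_WB
 *
 * where external_bo/internal_bo are hypothetical buffer objects.  External
 * (shared or scanout) buffers use PTE MOCS so cacheability is taken from
 * the kernel's page-table entries rather than hard-coded write-back, which
 * could otherwise leave stale data in the LLC when another agent (e.g. the
 * display engine) reads the BO.
 */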

static void
get_isl_surf(struct brw_context *brw, struct intel_mipmap_tree *mt,
             GLenum target, struct isl_view *view,
             uint32_t *tile_x, uint32_t *tile_y,
             uint32_t *offset, struct isl_surf *surf)
{
   *surf = mt->surf;

   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const enum isl_dim_layout dim_layout =
      get_isl_dim_layout(devinfo, mt->surf.tiling, target);

   if (surf->dim_layout == dim_layout)
      return;

   /* The layout of the specified texture target is not compatible with the
    * actual layout of the miptree structure in memory -- you're entering
    * dangerous territory.  This can only possibly work if you only intended
    * to access a single level and slice of the texture, and the hardware
    * supports the tile offset feature in order to allow non-tile-aligned
    * base offsets, since we'll have to point the hardware to the first
    * texel of the level instead of relying on the usual base level/layer
    * controls.
    */
   assert(devinfo->has_surface_tile_offset);
   assert(view->levels == 1 && view->array_len == 1);
   assert(*tile_x == 0 && *tile_y == 0);

   *offset += intel_miptree_get_tile_offsets(mt, view->base_level,
                                             view->base_array_layer,
                                             tile_x, tile_y);

   /* Minify the logical dimensions of the texture. */
   const unsigned l = view->base_level - mt->first_level;
   surf->logical_level0_px.width = minify(surf->logical_level0_px.width, l);
   surf->logical_level0_px.height = surf->dim <= ISL_SURF_DIM_1D ? 1 :
      minify(surf->logical_level0_px.height, l);
   surf->logical_level0_px.depth = surf->dim <= ISL_SURF_DIM_2D ? 1 :
      minify(surf->logical_level0_px.depth, l);

   /* Only the base level and layer can be addressed with the overridden
    * layout.
    */
   surf->logical_level0_px.array_len = 1;
   surf->levels = 1;
   surf->dim_layout = dim_layout;

   /* The requested slice of the texture is now at the base level and
    * layer.
    */
   view->base_level = 0;
   view->base_array_layer = 0;
}
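
/* Worked example of the override above (hypothetical numbers): accessing
 * base_level 2 of a 64x64 2D miptree with first_level 0 gives l = 2, so
 * logical_level0_px becomes 16x16 (minify(64, 2) == 16), levels becomes 1,
 * and the view's base_level is rebased to 0 while the tile offset computed
 * by intel_miptree_get_tile_offsets() points the hardware at the first
 * texel of the original level 2.
 */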

static void
brw_emit_surface_state(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       GLenum target, struct isl_view view,
                       enum isl_aux_usage aux_usage,
                       uint32_t mocs, uint32_t *surf_offset, int surf_index,
                       unsigned reloc_flags)
{
   uint32_t tile_x = mt->level[0].level_x;
   uint32_t tile_y = mt->level[0].level_y;
   uint32_t offset = mt->offset;

   struct isl_surf surf;

   get_isl_surf(brw, mt, target, &view, &tile_x, &tile_y, &offset, &surf);

   union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };

   struct brw_bo *aux_bo = NULL;
   struct isl_surf *aux_surf = NULL;
   uint64_t aux_offset = 0;
   switch (aux_usage) {
   case ISL_AUX_USAGE_MCS:
   case ISL_AUX_USAGE_CCS_D:
   case ISL_AUX_USAGE_CCS_E:
      aux_surf = &mt->mcs_buf->surf;
      aux_bo = mt->mcs_buf->bo;
      aux_offset = mt->mcs_buf->offset;
      break;

   case ISL_AUX_USAGE_HIZ:
      aux_surf = &mt->hiz_buf->surf;
      aux_bo = mt->hiz_buf->bo;
      aux_offset = 0;
      break;

   case ISL_AUX_USAGE_NONE:
      break;
   }

   if (aux_usage != ISL_AUX_USAGE_NONE) {
      /* We only really need a clear color if we also have an auxiliary
       * surface.  Without one, it does nothing.
       */
      clear_color = mt->fast_clear_color;
   }

   void *state = brw_state_batch(brw,
                                 brw->isl_dev.ss.size,
                                 brw->isl_dev.ss.align,
                                 surf_offset);

   /* Use the possibly-overridden local copy of the surface, not mt->surf,
    * so the single level/layer remapping done by get_isl_surf() above
    * actually takes effect.
    */
   isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
                       .address = brw_state_reloc(&brw->batch,
                                                  *surf_offset +
                                                  brw->isl_dev.ss.addr_offset,
                                                  mt->bo, offset, reloc_flags),
                       .aux_surf = aux_surf, .aux_usage = aux_usage,
                       .aux_address = aux_offset,
                       .mocs = mocs, .clear_color = clear_color,
                       .x_offset_sa = tile_x, .y_offset_sa = tile_y);
   if (aux_surf) {
      /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
       * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
       * contain other control information.  Since buffer addresses are always
       * on 4k boundaries (and thus have their lower 12 bits zero), we can use
       * an ordinary reloc to do the necessary address translation.
       *
       * FIXME: move to the point of assignment.
       */
      assert((aux_offset & 0xfff) == 0);
      uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
      *aux_addr = brw_state_reloc(&brw->batch,
                                  *surf_offset +
                                  brw->isl_dev.ss.aux_addr_offset,
                                  aux_bo, *aux_addr,
                                  reloc_flags);
   }
}
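
/* Worked illustration of the reloc trick above (hypothetical numbers): if
 * the aux BO lands at GPU address 0x12345000 and the control bits already
 * written into the low 12 bits of *aux_addr are 0x5c3, the relocation adds
 * the two:
 *
 *    final DWORD = 0x12345000 + 0x5c3 = 0x123455c3
 *
 * Neither field can corrupt the other, because the 4k alignment guarantees
 * the address contributes nothing below bit 12.
 */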

static uint32_t
gen6_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 unsigned unit,
                                 uint32_t surf_index)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;

   assert(brw_render_target_supported(brw, rb));

   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }
   enum isl_format isl_format = brw->mesa_to_isl_render_format[rb_format];

   enum isl_aux_usage aux_usage =
      brw->draw_aux_buffer_disabled[unit] ? ISL_AUX_USAGE_NONE :
      intel_miptree_render_aux_usage(brw, mt, isl_format,
                                     ctx->Color.BlendEnabled & (1 << unit));

   struct isl_view view = {
      .format = isl_format,
      .base_level = irb->mt_level - irb->mt->first_level,
      .levels = 1,
      .base_array_layer = irb->mt_layer,
      .array_len = MAX2(irb->layer_count, 1),
      .swizzle = ISL_SWIZZLE_IDENTITY,
      .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
   };

   uint32_t offset;
   brw_emit_surface_state(brw, mt, mt->target, view, aux_usage,
                          pte_mocs[devinfo->gen],
                          &offset, surf_index,
                          RELOC_WRITE);
   return offset;
}

GLuint
translate_tex_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_1D:
   case GL_TEXTURE_1D_ARRAY_EXT:
      return BRW_SURFACE_1D;

   case GL_TEXTURE_RECTANGLE_NV:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_2D:
   case GL_TEXTURE_2D_ARRAY_EXT:
   case GL_TEXTURE_EXTERNAL_OES:
   case GL_TEXTURE_2D_MULTISAMPLE:
   case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_3D:
      return BRW_SURFACE_3D;

   case GL_TEXTURE_CUBE_MAP:
   case GL_TEXTURE_CUBE_MAP_ARRAY:
      return BRW_SURFACE_CUBE;

   default:
      unreachable("not reached");
   }
}

uint32_t
brw_get_surface_tiling_bits(enum isl_tiling tiling)
{
   switch (tiling) {
   case ISL_TILING_X:
      return BRW_SURFACE_TILED;
   case ISL_TILING_Y0:
      return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
   default:
      return 0;
   }
}


uint32_t
brw_get_surface_num_multisamples(unsigned num_samples)
{
   if (num_samples > 1)
      return BRW_SURFACE_MULTISAMPLECOUNT_4;
   else
      return BRW_SURFACE_MULTISAMPLECOUNT_1;
}

/**
 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
 * swizzling.
 */
int
brw_get_texture_swizzle(const struct gl_context *ctx,
                        const struct gl_texture_object *t)
{
   const struct gl_texture_image *img = t->Image[0][t->BaseLevel];

   int swizzles[SWIZZLE_NIL + 1] = {
      SWIZZLE_X,
      SWIZZLE_Y,
      SWIZZLE_Z,
      SWIZZLE_W,
      SWIZZLE_ZERO,
      SWIZZLE_ONE,
      SWIZZLE_NIL
   };

   if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
       img->_BaseFormat == GL_DEPTH_STENCIL) {
      GLenum depth_mode = t->DepthMode;

      /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
       * with depth component data specified with a sized internal format.
       * Otherwise, it's left at the old default, GL_LUMINANCE.
       */
      if (_mesa_is_gles3(ctx) &&
          img->InternalFormat != GL_DEPTH_COMPONENT &&
          img->InternalFormat != GL_DEPTH_STENCIL) {
         depth_mode = GL_RED;
      }

      switch (depth_mode) {
      case GL_ALPHA:
         swizzles[0] = SWIZZLE_ZERO;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_LUMINANCE:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
         break;
      case GL_INTENSITY:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_RED:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_ONE;
         break;
      }
   }

   GLenum datatype = _mesa_get_format_datatype(img->TexFormat);

   /* If the texture's format is alpha-only, force R, G, and B to
    * 0.0.  Similarly, if the texture's format has no alpha channel,
    * force the alpha value read to 1.0.  This allows for the
    * implementation to use an RGBA texture for any of these formats
    * without leaking any unexpected values.
    */
   switch (img->_BaseFormat) {
   case GL_ALPHA:
      swizzles[0] = SWIZZLE_ZERO;
      swizzles[1] = SWIZZLE_ZERO;
      swizzles[2] = SWIZZLE_ZERO;
      break;
   case GL_LUMINANCE:
      if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
      }
      break;
   case GL_LUMINANCE_ALPHA:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_W;
      }
      break;
   case GL_INTENSITY:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
      }
      break;
   case GL_RED:
   case GL_RG:
   case GL_RGB:
      if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0 ||
          img->TexFormat == MESA_FORMAT_RGB_DXT1 ||
          img->TexFormat == MESA_FORMAT_SRGB_DXT1)
         swizzles[3] = SWIZZLE_ONE;
      break;
   }

   return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
                        swizzles[GET_SWZ(t->_Swizzle, 1)],
                        swizzles[GET_SWZ(t->_Swizzle, 2)],
                        swizzles[GET_SWZ(t->_Swizzle, 3)]);
}

/**
 * Convert a swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED).  The mappings are
 *
 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
 *         0          1          2          3             4            5
 *         4          5          6          7             0            1
 *   SCS_RED, SCS_GREEN,  SCS_BLUE, SCS_ALPHA,     SCS_ZERO,     SCS_ONE
 *
 * which is simply adding 4 then modding by 8 (or anding with 7).
 *
 * We then may need to apply workarounds for textureGather hardware bugs.
 */
static unsigned
swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
{
   unsigned scs = (swizzle + 4) & 7;

   return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
}
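
/* Worked example of the mapping above: SWIZZLE_X is 0, so (0 + 4) & 7 == 4
 * == HSW_SCS_RED; SWIZZLE_ZERO is 4, so (4 + 4) & 7 == 0 == HSW_SCS_ZERO.
 * With need_green_to_blue set (the Haswell textureGather workaround),
 * SWIZZLE_Y maps to HSW_SCS_BLUE instead of HSW_SCS_GREEN.
 */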

static bool
brw_aux_surface_disabled(const struct brw_context *brw,
                         const struct intel_mipmap_tree *mt)
{
   const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;

   for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
      const struct intel_renderbuffer *irb =
         intel_renderbuffer(fb->_ColorDrawBuffers[i]);

      if (irb && irb->mt == mt)
         return brw->draw_aux_buffer_disabled[i];
   }

   return false;
}

static void
brw_update_texture_surface(struct gl_context *ctx,
                           unsigned unit,
                           uint32_t *surf_offset,
                           bool for_gather,
                           bool for_txf,
                           uint32_t plane)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;

   if (obj->Target == GL_TEXTURE_BUFFER) {
      brw_update_buffer_texture_surface(ctx, unit, surf_offset);

   } else {
      struct intel_texture_object *intel_obj = intel_texture_object(obj);
      struct intel_mipmap_tree *mt = intel_obj->mt;

      if (plane > 0) {
         if (mt->plane[plane - 1] == NULL)
            return;
         mt = mt->plane[plane - 1];
      }

      struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
      /* If this is a view with restricted NumLayers, then our effective depth
       * is not just the miptree depth.
       */
      unsigned view_num_layers;
      if (obj->Immutable && obj->Target != GL_TEXTURE_3D) {
         view_num_layers = obj->NumLayers;
      } else {
         view_num_layers = mt->surf.dim == ISL_SURF_DIM_3D ?
                              mt->surf.logical_level0_px.depth :
                              mt->surf.logical_level0_px.array_len;
      }

      /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
       * texturing functions that return a float, as our code generation always
       * selects the .x channel (which would always be 0).
       */
      struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
      const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
         (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
          firstImage->_BaseFormat == GL_DEPTH_STENCIL);
      const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
                                brw_get_texture_swizzle(&brw->ctx, obj));

      mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
      enum isl_format format = translate_tex_format(brw, mesa_fmt,
                                                    for_txf ? GL_DECODE_EXT :
                                                    sampler->sRGBDecode);

      /* Implement gen6 and gen7 gather work-around */
      bool need_green_to_blue = false;
      if (for_gather) {
         if (devinfo->gen == 7 && (format == ISL_FORMAT_R32G32_FLOAT ||
                                   format == ISL_FORMAT_R32G32_SINT ||
                                   format == ISL_FORMAT_R32G32_UINT)) {
            format = ISL_FORMAT_R32G32_FLOAT_LD;
            need_green_to_blue = devinfo->is_haswell;
         } else if (devinfo->gen == 6) {
            /* Sandybridge's gather4 message is broken for integer formats.
             * To work around this, we pretend the surface is UNORM for
             * 8 or 16-bit formats, and emit shader instructions to recover
             * the real INT/UINT value.  For 32-bit formats, we pretend
             * the surface is FLOAT, and simply reinterpret the resulting
             * bits.
             */
            switch (format) {
            case ISL_FORMAT_R8_SINT:
            case ISL_FORMAT_R8_UINT:
               format = ISL_FORMAT_R8_UNORM;
               break;

            case ISL_FORMAT_R16_SINT:
            case ISL_FORMAT_R16_UINT:
               format = ISL_FORMAT_R16_UNORM;
               break;

            case ISL_FORMAT_R32_SINT:
            case ISL_FORMAT_R32_UINT:
               format = ISL_FORMAT_R32_FLOAT;
               break;

            default:
               break;
            }
         }
      }

      if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
         if (devinfo->gen <= 7) {
            assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
            mt = mt->r8stencil_mt;
         } else {
            mt = mt->stencil_mt;
         }
         format = ISL_FORMAT_R8_UINT;
      } else if (devinfo->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
         assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
         mt = mt->r8stencil_mt;
         format = ISL_FORMAT_R8_UINT;
      }

      const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];

      struct isl_view view = {
         .format = format,
         .base_level = obj->MinLevel + obj->BaseLevel,
         .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
         .base_array_layer = obj->MinLayer,
         .array_len = view_num_layers,
         .swizzle = {
            .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
            .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
            .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
            .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
         },
         .usage = ISL_SURF_USAGE_TEXTURE_BIT,
      };

      if (obj->Target == GL_TEXTURE_CUBE_MAP ||
          obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
         view.usage |= ISL_SURF_USAGE_CUBE_BIT;

      enum isl_aux_usage aux_usage =
         intel_miptree_texture_aux_usage(brw, mt, format);

      if (brw_aux_surface_disabled(brw, mt))
         aux_usage = ISL_AUX_USAGE_NONE;

      brw_emit_surface_state(brw, mt, mt->target, view, aux_usage,
                             get_tex_mocs(devinfo, mt->bo),
                             surf_offset, surf_index,
                             0);
   }
}

void
brw_emit_buffer_surface_state(struct brw_context *brw,
                              uint32_t *out_offset,
                              struct brw_bo *bo,
                              unsigned buffer_offset,
                              unsigned surface_format,
                              unsigned buffer_size,
                              unsigned pitch,
                              unsigned reloc_flags)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *dw = brw_state_batch(brw,
                                  brw->isl_dev.ss.size,
                                  brw->isl_dev.ss.align,
                                  out_offset);

   isl_buffer_fill_state(&brw->isl_dev, dw,
                         .address = !bo ? buffer_offset :
                                    brw_state_reloc(&brw->batch,
                                                    *out_offset +
                                                    brw->isl_dev.ss.addr_offset,
                                                    bo, buffer_offset,
                                                    reloc_flags),
                         .size = buffer_size,
                         .format = surface_format,
                         .stride = pitch,
                         .mocs = get_tex_mocs(devinfo, bo));
}

void
brw_update_buffer_texture_surface(struct gl_context *ctx,
                                  unsigned unit,
                                  uint32_t *surf_offset)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_buffer_object *intel_obj =
      intel_buffer_object(tObj->BufferObject);
   uint32_t size = tObj->BufferSize;
   struct brw_bo *bo = NULL;
   mesa_format format = tObj->_BufferObjectFormat;
   const enum isl_format isl_format = brw_isl_format_for_mesa_format(format);
   int texel_size = _mesa_get_format_bytes(format);

   if (intel_obj) {
      size = MIN2(size, intel_obj->Base.Size);
      bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size,
                                  false);
   }

   /* The ARB_texture_buffer_specification says:
    *
    *    "The number of texels in the buffer texture's texel array is given by
    *
    *       floor(<buffer_size> / (<components> * sizeof(<base_type>)),
    *
    *     where <buffer_size> is the size of the buffer object, in basic
    *     machine units and <components> and <base_type> are the element count
    *     and base data type for elements, as specified in Table X.1.  The
    *     number of texels in the texel array is then clamped to the
    *     implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
    *
    * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
    * so that when ISL divides by stride to obtain the number of texels, that
    * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
    */
   size = MIN2(size, ctx->Const.MaxTextureBufferSize * (unsigned) texel_size);

   if (isl_format == ISL_FORMAT_UNSUPPORTED) {
      _mesa_problem(NULL, "bad format %s for texture buffer\n",
                    _mesa_get_format_name(format));
   }

   brw_emit_buffer_surface_state(brw, surf_offset, bo,
                                 tObj->BufferOffset,
                                 isl_format,
                                 size,
                                 texel_size,
                                 0);
}
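
/* Worked example of the clamp above (hypothetical numbers): for a
 * GL_RGBA32F buffer texture, texel_size is 16 bytes.  With
 * MaxTextureBufferSize == 1 << 27 texels, a 4 GiB buffer object would be
 * clamped to (1 << 27) * 16 == 2 GiB, so the texel count ISL computes
 * (bytes / stride) never exceeds MAX_TEXTURE_BUFFER_SIZE.
 */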

/**
 * Create the constant buffer surface.  Vertex/fragment shader constants will
 * be read from this buffer with Data Port Read instructions/messages.
 */
void
brw_create_constant_surface(struct brw_context *brw,
                            struct brw_bo *bo,
                            uint32_t offset,
                            uint32_t size,
                            uint32_t *out_offset)
{
   brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
                                 ISL_FORMAT_R32G32B32A32_FLOAT,
                                 size, 1, 0);
}

/**
 * Create the buffer surface.  Shader buffer variables will be
 * read from / written to this buffer with Data Port Read/Write
 * instructions/messages.
 */
void
brw_create_buffer_surface(struct brw_context *brw,
                          struct brw_bo *bo,
                          uint32_t offset,
                          uint32_t size,
                          uint32_t *out_offset)
{
   /* Use a raw surface so we can reuse existing untyped read/write/atomic
    * messages.  We need these specifically for the fragment shader since they
    * include a pixel mask header that we need to ensure correct behavior
    * with helper invocations, which cannot write to the buffer.
    */
   brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
                                 ISL_FORMAT_RAW,
                                 size, 1, RELOC_WRITE);
}

/**
 * Set up a binding table entry for use by stream output logic (transform
 * feedback).
 *
 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
 */
void
brw_update_sol_surface(struct brw_context *brw,
                       struct gl_buffer_object *buffer_obj,
                       uint32_t *out_offset, unsigned num_vector_components,
                       unsigned stride_dwords, unsigned offset_dwords)
{
   struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
   uint32_t offset_bytes = 4 * offset_dwords;
   struct brw_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
                                              offset_bytes,
                                              buffer_obj->Size - offset_bytes,
                                              true);
   uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
   uint32_t pitch_minus_1 = 4 * stride_dwords - 1;
   size_t size_dwords = buffer_obj->Size / 4;
   uint32_t buffer_size_minus_1, width, height, depth, surface_format;

   /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
    * too big to map using a single binding table entry?
    */
   assert((size_dwords - offset_dwords) / stride_dwords
          <= BRW_MAX_NUM_BUFFER_ENTRIES);

   if (size_dwords > offset_dwords + num_vector_components) {
      /* There is room for at least 1 transform feedback output in the buffer.
       * Compute the number of additional transform feedback outputs the
       * buffer has room for.
       */
      buffer_size_minus_1 =
         (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
   } else {
      /* There isn't even room for a single transform feedback output in the
       * buffer.  We can't configure the binding table entry to prevent output
       * entirely; we'll have to rely on the geometry shader to detect
       * overflow.  But to minimize the damage in case of a bug, set up the
       * binding table entry to just allow a single output.
       */
      buffer_size_minus_1 = 0;
   }
   width = buffer_size_minus_1 & 0x7f;
   height = (buffer_size_minus_1 & 0xfff80) >> 7;
   depth = (buffer_size_minus_1 & 0x7f00000) >> 20;

   switch (num_vector_components) {
   case 1:
      surface_format = ISL_FORMAT_R32_FLOAT;
      break;
   case 2:
      surface_format = ISL_FORMAT_R32G32_FLOAT;
      break;
   case 3:
      surface_format = ISL_FORMAT_R32G32B32_FLOAT;
      break;
   case 4:
      surface_format = ISL_FORMAT_R32G32B32A32_FLOAT;
      break;
   default:
      unreachable("Invalid vector size for transform feedback output");
   }

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
             BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
             surface_format << BRW_SURFACE_FORMAT_SHIFT |
             BRW_SURFACE_RC_READ_WRITE;
   surf[1] = brw_state_reloc(&brw->batch,
                             *out_offset + 4, bo, offset_bytes, RELOC_WRITE);
   surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
              height << BRW_SURFACE_HEIGHT_SHIFT);
   surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = 0;
   surf[5] = 0;
}
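
/* Worked example of the field packing above (hypothetical value): for
 * SURFTYPE_BUFFER the entry count minus one is split across the width
 * (bits 0..6), height (bits 7..19) and depth (bits 20..26) fields.  For
 * buffer_size_minus_1 == 0x123456:
 *
 *    width  = 0x123456 & 0x7f              == 0x56
 *    height = (0x123456 & 0xfff80) >> 7    == 0x468
 *    depth  = (0x123456 & 0x7f00000) >> 20 == 0x1
 */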

/* Creates a new WM constant buffer reflecting the current fragment program's
 * constants, if needed by the fragment program.
 *
 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
 * state atom.
 */
static void
brw_upload_wm_pull_constants(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->wm.base;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct brw_program *fp =
      (struct brw_program *) brw->programs[MESA_SHADER_FRAGMENT];

   /* BRW_NEW_FS_PROG_DATA */
   struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;

   _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
   /* _NEW_PROGRAM_CONSTANTS */
   brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
                             stage_state, prog_data);
}

const struct brw_tracked_state brw_wm_pull_constants = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_pull_constants,
};

/**
 * Creates a null renderbuffer surface.
 *
 * This is used when the shader doesn't write to any color output.  An FB
 * write to target 0 will still be emitted, because that's how the thread is
 * terminated (and computed depth is returned), so we need to have the
 * hardware discard the target 0 color output.
 */
static void
emit_null_surface_state(struct brw_context *brw,
                        const struct gl_framebuffer *fb,
                        uint32_t *out_offset)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t *surf = brw_state_batch(brw,
                                    brw->isl_dev.ss.size,
                                    brw->isl_dev.ss.align,
                                    out_offset);

   /* Use the fb dimensions or 1x1x1 */
   const unsigned width = fb ? _mesa_geometric_width(fb) : 1;
   const unsigned height = fb ? _mesa_geometric_height(fb) : 1;
   const unsigned samples = fb ? _mesa_geometric_samples(fb) : 1;

   if (devinfo->gen != 6 || samples <= 1) {
      isl_null_fill_state(&brw->isl_dev, surf,
                          isl_extent3d(width, height, 1));
      return;
   }

   /* On Gen6, null render targets seem to cause GPU hangs when multisampling.
    * So work around this problem by rendering into a dummy color buffer.
    *
    * To decrease the amount of memory needed by the workaround buffer, we
    * set its pitch to 128 bytes (the width of a Y tile).  This means that
    * the amount of memory needed for the workaround buffer is
    * (width_in_tiles + height_in_tiles - 1) tiles.
    *
    * Note that since the workaround buffer will be interpreted by the
    * hardware as an interleaved multisampled buffer, we need to compute
    * width_in_tiles and height_in_tiles by dividing the width and height
    * by 16 rather than the normal Y-tile size of 32.
    */
   unsigned width_in_tiles = ALIGN(width, 16) / 16;
   unsigned height_in_tiles = ALIGN(height, 16) / 16;
   unsigned pitch_minus_1 = 127;
   unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
   brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
                      size_needed);

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              ISL_FORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
   surf[1] = brw_state_reloc(&brw->batch, *out_offset + 4,
                             brw->wm.multisampled_null_render_target_bo,
                             0, RELOC_WRITE);

   surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   /* From the Sandy Bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
    * Notes):
    *
    *     If Surface Type is SURFTYPE_NULL, this field must be TRUE
    */
   surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = BRW_SURFACE_MULTISAMPLECOUNT_4;
   surf[5] = 0;
}
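
/* Worked example of the workaround sizing above (hypothetical framebuffer):
 * for a 1920x1080 multisampled gen6 framebuffer,
 *
 *    width_in_tiles  = ALIGN(1920, 16) / 16 = 120
 *    height_in_tiles = ALIGN(1080, 16) / 16 = 68
 *    size_needed     = (120 + 68 - 1) * 4096 = 765952 bytes (748 KiB)
 *
 * which is far smaller than a real 1920x1080 color buffer.  The 128-byte
 * pitch is much smaller than the logical row size, so rows alias in
 * memory; the contents are write-only garbage and never read, so the
 * aliasing is harmless.
 */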

/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffers support.
 */
static uint32_t
gen4_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 unsigned unit,
                                 uint32_t surf_index)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;
   uint32_t *surf;
   uint32_t tile_x, tile_y;
   enum isl_format format;
   uint32_t offset;
   /* _NEW_BUFFERS */
   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   /* BRW_NEW_FS_PROG_DATA */

   if (rb->TexImage && !devinfo->has_surface_tile_offset) {
      intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);

      if (tile_x != 0 || tile_y != 0) {
         /* Original gen4 hardware couldn't draw to a non-tile-aligned
          * destination in a miptree unless you actually setup your
          * renderbuffer as a miptree and used the fragile
          * lod/array_index/etc. controls to select the image.  So,
          * instead, we just make a new single-level miptree and render
          * into that.
          */
         intel_renderbuffer_move_to_temp(brw, irb, false);
         assert(irb->align_wa_mt);
         mt = irb->align_wa_mt;
      }
   }

   surf = brw_state_batch(brw, 6 * 4, 32, &offset);

   format = brw->mesa_to_isl_render_format[rb_format];
   if (unlikely(!brw->mesa_format_supports_render[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              format << BRW_SURFACE_FORMAT_SHIFT);

   /* reloc */
   assert(mt->offset % mt->cpp == 0);
   surf[1] = brw_state_reloc(&brw->batch, offset + 4, mt->bo,
                             mt->offset +
                             intel_renderbuffer_get_tile_offsets(irb,
                                                                 &tile_x,
                                                                 &tile_y),
                             RELOC_WRITE);

   surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   surf[3] = (brw_get_surface_tiling_bits(mt->surf.tiling) |
              (mt->surf.row_pitch - 1) << BRW_SURFACE_PITCH_SHIFT);

   surf[4] = brw_get_surface_num_multisamples(mt->surf.samples);

   assert(devinfo->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
   /* Note that the low bits of these fields are missing, so
    * there's the possibility of getting in trouble.
    */
   assert(tile_x % 4 == 0);
   assert(tile_y % 2 == 0);
   surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
              (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
              (mt->surf.image_alignment_el.height == 4 ?
               BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));

   if (devinfo->gen < 6) {
      /* _NEW_COLOR */
      if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
          (ctx->Color.BlendEnabled & (1 << unit)))
         surf[0] |= BRW_SURFACE_BLEND_ENABLED;

      if (!ctx->Color.ColorMask[unit][0])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
      if (!ctx->Color.ColorMask[unit][1])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
      if (!ctx->Color.ColorMask[unit][2])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;

      /* As mentioned above, disable writes to the alpha component when the
       * renderbuffer is XRGB.
       */
      if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
          !ctx->Color.ColorMask[unit][3]) {
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
      }
   }

   return offset;
}
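
/* Worked example of the X/Y offset encoding above (hypothetical offsets):
 * the surface state fields store tile offsets in units of 4 pixels
 * horizontally and 2 rows vertically, so tile_x == 12, tile_y == 6 encode
 * as 3 and 3.  An offset such as tile_x == 2 has no representation, which
 * is exactly what the two asserts above guard against.
 */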

static void
update_renderbuffer_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS | _NEW_COLOR */
   const struct gl_framebuffer *fb = ctx->DrawBuffer;

   /* Render targets always start at binding table index 0. */
   const unsigned rt_start = 0;

   uint32_t *surf_offsets = brw->wm.base.surf_offset;

   /* Update surfaces for drawing buffers */
   if (fb->_NumColorDrawBuffers >= 1) {
      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];

         if (intel_renderbuffer(rb)) {
            surf_offsets[rt_start + i] = devinfo->gen >= 6 ?
               gen6_update_renderbuffer_surface(brw, rb, i, rt_start + i) :
               gen4_update_renderbuffer_surface(brw, rb, i, rt_start + i);
         } else {
            emit_null_surface_state(brw, fb, &surf_offsets[rt_start + i]);
         }
      }
   } else {
      emit_null_surface_state(brw, fb, &surf_offsets[rt_start]);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR,
      .brw = BRW_NEW_BATCH,
   },
   .emit = update_renderbuffer_surfaces,
};

const struct brw_tracked_state gen6_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE,
   },
   .emit = update_renderbuffer_surfaces,
};

static void
update_renderbuffer_read_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const struct gl_context *ctx = &brw->ctx;

   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);

   if (wm_prog_data->has_render_target_reads &&
       !ctx->Extensions.MESA_shader_framebuffer_fetch) {
      /* _NEW_BUFFERS */
      const struct gl_framebuffer *fb = ctx->DrawBuffer;

      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
         const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
         const unsigned surf_index =
            wm_prog_data->binding_table.render_target_read_start + i;
         uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];

         if (irb) {
            const enum isl_format format = brw->mesa_to_isl_render_format[
               _mesa_get_render_format(ctx, intel_rb_format(irb))];
            assert(isl_format_supports_sampling(&brw->screen->devinfo,
                                                format));

            /* Override the target of the texture if the render buffer is a
             * single slice of a 3D texture (since the minimum array element
             * field of the surface state structure is ignored by the sampler
             * unit for 3D textures on some hardware), or if the render buffer
             * is a 1D array (since shaders always provide the array index
             * coordinate at the Z component to avoid state-dependent
             * recompiles when changing the texture target of the
             * framebuffer).
             */
            const GLenum target =
               (irb->mt->target == GL_TEXTURE_3D &&
                irb->layer_count == 1) ? GL_TEXTURE_2D :
               irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
               irb->mt->target;

            const struct isl_view view = {
               .format = format,
               .base_level = irb->mt_level - irb->mt->first_level,
               .levels = 1,
               .base_array_layer = irb->mt_layer,
               .array_len = irb->layer_count,
               .swizzle = ISL_SWIZZLE_IDENTITY,
               .usage = ISL_SURF_USAGE_TEXTURE_BIT,
            };

            enum isl_aux_usage aux_usage =
               intel_miptree_texture_aux_usage(brw, irb->mt, format);
            if (brw->draw_aux_buffer_disabled[i])
               aux_usage = ISL_AUX_USAGE_NONE;

            brw_emit_surface_state(brw, irb->mt, target, view, aux_usage,
                                   get_tex_mocs(devinfo, irb->mt->bo),
                                   surf_offset, surf_index,
                                   0);

         } else {
            emit_null_surface_state(brw, fb, surf_offset);
         }
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = update_renderbuffer_read_surfaces,
};

static void
update_stage_texture_surfaces(struct brw_context *brw,
                              const struct gl_program *prog,
                              struct brw_stage_state *stage_state,
                              bool for_gather, uint32_t plane)
{
   if (!prog)
      return;

   struct gl_context *ctx = &brw->ctx;

   uint32_t *surf_offset = stage_state->surf_offset;

   /* BRW_NEW_*_PROG_DATA */
   if (for_gather)
      surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
   else
      surf_offset += stage_state->prog_data->binding_table.plane_start[plane];

   unsigned num_samplers = util_last_bit(prog->SamplersUsed);
   for (unsigned s = 0; s < num_samplers; s++) {
      surf_offset[s] = 0;

      if (prog->SamplersUsed & (1 << s)) {
         const unsigned unit = prog->SamplerUnits[s];
         const bool used_by_txf = prog->info.textures_used_by_txf & (1 << s);

         /* _NEW_TEXTURE */
         if (ctx->Texture.Unit[unit]._Current) {
            brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather,
                                       used_by_txf, plane);
         }
      }
   }
}
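
/* Worked example of the loop above (hypothetical bitmask): with
 * prog->SamplersUsed == 0b101, util_last_bit() returns 3, so slots 0..2
 * are walked; slots 0 and 2 get real surface states and slot 1 stays 0,
 * keeping the sampler index mapping directly onto its binding-table slot.
 */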


/**
 * Construct SURFACE_STATE objects for enabled textures.
 */
static void
brw_update_texture_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* BRW_NEW_VERTEX_PROGRAM */
   struct gl_program *vs = brw->programs[MESA_SHADER_VERTEX];

   /* BRW_NEW_TESS_PROGRAMS */
   struct gl_program *tcs = brw->programs[MESA_SHADER_TESS_CTRL];
   struct gl_program *tes = brw->programs[MESA_SHADER_TESS_EVAL];

   /* BRW_NEW_GEOMETRY_PROGRAM */
   struct gl_program *gs = brw->programs[MESA_SHADER_GEOMETRY];

   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct gl_program *fs = brw->programs[MESA_SHADER_FRAGMENT];

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
   update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
   update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
   update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
   update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);

   /* Emit an alternate set of surface state for gather.  This allows the
    * surface format to be overridden for only the gather4 messages.
    */
   if (devinfo->gen < 8) {
      if (vs && vs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
      if (tcs && tcs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
      if (tes && tes->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
      if (gs && gs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
      if (fs && fs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
   }

   if (fs) {
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_GEOMETRY_PROGRAM |
             BRW_NEW_GS_PROG_DATA |
             BRW_NEW_TESS_PROGRAMS |
             BRW_NEW_TCS_PROG_DATA |
             BRW_NEW_TES_PROG_DATA |
             BRW_NEW_TEXTURE_BUFFER |
             BRW_NEW_VERTEX_PROGRAM |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = brw_update_texture_surfaces,
};

static void
brw_update_cs_texture_surfaces(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* BRW_NEW_COMPUTE_PROGRAM */
   struct gl_program *cs = brw->programs[MESA_SHADER_COMPUTE];

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);

   /* Emit an alternate set of surface state for gather.  This allows the
    * surface format to be overridden for only the gather4 messages.
    */
   if (devinfo->gen < 8) {
      if (cs && cs->info.uses_texture_gather)
         update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_cs_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_COMPUTE_PROGRAM |
             BRW_NEW_AUX_STATE,
   },
   .emit = brw_update_cs_texture_surfaces,
};


void
brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;

   if (!prog)
      return;

   uint32_t *ubo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ubo_start];

   for (int i = 0; i < prog->info.num_ubos; i++) {
      struct gl_buffer_binding *binding =
         &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];

      if (binding->BufferObject == ctx->Shared->NullBufferObj) {
         emit_null_surface_state(brw, NULL, &ubo_surf_offsets[i]);
      } else {
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
         if (!binding->AutomaticSize)
            size = MIN2(size, binding->Size);
         struct brw_bo *bo =
            intel_bufferobj_buffer(brw, intel_bo,
                                   binding->Offset,
                                   size, false);
         brw_create_constant_surface(brw, bo, binding->Offset,
                                     size,
                                     &ubo_surf_offsets[i]);
      }
   }

   uint32_t *ssbo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ssbo_start];

   for (int i = 0; i < prog->info.num_ssbos; i++) {
      struct gl_buffer_binding *binding =
         &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];

      if (binding->BufferObject == ctx->Shared->NullBufferObj) {
         emit_null_surface_state(brw, NULL, &ssbo_surf_offsets[i]);
      } else {
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
         if (!binding->AutomaticSize)
            size = MIN2(size, binding->Size);
         struct brw_bo *bo =
            intel_bufferobj_buffer(brw, intel_bo,
                                   binding->Offset,
                                   size, true);
         brw_create_buffer_surface(brw, bo, binding->Offset,
                                   size,
                                   &ssbo_surf_offsets[i]);
      }
   }

   stage_state->push_constants_dirty = true;

   if (prog->info.num_ubos || prog->info.num_ssbos)
      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

static void
brw_upload_wm_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog = ctx->FragmentProgram._Current;

   /* BRW_NEW_FS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
}

const struct brw_tracked_state brw_wm_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_wm_ubo_surfaces,
};

static void
brw_upload_cs_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   /* BRW_NEW_CS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog, &brw->cs.base, brw->cs.base.prog_data);
}

const struct brw_tracked_state brw_cs_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_cs_ubo_surfaces,
};

void
brw_upload_abo_surfaces(struct brw_context *brw,
                        const struct gl_program *prog,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;
   uint32_t *surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.abo_start];

   if (prog->info.num_abos) {
      for (unsigned i = 0; i < prog->info.num_abos; i++) {
         struct gl_buffer_binding *binding =
            &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         struct brw_bo *bo =
            intel_bufferobj_buffer(brw, intel_bo, binding->Offset,
                                   intel_bo->Base.Size - binding->Offset,
                                   true);

         brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
                                       binding->Offset, ISL_FORMAT_RAW,
                                       bo->size - binding->Offset, 1,
                                       RELOC_WRITE);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

static void
brw_upload_wm_abo_surfaces(struct brw_context *brw)
{
   /* _NEW_PROGRAM */
   const struct gl_program *wm = brw->programs[MESA_SHADER_FRAGMENT];

   if (wm) {
      /* BRW_NEW_FS_PROG_DATA */
      brw_upload_abo_surfaces(brw, wm, &brw->wm.base, brw->wm.base.prog_data);
   }
}

const struct brw_tracked_state brw_wm_abo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_ATOMIC_BUFFER |
             BRW_NEW_BATCH |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_abo_surfaces,
};

static void
brw_upload_cs_abo_surfaces(struct brw_context *brw)
{
   /* _NEW_PROGRAM */
   const struct gl_program *cp = brw->programs[MESA_SHADER_COMPUTE];

   if (cp) {
      /* BRW_NEW_CS_PROG_DATA */
      brw_upload_abo_surfaces(brw, cp, &brw->cs.base, brw->cs.base.prog_data);
   }
}

const struct brw_tracked_state brw_cs_abo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_ATOMIC_BUFFER |
             BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA,
   },
   .emit = brw_upload_cs_abo_surfaces,
};

static void
brw_upload_cs_image_surfaces(struct brw_context *brw)
{
   /* _NEW_PROGRAM */
   const struct gl_program *cp = brw->programs[MESA_SHADER_COMPUTE];

   if (cp) {
      /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, cp, &brw->cs.base,
                                brw->cs.base.prog_data);
   }
}

const struct brw_tracked_state brw_cs_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_AUX_STATE |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_cs_image_surfaces,
};

static uint32_t
get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   enum isl_format hw_format = brw_isl_format_for_mesa_format(format);
   if (access == GL_WRITE_ONLY) {
      return hw_format;
   } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
      /* Typed surface reads support a very limited subset of the shader
       * image formats.  Translate it into the closest format the
       * hardware supports.
       */
      return isl_lower_storage_image_format(devinfo, hw_format);
   } else {
      /* The hardware doesn't actually support a typed format that we can use
       * so we have to fall back to untyped read/write messages.
       */
      return ISL_FORMAT_RAW;
   }
}
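
/* Illustrative note on the selection above (hypothetical cases): a
 * write-only image keeps its typed format, since typed surface writes
 * support many more formats than typed reads; a readable image whose
 * format the typed reads cannot handle directly may be lowered by
 * isl_lower_storage_image_format() to a bit-compatible format, with the
 * shader doing the pack/unpack; anything without a usable typed match
 * falls back to RAW and untyped messages.
 */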

static void
update_default_image_param(struct brw_context *brw,
                           struct gl_image_unit *u,
                           unsigned surface_idx,
                           struct brw_image_param *param)
{
   memset(param, 0, sizeof(*param));
   param->surface_idx = surface_idx;
   /* Set the swizzling shifts to all-ones to effectively disable swizzling --
    * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
    * detailed explanation of these parameters.
    */
   param->swizzling[0] = 0xff;
   param->swizzling[1] = 0xff;
}

static void
update_buffer_image_param(struct brw_context *brw,
                          struct gl_image_unit *u,
                          unsigned surface_idx,
                          struct brw_image_param *param)
{
   struct gl_buffer_object *obj = u->TexObj->BufferObject;
   const uint32_t size = MIN2((uint32_t) u->TexObj->BufferSize, obj->Size);
   update_default_image_param(brw, u, surface_idx, param);

   param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
   param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
}

static unsigned
get_image_num_layers(const struct intel_mipmap_tree *mt, GLenum target,
                     unsigned level)
{
   if (target == GL_TEXTURE_CUBE_MAP)
      return 6;

   return target == GL_TEXTURE_3D ?
      minify(mt->surf.logical_level0_px.depth, level) :
      mt->surf.logical_level0_px.array_len;
}

static void
update_image_surface(struct brw_context *brw,
                     struct gl_image_unit *u,
                     GLenum access,
                     unsigned surface_idx,
                     uint32_t *surf_offset,
                     struct brw_image_param *param)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
      struct gl_texture_object *obj = u->TexObj;
      const unsigned format = get_image_format(brw, u->_ActualFormat, access);

      if (obj->Target == GL_TEXTURE_BUFFER) {
         struct intel_buffer_object *intel_obj =
            intel_buffer_object(obj->BufferObject);
         const unsigned texel_size = (format == ISL_FORMAT_RAW ? 1 :
                                      _mesa_get_format_bytes(u->_ActualFormat));

         brw_emit_buffer_surface_state(
            brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
            format, intel_obj->Base.Size, texel_size,
            access != GL_READ_ONLY ? RELOC_WRITE : 0);

         update_buffer_image_param(brw, u, surface_idx, param);

      } else {
         struct intel_texture_object *intel_obj = intel_texture_object(obj);
         struct intel_mipmap_tree *mt = intel_obj->mt;
         const unsigned num_layers = u->Layered ?
            get_image_num_layers(mt, obj->Target, u->Level) : 1;

         struct isl_view view = {
            .format = format,
            .base_level = obj->MinLevel + u->Level,
            .levels = 1,
            .base_array_layer = obj->MinLayer + u->_Layer,
            .array_len = num_layers,
            .swizzle = ISL_SWIZZLE_IDENTITY,
            .usage = ISL_SURF_USAGE_STORAGE_BIT,
         };

         if (format == ISL_FORMAT_RAW) {
            brw_emit_buffer_surface_state(
               brw, surf_offset, mt->bo, mt->offset,
               format, mt->bo->size - mt->offset, 1 /* pitch */,
               access != GL_READ_ONLY ? RELOC_WRITE : 0);

         } else {
            const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
            assert(!intel_miptree_has_color_unresolved(mt,
                                                       view.base_level, 1,
                                                       view.base_array_layer,
                                                       view.array_len));
            brw_emit_surface_state(brw, mt, mt->target, view,
                                   ISL_AUX_USAGE_NONE,
                                   get_tex_mocs(devinfo, mt->bo),
                                   surf_offset, surf_index,
                                   access == GL_READ_ONLY ? 0 : RELOC_WRITE);
         }

         isl_surf_fill_image_param(&brw->isl_dev, param, &mt->surf, &view);
         param->surface_idx = surface_idx;
      }

   } else {
      emit_null_surface_state(brw, NULL, surf_offset);
      update_default_image_param(brw, u, surface_idx, param);
   }
}

void
brw_upload_image_surfaces(struct brw_context *brw,
                          const struct gl_program *prog,
                          struct brw_stage_state *stage_state,
                          struct brw_stage_prog_data *prog_data)
{
   assert(prog);
   struct gl_context *ctx = &brw->ctx;

   if (prog->info.num_images) {
      for (unsigned i = 0; i < prog->info.num_images; i++) {
         struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
         const unsigned surf_idx = prog_data->binding_table.image_start + i;

         update_image_surface(brw, u, prog->sh.ImageAccess[i],
                              surf_idx,
                              &stage_state->surf_offset[surf_idx],
                              &stage_state->image_param[i]);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
      /* This may have changed the image metadata that depends on the context
       * image unit state and is passed to the program as uniforms; make sure
       * that push and pull constants are reuploaded.
       */
      brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
   }
}

static void
brw_upload_wm_image_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct gl_program *wm = brw->programs[MESA_SHADER_FRAGMENT];

   if (wm) {
      /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, wm, &brw->wm.base,
                                brw->wm.base.prog_data);
   }
}

const struct brw_tracked_state brw_wm_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_AUX_STATE |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_wm_image_surfaces,
};

static void
brw_upload_cs_work_groups_surface(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
   /* BRW_NEW_CS_PROG_DATA */
   const struct brw_cs_prog_data *cs_prog_data =
      brw_cs_prog_data(brw->cs.base.prog_data);

   if (prog && cs_prog_data->uses_num_work_groups) {
      const unsigned surf_idx =
         cs_prog_data->binding_table.work_groups_start;
      uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
      struct brw_bo *bo;
      uint32_t bo_offset;

      if (brw->compute.num_work_groups_bo == NULL) {
         bo = NULL;
         intel_upload_data(brw,
                           (void *) brw->compute.num_work_groups,
                           3 * sizeof(GLuint),
                           sizeof(GLuint),
                           &bo,
                           &bo_offset);
      } else {
         bo = brw->compute.num_work_groups_bo;
         bo_offset = brw->compute.num_work_groups_offset;
      }

      brw_emit_buffer_surface_state(brw, surf_offset,
                                    bo, bo_offset,
                                    ISL_FORMAT_RAW,
                                    3 * sizeof(GLuint), 1,
                                    RELOC_WRITE);
      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_cs_work_groups_surface = {
   .dirty = {
      .brw = BRW_NEW_CS_PROG_DATA |
             BRW_NEW_CS_WORK_GROUPS
   },
   .emit = brw_upload_cs_work_groups_surface,
};