i965: Implement support for overriding the texture target in brw_emit_surface_state.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_wm_surface_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "main/context.h"
34 #include "main/blend.h"
35 #include "main/mtypes.h"
36 #include "main/samplerobj.h"
37 #include "main/shaderimage.h"
38 #include "main/teximage.h"
39 #include "program/prog_parameter.h"
40 #include "program/prog_instruction.h"
41 #include "main/framebuffer.h"
42 #include "main/shaderapi.h"
43
44 #include "isl/isl.h"
45
46 #include "intel_mipmap_tree.h"
47 #include "intel_batchbuffer.h"
48 #include "intel_tex.h"
49 #include "intel_fbo.h"
50 #include "intel_buffer_objects.h"
51
52 #include "brw_context.h"
53 #include "brw_state.h"
54 #include "brw_defines.h"
55 #include "brw_wm.h"
56
57 struct surface_state_info {
58 unsigned num_dwords;
59 unsigned ss_align; /* Required alignment of RENDER_SURFACE_STATE in bytes */
60 unsigned reloc_dw;
61 unsigned aux_reloc_dw;
62 unsigned tex_mocs;
63 unsigned rb_mocs;
64 };
65
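/* Per-generation SURFACE_STATE layout parameters, indexed by brw->gen.
 * Each entry gives the packet size in DWords, the required alignment, the
 * DWord indices that need address relocations, and the default MOCS values
 * for texture and render target surfaces (gen7+ only; earlier gens have no
 * MOCS fields and leave them zero).
 */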
66 static const struct surface_state_info surface_state_infos[] = {
67 [4] = {6, 32, 1, 0},
68 [5] = {6, 32, 1, 0},
69 [6] = {6, 32, 1, 0},
70 [7] = {8, 32, 1, 6, GEN7_MOCS_L3, GEN7_MOCS_L3},
71 [8] = {13, 64, 8, 10, BDW_MOCS_WB, BDW_MOCS_PTE},
72 [9] = {16, 64, 8, 10, SKL_MOCS_WB, SKL_MOCS_PTE},
73 };
74
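/* Emit a SURFACE_STATE entry for the given miptree using ISL. The texture
 * target passed in may differ from mt->target; in that case the surface
 * dimensionality and layout are overridden, which (as described below) only
 * works for a single level and layer on hardware with surface tile offsets.
 */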
75 static void
76 brw_emit_surface_state(struct brw_context *brw,
77 struct intel_mipmap_tree *mt,
78 GLenum target, struct isl_view view,
79 uint32_t mocs, uint32_t *surf_offset, int surf_index,
80 unsigned read_domains, unsigned write_domains)
81 {
82 const struct surface_state_info ss_info = surface_state_infos[brw->gen];
83 uint32_t tile_x = 0, tile_y = 0;
84 uint32_t offset = mt->offset;
85
86 struct isl_surf surf;
87 intel_miptree_get_isl_surf(brw, mt, &surf);
88
89 surf.dim = get_isl_surf_dim(target);
90
91 const enum isl_dim_layout dim_layout =
92 get_isl_dim_layout(brw->intelScreen->devinfo, mt->tiling, target);
93
94 if (surf.dim_layout != dim_layout) {
95 /* The layout of the specified texture target is not compatible with
96 * the actual layout of the miptree structure in memory -- you're
97 * entering dangerous territory. This can only work if you intend to
98 * access a single level and slice of the texture, and if the hardware
99 * supports the tile offset feature to allow non-tile-aligned base
100 * offsets, since we'll have to point the hardware at the first texel
101 * of the level instead of relying on the usual base level/layer
102 * controls.
103 */
104 assert(brw->has_surface_tile_offset);
105 assert(view.levels == 1 && view.array_len == 1);
106
107 offset += intel_miptree_get_tile_offsets(mt, view.base_level,
108 view.base_array_layer,
109 &tile_x, &tile_y);
110
111 /* Minify the logical dimensions of the texture. */
112 const unsigned l = view.base_level - mt->first_level;
113 surf.logical_level0_px.width = minify(surf.logical_level0_px.width, l);
114 surf.logical_level0_px.height = surf.dim <= ISL_SURF_DIM_1D ? 1 :
115 minify(surf.logical_level0_px.height, l);
116 surf.logical_level0_px.depth = surf.dim <= ISL_SURF_DIM_2D ? 1 :
117 minify(surf.logical_level0_px.depth, l);
118
119 /* Only the base level and layer can be addressed with the overridden
120 * layout.
121 */
122 surf.logical_level0_px.array_len = 1;
123 surf.levels = 1;
124 surf.dim_layout = dim_layout;
125
126 /* The requested slice of the texture is now at the base level and
127 * layer.
128 */
129 view.base_level = 0;
130 view.base_array_layer = 0;
131 }
132
133 union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };
134
135 struct isl_surf *aux_surf = NULL, aux_surf_s;
136 uint64_t aux_offset = 0;
137 enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE;
138 if (mt->mcs_mt &&
139 ((view.usage & ISL_SURF_USAGE_RENDER_TARGET_BIT) ||
140 mt->fast_clear_state != INTEL_FAST_CLEAR_STATE_RESOLVED)) {
141 intel_miptree_get_aux_isl_surf(brw, mt, &aux_surf_s, &aux_usage);
142 aux_surf = &aux_surf_s;
143 assert(mt->mcs_mt->offset == 0);
144 aux_offset = mt->mcs_mt->bo->offset64;
145
146 /* We only really need a clear color if we also have an auxiliary
147 * surface. Without one, it does nothing.
148 */
149 clear_color = intel_miptree_get_isl_clear_color(brw, mt);
150 }
151
152 uint32_t *dw = __brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
153 ss_info.num_dwords * 4, ss_info.ss_align,
154 surf_index, surf_offset);
155
156 isl_surf_fill_state(&brw->isl_dev, dw, .surf = &surf, .view = &view,
157 .address = mt->bo->offset64 + offset,
158 .aux_surf = aux_surf, .aux_usage = aux_usage,
159 .aux_address = aux_offset,
160 .mocs = mocs, .clear_color = clear_color,
161 .x_offset_sa = tile_x, .y_offset_sa = tile_y);
162
163 drm_intel_bo_emit_reloc(brw->batch.bo,
164 *surf_offset + 4 * ss_info.reloc_dw,
165 mt->bo, offset,
166 read_domains, write_domains);
167
168 if (aux_surf) {
169 /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
170 * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
171 * contain other control information. Since buffer addresses are always
172 * on 4k boundaries (and thus have their lower 12 bits zero), we can use
173 * an ordinary reloc to do the necessary address translation.
174 */
175 assert((aux_offset & 0xfff) == 0);
176 drm_intel_bo_emit_reloc(brw->batch.bo,
177 *surf_offset + 4 * ss_info.aux_reloc_dw,
178 mt->mcs_mt->bo, dw[ss_info.aux_reloc_dw] & 0xfff,
179 read_domains, write_domains);
180 }
181 }
182
183 uint32_t
184 brw_update_renderbuffer_surface(struct brw_context *brw,
185 struct gl_renderbuffer *rb,
186 bool layered, unsigned unit /* unused */,
187 uint32_t surf_index)
188 {
189 struct gl_context *ctx = &brw->ctx;
190 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
191 struct intel_mipmap_tree *mt = irb->mt;
192
193 assert(brw_render_target_supported(brw, rb));
194 intel_miptree_used_for_rendering(mt);
195
196 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
197 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
198 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
199 __func__, _mesa_get_format_name(rb_format));
200 }
201
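/* irb->mt_layer counts physical slices; for UMS/CMS MSAA layouts each
 * logical layer occupies num_samples slices, so divide by this multiplier
 * below to obtain the logical array layer for the ISL view.
 */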
202 const unsigned layer_multiplier =
203 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
204 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
205 MAX2(irb->mt->num_samples, 1) : 1;
206
207 struct isl_view view = {
208 .format = brw->render_target_format[rb_format],
209 .base_level = irb->mt_level - irb->mt->first_level,
210 .levels = 1,
211 .base_array_layer = irb->mt_layer / layer_multiplier,
212 .array_len = MAX2(irb->layer_count, 1),
213 .channel_select = {
214 ISL_CHANNEL_SELECT_RED,
215 ISL_CHANNEL_SELECT_GREEN,
216 ISL_CHANNEL_SELECT_BLUE,
217 ISL_CHANNEL_SELECT_ALPHA,
218 },
219 .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
220 };
221
222 uint32_t offset;
223 brw_emit_surface_state(brw, mt, mt->target, view,
224 surface_state_infos[brw->gen].rb_mocs,
225 &offset, surf_index,
226 I915_GEM_DOMAIN_RENDER,
227 I915_GEM_DOMAIN_RENDER);
228 return offset;
229 }
230
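/* Translate a GL texture target into the corresponding BRW_SURFACE_*
 * surface type.
 */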
231 GLuint
232 translate_tex_target(GLenum target)
233 {
234 switch (target) {
235 case GL_TEXTURE_1D:
236 case GL_TEXTURE_1D_ARRAY_EXT:
237 return BRW_SURFACE_1D;
238
239 case GL_TEXTURE_RECTANGLE_NV:
240 return BRW_SURFACE_2D;
241
242 case GL_TEXTURE_2D:
243 case GL_TEXTURE_2D_ARRAY_EXT:
244 case GL_TEXTURE_EXTERNAL_OES:
245 case GL_TEXTURE_2D_MULTISAMPLE:
246 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
247 return BRW_SURFACE_2D;
248
249 case GL_TEXTURE_3D:
250 return BRW_SURFACE_3D;
251
252 case GL_TEXTURE_CUBE_MAP:
253 case GL_TEXTURE_CUBE_MAP_ARRAY:
254 return BRW_SURFACE_CUBE;
255
256 default:
257 unreachable("not reached");
258 }
259 }
260
261 uint32_t
262 brw_get_surface_tiling_bits(uint32_t tiling)
263 {
264 switch (tiling) {
265 case I915_TILING_X:
266 return BRW_SURFACE_TILED;
267 case I915_TILING_Y:
268 return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
269 default:
270 return 0;
271 }
272 }
273
274
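/* Map a sample count onto the SURFACE_STATE multisample count field: any
 * multisampled surface is encoded as 4x, single-sampled as 1x.
 */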
275 uint32_t
276 brw_get_surface_num_multisamples(unsigned num_samples)
277 {
278 if (num_samples > 1)
279 return BRW_SURFACE_MULTISAMPLECOUNT_4;
280 else
281 return BRW_SURFACE_MULTISAMPLECOUNT_1;
282 }
283
284 /**
285 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
286 * swizzling.
287 */
288 int
289 brw_get_texture_swizzle(const struct gl_context *ctx,
290 const struct gl_texture_object *t)
291 {
292 const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
293
294 int swizzles[SWIZZLE_NIL + 1] = {
295 SWIZZLE_X,
296 SWIZZLE_Y,
297 SWIZZLE_Z,
298 SWIZZLE_W,
299 SWIZZLE_ZERO,
300 SWIZZLE_ONE,
301 SWIZZLE_NIL
302 };
303
304 if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
305 img->_BaseFormat == GL_DEPTH_STENCIL) {
306 GLenum depth_mode = t->DepthMode;
307
308 /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
309 * with depth component data specified with a sized internal format.
310 * Otherwise, it's left at the old default, GL_LUMINANCE.
311 */
312 if (_mesa_is_gles3(ctx) &&
313 img->InternalFormat != GL_DEPTH_COMPONENT &&
314 img->InternalFormat != GL_DEPTH_STENCIL) {
315 depth_mode = GL_RED;
316 }
317
318 switch (depth_mode) {
319 case GL_ALPHA:
320 swizzles[0] = SWIZZLE_ZERO;
321 swizzles[1] = SWIZZLE_ZERO;
322 swizzles[2] = SWIZZLE_ZERO;
323 swizzles[3] = SWIZZLE_X;
324 break;
325 case GL_LUMINANCE:
326 swizzles[0] = SWIZZLE_X;
327 swizzles[1] = SWIZZLE_X;
328 swizzles[2] = SWIZZLE_X;
329 swizzles[3] = SWIZZLE_ONE;
330 break;
331 case GL_INTENSITY:
332 swizzles[0] = SWIZZLE_X;
333 swizzles[1] = SWIZZLE_X;
334 swizzles[2] = SWIZZLE_X;
335 swizzles[3] = SWIZZLE_X;
336 break;
337 case GL_RED:
338 swizzles[0] = SWIZZLE_X;
339 swizzles[1] = SWIZZLE_ZERO;
340 swizzles[2] = SWIZZLE_ZERO;
341 swizzles[3] = SWIZZLE_ONE;
342 break;
343 }
344 }
345
346 GLenum datatype = _mesa_get_format_datatype(img->TexFormat);
347
348 /* If the texture's format is alpha-only, force R, G, and B to
349 * 0.0. Similarly, if the texture's format has no alpha channel,
350 * force the alpha value read to 1.0. This allows for the
351 * implementation to use an RGBA texture for any of these formats
352 * without leaking any unexpected values.
353 */
354 switch (img->_BaseFormat) {
355 case GL_ALPHA:
356 swizzles[0] = SWIZZLE_ZERO;
357 swizzles[1] = SWIZZLE_ZERO;
358 swizzles[2] = SWIZZLE_ZERO;
359 break;
360 case GL_LUMINANCE:
361 if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
362 swizzles[0] = SWIZZLE_X;
363 swizzles[1] = SWIZZLE_X;
364 swizzles[2] = SWIZZLE_X;
365 swizzles[3] = SWIZZLE_ONE;
366 }
367 break;
368 case GL_LUMINANCE_ALPHA:
369 if (datatype == GL_SIGNED_NORMALIZED) {
370 swizzles[0] = SWIZZLE_X;
371 swizzles[1] = SWIZZLE_X;
372 swizzles[2] = SWIZZLE_X;
373 swizzles[3] = SWIZZLE_W;
374 }
375 break;
376 case GL_INTENSITY:
377 if (datatype == GL_SIGNED_NORMALIZED) {
378 swizzles[0] = SWIZZLE_X;
379 swizzles[1] = SWIZZLE_X;
380 swizzles[2] = SWIZZLE_X;
381 swizzles[3] = SWIZZLE_X;
382 }
383 break;
384 case GL_RED:
385 case GL_RG:
386 case GL_RGB:
387 if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0)
388 swizzles[3] = SWIZZLE_ONE;
389 break;
390 }
391
392 return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
393 swizzles[GET_SWZ(t->_Swizzle, 1)],
394 swizzles[GET_SWZ(t->_Swizzle, 2)],
395 swizzles[GET_SWZ(t->_Swizzle, 3)]);
396 }
397
398 /**
399 * Convert a swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
400 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED). The mappings are
401 *
402 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
403 * 0 1 2 3 4 5
404 * 4 5 6 7 0 1
405 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
406 *
407 * which is simply adding 4 then modding by 8 (or anding with 7).
408 *
409 * We then may need to apply workarounds for textureGather hardware bugs.
410 */
411 static unsigned
412 swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
413 {
414 unsigned scs = (swizzle + 4) & 7;
415
416 return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
417 }
418
419 void
420 brw_update_texture_surface(struct gl_context *ctx,
421 unsigned unit,
422 uint32_t *surf_offset,
423 bool for_gather,
424 uint32_t plane)
425 {
426 struct brw_context *brw = brw_context(ctx);
427 struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
428
429 if (obj->Target == GL_TEXTURE_BUFFER) {
430 brw_update_buffer_texture_surface(ctx, unit, surf_offset);
431
432 } else {
433 struct intel_texture_object *intel_obj = intel_texture_object(obj);
434 struct intel_mipmap_tree *mt = intel_obj->mt;
435
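/* For multi-planar textures, plane > 0 selects one of the additional
 * planes hanging off the main miptree.
 */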
436 if (plane > 0) {
437 if (mt->plane[plane - 1] == NULL)
438 return;
439 mt = mt->plane[plane - 1];
440 }
441
442 struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
443 /* If this is a view with restricted NumLayers, then our effective depth
444 * is not just the miptree depth.
445 */
446 const unsigned view_num_layers =
447 (obj->Immutable && obj->Target != GL_TEXTURE_3D) ? obj->NumLayers :
448 mt->logical_depth0;
449
450 /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
451 * texturing functions that return a float, as our code generation always
452 * selects the .x channel (which would always be 0).
453 */
454 struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
455 const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
456 (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
457 firstImage->_BaseFormat == GL_DEPTH_STENCIL);
458 const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
459 brw_get_texture_swizzle(&brw->ctx, obj));
460
461 mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
462 unsigned format = translate_tex_format(brw, mesa_fmt,
463 sampler->sRGBDecode);
464
465 /* Implement gen6 and gen7 gather work-around */
466 bool need_green_to_blue = false;
467 if (for_gather) {
468 if (brw->gen == 7 && format == BRW_SURFACEFORMAT_R32G32_FLOAT) {
469 format = BRW_SURFACEFORMAT_R32G32_FLOAT_LD;
470 need_green_to_blue = brw->is_haswell;
471 } else if (brw->gen == 6) {
472 /* Sandybridge's gather4 message is broken for integer formats.
473 * To work around this, we pretend the surface is UNORM for
474 * 8 or 16-bit formats, and emit shader instructions to recover
475 * the real INT/UINT value. For 32-bit formats, we pretend
476 * the surface is FLOAT, and simply reinterpret the resulting
477 * bits.
478 */
479 switch (format) {
480 case BRW_SURFACEFORMAT_R8_SINT:
481 case BRW_SURFACEFORMAT_R8_UINT:
482 format = BRW_SURFACEFORMAT_R8_UNORM;
483 break;
484
485 case BRW_SURFACEFORMAT_R16_SINT:
486 case BRW_SURFACEFORMAT_R16_UINT:
487 format = BRW_SURFACEFORMAT_R16_UNORM;
488 break;
489
490 case BRW_SURFACEFORMAT_R32_SINT:
491 case BRW_SURFACEFORMAT_R32_UINT:
492 format = BRW_SURFACEFORMAT_R32_FLOAT;
493 break;
494
495 default:
496 break;
497 }
498 }
499 }
500
501 if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
502 assert(brw->gen >= 8);
503 mt = mt->stencil_mt;
504 format = BRW_SURFACEFORMAT_R8_UINT;
505 }
506
507 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
508
509 struct isl_view view = {
510 .format = format,
511 .base_level = obj->MinLevel + obj->BaseLevel,
512 .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
513 .base_array_layer = obj->MinLayer,
514 .array_len = view_num_layers,
515 .channel_select = {
516 swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
517 swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
518 swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
519 swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
520 },
521 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
522 };
523
524 if (obj->Target == GL_TEXTURE_CUBE_MAP ||
525 obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
526 view.usage |= ISL_SURF_USAGE_CUBE_BIT;
527
528 brw_emit_surface_state(brw, mt, mt->target, view,
529 surface_state_infos[brw->gen].tex_mocs,
530 surf_offset, surf_index,
531 I915_GEM_DOMAIN_SAMPLER, 0);
532 }
533 }
534
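/* Emit a SURFACE_STATE entry describing a buffer (rather than a miptree),
 * using isl_buffer_fill_state, and add a relocation for its address when a
 * BO is provided.
 */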
535 void
536 brw_emit_buffer_surface_state(struct brw_context *brw,
537 uint32_t *out_offset,
538 drm_intel_bo *bo,
539 unsigned buffer_offset,
540 unsigned surface_format,
541 unsigned buffer_size,
542 unsigned pitch,
543 bool rw)
544 {
545 const struct surface_state_info ss_info = surface_state_infos[brw->gen];
546
547 uint32_t *dw = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
548 ss_info.num_dwords * 4, ss_info.ss_align,
549 out_offset);
550
551 isl_buffer_fill_state(&brw->isl_dev, dw,
552 .address = (bo ? bo->offset64 : 0) + buffer_offset,
553 .size = buffer_size,
554 .format = surface_format,
555 .stride = pitch,
556 .mocs = ss_info.tex_mocs);
557
558 if (bo) {
559 drm_intel_bo_emit_reloc(brw->batch.bo,
560 *out_offset + 4 * ss_info.reloc_dw,
561 bo, buffer_offset,
562 I915_GEM_DOMAIN_SAMPLER,
563 (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
564 }
565 }
566
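/* Set up the surface state for a GL_TEXTURE_BUFFER texture bound to the
 * given texture unit.
 */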
567 void
568 brw_update_buffer_texture_surface(struct gl_context *ctx,
569 unsigned unit,
570 uint32_t *surf_offset)
571 {
572 struct brw_context *brw = brw_context(ctx);
573 struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
574 struct intel_buffer_object *intel_obj =
575 intel_buffer_object(tObj->BufferObject);
576 uint32_t size = tObj->BufferSize;
577 drm_intel_bo *bo = NULL;
578 mesa_format format = tObj->_BufferObjectFormat;
579 uint32_t brw_format = brw_format_for_mesa_format(format);
580 int texel_size = _mesa_get_format_bytes(format);
581
582 if (intel_obj) {
583 size = MIN2(size, intel_obj->Base.Size);
584 bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size);
585 }
586
587 if (brw_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
588 _mesa_problem(NULL, "bad format %s for texture buffer\n",
589 _mesa_get_format_name(format));
590 }
591
592 brw_emit_buffer_surface_state(brw, surf_offset, bo,
593 tObj->BufferOffset,
594 brw_format,
595 size,
596 texel_size,
597 false /* rw */);
598 }
599
600 /**
601 * Create the constant buffer surface. Vertex/fragment shader constants will be
602 * read from this buffer with Data Port Read instructions/messages.
603 */
604 void
605 brw_create_constant_surface(struct brw_context *brw,
606 drm_intel_bo *bo,
607 uint32_t offset,
608 uint32_t size,
609 uint32_t *out_offset)
610 {
611 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
612 BRW_SURFACEFORMAT_R32G32B32A32_FLOAT,
613 size, 1, false);
614 }
615
616 /**
617 * Create the buffer surface. Shader buffer variables will be
618 * read from / written to this buffer with Data Port Read/Write
619 * instructions/messages.
620 */
621 void
622 brw_create_buffer_surface(struct brw_context *brw,
623 drm_intel_bo *bo,
624 uint32_t offset,
625 uint32_t size,
626 uint32_t *out_offset)
627 {
628 /* Use a raw surface so we can reuse existing untyped read/write/atomic
629 * messages. We need these specifically for the fragment shader since they
630 * include a pixel mask header that we need in order to ensure correct
631 * behavior with helper invocations, which cannot write to the buffer.
632 */
633 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
634 BRW_SURFACEFORMAT_RAW,
635 size, 1, true);
636 }
637
638 /**
639 * Set up a binding table entry for use by stream output logic (transform
640 * feedback).
641 *
642 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
643 */
644 void
645 brw_update_sol_surface(struct brw_context *brw,
646 struct gl_buffer_object *buffer_obj,
647 uint32_t *out_offset, unsigned num_vector_components,
648 unsigned stride_dwords, unsigned offset_dwords)
649 {
650 struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
651 uint32_t offset_bytes = 4 * offset_dwords;
652 drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
653 offset_bytes,
654 buffer_obj->Size - offset_bytes);
655 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
656 out_offset);
657 uint32_t pitch_minus_1 = 4*stride_dwords - 1;
658 size_t size_dwords = buffer_obj->Size / 4;
659 uint32_t buffer_size_minus_1, width, height, depth, surface_format;
660
661 /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
662 * too big to map using a single binding table entry?
663 */
664 assert((size_dwords - offset_dwords) / stride_dwords
665 <= BRW_MAX_NUM_BUFFER_ENTRIES);
666
667 if (size_dwords > offset_dwords + num_vector_components) {
668 /* There is room for at least 1 transform feedback output in the buffer.
669 * Compute the number of additional transform feedback outputs the
670 * buffer has room for.
671 */
672 buffer_size_minus_1 =
673 (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
674 } else {
675 /* There isn't even room for a single transform feedback output in the
676 * buffer. We can't configure the binding table entry to prevent output
677 * entirely; we'll have to rely on the geometry shader to detect
678 * overflow. But to minimize the damage in case of a bug, set up the
679 * binding table entry to just allow a single output.
680 */
681 buffer_size_minus_1 = 0;
682 }
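/* Split buffer_size_minus_1 across the width (bits 6:0), height
 * (bits 19:7) and depth (bits 26:20) fields of the surface state.
 */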
683 width = buffer_size_minus_1 & 0x7f;
684 height = (buffer_size_minus_1 & 0xfff80) >> 7;
685 depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
686
687 switch (num_vector_components) {
688 case 1:
689 surface_format = BRW_SURFACEFORMAT_R32_FLOAT;
690 break;
691 case 2:
692 surface_format = BRW_SURFACEFORMAT_R32G32_FLOAT;
693 break;
694 case 3:
695 surface_format = BRW_SURFACEFORMAT_R32G32B32_FLOAT;
696 break;
697 case 4:
698 surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;
699 break;
700 default:
701 unreachable("Invalid vector size for transform feedback output");
702 }
703
704 surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
705 BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
706 surface_format << BRW_SURFACE_FORMAT_SHIFT |
707 BRW_SURFACE_RC_READ_WRITE;
708 surf[1] = bo->offset64 + offset_bytes; /* reloc */
709 surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
710 height << BRW_SURFACE_HEIGHT_SHIFT);
711 surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
712 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
713 surf[4] = 0;
714 surf[5] = 0;
715
716 /* Emit relocation to surface contents. */
717 drm_intel_bo_emit_reloc(brw->batch.bo,
718 *out_offset + 4,
719 bo, offset_bytes,
720 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
721 }
722
723 /* Creates a new WM constant buffer reflecting the current fragment program's
724 * constants, if needed by the fragment program.
725 *
726 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
727 * state atom.
728 */
729 static void
730 brw_upload_wm_pull_constants(struct brw_context *brw)
731 {
732 struct brw_stage_state *stage_state = &brw->wm.base;
733 /* BRW_NEW_FRAGMENT_PROGRAM */
734 struct brw_fragment_program *fp =
735 (struct brw_fragment_program *) brw->fragment_program;
736 /* BRW_NEW_FS_PROG_DATA */
737 struct brw_stage_prog_data *prog_data = &brw->wm.prog_data->base;
738
739 _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
740 /* _NEW_PROGRAM_CONSTANTS */
741 brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program.Base,
742 stage_state, prog_data);
743 }
744
745 const struct brw_tracked_state brw_wm_pull_constants = {
746 .dirty = {
747 .mesa = _NEW_PROGRAM_CONSTANTS,
748 .brw = BRW_NEW_BATCH |
749 BRW_NEW_BLORP |
750 BRW_NEW_FRAGMENT_PROGRAM |
751 BRW_NEW_FS_PROG_DATA,
752 },
753 .emit = brw_upload_wm_pull_constants,
754 };
755
756 /**
757 * Creates a null renderbuffer surface.
758 *
759 * This is used when the shader doesn't write to any color output. An FB
760 * write to target 0 will still be emitted, because that's how the thread is
761 * terminated (and computed depth is returned), so we need to have the
762 * hardware discard the target 0 color output.
763 */
764 static void
765 brw_emit_null_surface_state(struct brw_context *brw,
766 unsigned width,
767 unsigned height,
768 unsigned samples,
769 uint32_t *out_offset)
770 {
771 /* From the Sandy Bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
772 * Notes):
773 *
774 * A null surface will be used in instances where an actual surface is
775 * not bound. When a write message is generated to a null surface, no
776 * actual surface is written to. When a read message (including any
777 * sampling engine message) is generated to a null surface, the result
778 * is all zeros. Note that a null surface type is allowed to be used
779 * with all messages, even if it is not specifically indicated as
780 * supported. All of the remaining fields in surface state are ignored
781 * for null surfaces, with the following exceptions:
782 *
783 * - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
784 * depth buffer’s corresponding state for all render target surfaces,
785 * including null.
786 *
787 * - Surface Format must be R8G8B8A8_UNORM.
788 */
789 unsigned surface_type = BRW_SURFACE_NULL;
790 drm_intel_bo *bo = NULL;
791 unsigned pitch_minus_1 = 0;
792 uint32_t multisampling_state = 0;
793 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
794 out_offset);
795
796 if (samples > 1) {
797 /* On Gen6, null render targets seem to cause GPU hangs when
798 * multisampling. So work around this problem by rendering into a dummy
799 * color buffer.
800 *
801 * To decrease the amount of memory needed by the workaround buffer, we
802 * set its pitch to 128 bytes (the width of a Y tile). This means that
803 * the amount of memory needed for the workaround buffer is
804 * (width_in_tiles + height_in_tiles - 1) tiles.
805 *
806 * Note that since the workaround buffer will be interpreted by the
807 * hardware as an interleaved multisampled buffer, we need to compute
808 * width_in_tiles and height_in_tiles by dividing the width and height
809 * by 16 rather than the normal Y-tile size of 32.
810 */
811 unsigned width_in_tiles = ALIGN(width, 16) / 16;
812 unsigned height_in_tiles = ALIGN(height, 16) / 16;
813 unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
814 brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
815 size_needed);
816 bo = brw->wm.multisampled_null_render_target_bo;
817 surface_type = BRW_SURFACE_2D;
818 pitch_minus_1 = 127;
819 multisampling_state = brw_get_surface_num_multisamples(samples);
820 }
821
822 surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
823 BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
824 if (brw->gen < 6) {
825 surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
826 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
827 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
828 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
829 }
830 surf[1] = bo ? bo->offset64 : 0;
831 surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
832 (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
833
834 /* From the Sandy Bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
835 * Notes):
836 *
837 * If Surface Type is SURFTYPE_NULL, this field must be TRUE
838 */
839 surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
840 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
841 surf[4] = multisampling_state;
842 surf[5] = 0;
843
844 if (bo) {
845 drm_intel_bo_emit_reloc(brw->batch.bo,
846 *out_offset + 4,
847 bo, 0,
848 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
849 }
850 }
851
852 /**
853 * Sets up a surface state structure to point at the given region.
854 * While it is only used for the front/back buffer currently, it should be
855 * usable for further buffers when doing ARB_draw_buffers support.
856 */
857 static uint32_t
858 gen4_update_renderbuffer_surface(struct brw_context *brw,
859 struct gl_renderbuffer *rb,
860 bool layered, unsigned unit,
861 uint32_t surf_index)
862 {
863 struct gl_context *ctx = &brw->ctx;
864 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
865 struct intel_mipmap_tree *mt = irb->mt;
866 uint32_t *surf;
867 uint32_t tile_x, tile_y;
868 uint32_t format = 0;
869 uint32_t offset;
870 /* _NEW_BUFFERS */
871 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
872 /* BRW_NEW_FS_PROG_DATA */
873
874 assert(!layered);
875
876 if (rb->TexImage && !brw->has_surface_tile_offset) {
877 intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);
878
879 if (tile_x != 0 || tile_y != 0) {
880 /* Original gen4 hardware couldn't draw to a non-tile-aligned
881 * destination in a miptree unless you actually set up your renderbuffer
882 * as a miptree and used the fragile lod/array_index/etc. controls to
883 * select the image. So, instead, we just make a new single-level
884 * miptree and render into that.
885 */
886 intel_renderbuffer_move_to_temp(brw, irb, false);
887 mt = irb->mt;
888 }
889 }
890
891 intel_miptree_used_for_rendering(irb->mt);
892
893 surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32, &offset);
894
895 format = brw->render_target_format[rb_format];
896 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
897 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
898 __func__, _mesa_get_format_name(rb_format));
899 }
900
901 surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
902 format << BRW_SURFACE_FORMAT_SHIFT);
903
904 /* reloc */
905 assert(mt->offset % mt->cpp == 0);
906 surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
907 mt->bo->offset64 + mt->offset);
908
909 surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
910 (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
911
912 surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
913 (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
914
915 surf[4] = brw_get_surface_num_multisamples(mt->num_samples);
916
917 assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
918 /* Note that the low bits of these fields are missing, so
919 * there's the possibility of getting in trouble.
920 */
921 assert(tile_x % 4 == 0);
922 assert(tile_y % 2 == 0);
923 surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
924 (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
925 (mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));
926
927 if (brw->gen < 6) {
928 /* _NEW_COLOR */
929 if (!ctx->Color.ColorLogicOpEnabled &&
930 (ctx->Color.BlendEnabled & (1 << unit)))
931 surf[0] |= BRW_SURFACE_BLEND_ENABLED;
932
933 if (!ctx->Color.ColorMask[unit][0])
934 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
935 if (!ctx->Color.ColorMask[unit][1])
936 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
937 if (!ctx->Color.ColorMask[unit][2])
938 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;
939
940 /* Disable writes to the alpha component when the
941 * renderbuffer is XRGB.
942 */
943 if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
944 !ctx->Color.ColorMask[unit][3]) {
945 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
946 }
947 }
948
949 drm_intel_bo_emit_reloc(brw->batch.bo,
950 offset + 4,
951 mt->bo,
952 surf[1] - mt->bo->offset64,
953 I915_GEM_DOMAIN_RENDER,
954 I915_GEM_DOMAIN_RENDER);
955
956 return offset;
957 }
958
959 /**
960 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
961 */
962 void
963 brw_update_renderbuffer_surfaces(struct brw_context *brw,
964 const struct gl_framebuffer *fb,
965 uint32_t render_target_start,
966 uint32_t *surf_offset)
967 {
968 GLuint i;
969 const unsigned int w = _mesa_geometric_width(fb);
970 const unsigned int h = _mesa_geometric_height(fb);
971 const unsigned int s = _mesa_geometric_samples(fb);
972
973 /* Update surfaces for drawing buffers */
974 if (fb->_NumColorDrawBuffers >= 1) {
975 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
976 const uint32_t surf_index = render_target_start + i;
977
978 if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
979 surf_offset[surf_index] =
980 brw->vtbl.update_renderbuffer_surface(
981 brw, fb->_ColorDrawBuffers[i],
982 _mesa_geometric_layers(fb) > 0, i, surf_index);
983 } else {
984 brw->vtbl.emit_null_surface_state(brw, w, h, s,
985 &surf_offset[surf_index]);
986 }
987 }
988 } else {
989 const uint32_t surf_index = render_target_start;
990 brw->vtbl.emit_null_surface_state(brw, w, h, s,
991 &surf_offset[surf_index]);
992 }
993 }
994
995 static void
996 update_renderbuffer_surfaces(struct brw_context *brw)
997 {
998 const struct gl_context *ctx = &brw->ctx;
999
1000 /* _NEW_BUFFERS | _NEW_COLOR */
1001 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1002 brw_update_renderbuffer_surfaces(
1003 brw, fb,
1004 brw->wm.prog_data->binding_table.render_target_start,
1005 brw->wm.base.surf_offset);
1006 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1007 }
1008
1009 const struct brw_tracked_state brw_renderbuffer_surfaces = {
1010 .dirty = {
1011 .mesa = _NEW_BUFFERS |
1012 _NEW_COLOR,
1013 .brw = BRW_NEW_BATCH |
1014 BRW_NEW_BLORP |
1015 BRW_NEW_FS_PROG_DATA,
1016 },
1017 .emit = update_renderbuffer_surfaces,
1018 };
1019
1020 const struct brw_tracked_state gen6_renderbuffer_surfaces = {
1021 .dirty = {
1022 .mesa = _NEW_BUFFERS,
1023 .brw = BRW_NEW_BATCH |
1024 BRW_NEW_BLORP,
1025 },
1026 .emit = update_renderbuffer_surfaces,
1027 };
1028
1029
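/* Emit texture surface state for every sampler used by the given program
 * stage, writing the resulting offsets into the stage's binding table
 * (regular, gather, or per-plane sections).
 */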
1030 static void
1031 update_stage_texture_surfaces(struct brw_context *brw,
1032 const struct gl_program *prog,
1033 struct brw_stage_state *stage_state,
1034 bool for_gather, uint32_t plane)
1035 {
1036 if (!prog)
1037 return;
1038
1039 struct gl_context *ctx = &brw->ctx;
1040
1041 uint32_t *surf_offset = stage_state->surf_offset;
1042
1043 /* BRW_NEW_*_PROG_DATA */
1044 if (for_gather)
1045 surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
1046 else
1047 surf_offset += stage_state->prog_data->binding_table.plane_start[plane];
1048
1049 unsigned num_samplers = util_last_bit(prog->SamplersUsed);
1050 for (unsigned s = 0; s < num_samplers; s++) {
1051 surf_offset[s] = 0;
1052
1053 if (prog->SamplersUsed & (1 << s)) {
1054 const unsigned unit = prog->SamplerUnits[s];
1055
1056 /* _NEW_TEXTURE */
1057 if (ctx->Texture.Unit[unit]._Current) {
1058 brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather, plane);
1059 }
1060 }
1061 }
1062 }
1063
1064
1065 /**
1066 * Construct SURFACE_STATE objects for enabled textures.
1067 */
1068 static void
1069 brw_update_texture_surfaces(struct brw_context *brw)
1070 {
1071 /* BRW_NEW_VERTEX_PROGRAM */
1072 struct gl_program *vs = (struct gl_program *) brw->vertex_program;
1073
1074 /* BRW_NEW_TESS_PROGRAMS */
1075 struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
1076 struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;
1077
1078 /* BRW_NEW_GEOMETRY_PROGRAM */
1079 struct gl_program *gs = (struct gl_program *) brw->geometry_program;
1080
1081 /* BRW_NEW_FRAGMENT_PROGRAM */
1082 struct gl_program *fs = (struct gl_program *) brw->fragment_program;
1083
1084 /* _NEW_TEXTURE */
1085 update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
1086 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
1087 update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
1088 update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
1089 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);
1090
1091 /* Emit an alternate set of surface state for gather. This
1092 * allows the surface format to be overridden for only the
1093 * gather4 messages. */
1094 if (brw->gen < 8) {
1095 if (vs && vs->UsesGather)
1096 update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
1097 if (tcs && tcs->UsesGather)
1098 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
1099 if (tes && tes->UsesGather)
1100 update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
1101 if (gs && gs->UsesGather)
1102 update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
1103 if (fs && fs->UsesGather)
1104 update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
1105 }
1106
1107 if (fs) {
1108 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
1109 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
1110 }
1111
1112 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1113 }
1114
1115 const struct brw_tracked_state brw_texture_surfaces = {
1116 .dirty = {
1117 .mesa = _NEW_TEXTURE,
1118 .brw = BRW_NEW_BATCH |
1119 BRW_NEW_BLORP |
1120 BRW_NEW_FRAGMENT_PROGRAM |
1121 BRW_NEW_FS_PROG_DATA |
1122 BRW_NEW_GEOMETRY_PROGRAM |
1123 BRW_NEW_GS_PROG_DATA |
1124 BRW_NEW_TESS_PROGRAMS |
1125 BRW_NEW_TCS_PROG_DATA |
1126 BRW_NEW_TES_PROG_DATA |
1127 BRW_NEW_TEXTURE_BUFFER |
1128 BRW_NEW_VERTEX_PROGRAM |
1129 BRW_NEW_VS_PROG_DATA,
1130 },
1131 .emit = brw_update_texture_surfaces,
1132 };
1133
1134 static void
1135 brw_update_cs_texture_surfaces(struct brw_context *brw)
1136 {
1137 /* BRW_NEW_COMPUTE_PROGRAM */
1138 struct gl_program *cs = (struct gl_program *) brw->compute_program;
1139
1140 /* _NEW_TEXTURE */
1141 update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);
1142
1143 /* Emit an alternate set of surface state for gather. This
1144 * allows the surface format to be overridden for only the
1145 * gather4 messages.
1146 */
1147 if (brw->gen < 8) {
1148 if (cs && cs->UsesGather)
1149 update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
1150 }
1151
1152 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1153 }
1154
1155 const struct brw_tracked_state brw_cs_texture_surfaces = {
1156 .dirty = {
1157 .mesa = _NEW_TEXTURE,
1158 .brw = BRW_NEW_BATCH |
1159 BRW_NEW_BLORP |
1160 BRW_NEW_COMPUTE_PROGRAM,
1161 },
1162 .emit = brw_update_cs_texture_surfaces,
1163 };
1164
1165
1166 void
1167 brw_upload_ubo_surfaces(struct brw_context *brw,
1168 struct gl_linked_shader *shader,
1169 struct brw_stage_state *stage_state,
1170 struct brw_stage_prog_data *prog_data)
1171 {
1172 struct gl_context *ctx = &brw->ctx;
1173
1174 if (!shader)
1175 return;
1176
1177 uint32_t *ubo_surf_offsets =
1178 &stage_state->surf_offset[prog_data->binding_table.ubo_start];
1179
1180 for (int i = 0; i < shader->NumUniformBlocks; i++) {
1181 struct gl_uniform_buffer_binding *binding =
1182 &ctx->UniformBufferBindings[shader->UniformBlocks[i]->Binding];
1183
1184 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1185 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
1186 } else {
1187 struct intel_buffer_object *intel_bo =
1188 intel_buffer_object(binding->BufferObject);
1189 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1190 if (!binding->AutomaticSize)
1191 size = MIN2(size, binding->Size);
1192 drm_intel_bo *bo =
1193 intel_bufferobj_buffer(brw, intel_bo,
1194 binding->Offset,
1195 size);
1196 brw_create_constant_surface(brw, bo, binding->Offset,
1197 size,
1198 &ubo_surf_offsets[i]);
1199 }
1200 }
1201
1202 uint32_t *ssbo_surf_offsets =
1203 &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
1204
1205 for (int i = 0; i < shader->NumShaderStorageBlocks; i++) {
1206 struct gl_shader_storage_buffer_binding *binding =
1207 &ctx->ShaderStorageBufferBindings[shader->ShaderStorageBlocks[i]->Binding];
1208
1209 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1210 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
1211 } else {
1212 struct intel_buffer_object *intel_bo =
1213 intel_buffer_object(binding->BufferObject);
1214 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1215 if (!binding->AutomaticSize)
1216 size = MIN2(size, binding->Size);
1217 drm_intel_bo *bo =
1218 intel_bufferobj_buffer(brw, intel_bo,
1219 binding->Offset,
1220 size);
1221 brw_create_buffer_surface(brw, bo, binding->Offset,
1222 size,
1223 &ssbo_surf_offsets[i]);
1224 }
1225 }
1226
1227 if (shader->NumUniformBlocks || shader->NumShaderStorageBlocks)
1228 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1229 }
1230
1231 static void
1232 brw_upload_wm_ubo_surfaces(struct brw_context *brw)
1233 {
1234 struct gl_context *ctx = &brw->ctx;
1235 /* _NEW_PROGRAM */
1236 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1237
1238 if (!prog)
1239 return;
1240
1241 /* BRW_NEW_FS_PROG_DATA */
1242 brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1243 &brw->wm.base, &brw->wm.prog_data->base);
1244 }
1245
1246 const struct brw_tracked_state brw_wm_ubo_surfaces = {
1247 .dirty = {
1248 .mesa = _NEW_PROGRAM,
1249 .brw = BRW_NEW_BATCH |
1250 BRW_NEW_BLORP |
1251 BRW_NEW_FS_PROG_DATA |
1252 BRW_NEW_UNIFORM_BUFFER,
1253 },
1254 .emit = brw_upload_wm_ubo_surfaces,
1255 };
1256
1257 static void
1258 brw_upload_cs_ubo_surfaces(struct brw_context *brw)
1259 {
1260 struct gl_context *ctx = &brw->ctx;
1261 /* _NEW_PROGRAM */
1262 struct gl_shader_program *prog =
1263 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1264
1265 if (!prog)
1266 return;
1267
1268 /* BRW_NEW_CS_PROG_DATA */
1269 brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1270 &brw->cs.base, &brw->cs.prog_data->base);
1271 }
1272
1273 const struct brw_tracked_state brw_cs_ubo_surfaces = {
1274 .dirty = {
1275 .mesa = _NEW_PROGRAM,
1276 .brw = BRW_NEW_BATCH |
1277 BRW_NEW_BLORP |
1278 BRW_NEW_CS_PROG_DATA |
1279 BRW_NEW_UNIFORM_BUFFER,
1280 },
1281 .emit = brw_upload_cs_ubo_surfaces,
1282 };
1283
1284 void
1285 brw_upload_abo_surfaces(struct brw_context *brw,
1286 struct gl_linked_shader *shader,
1287 struct brw_stage_state *stage_state,
1288 struct brw_stage_prog_data *prog_data)
1289 {
1290 struct gl_context *ctx = &brw->ctx;
1291 uint32_t *surf_offsets =
1292 &stage_state->surf_offset[prog_data->binding_table.abo_start];
1293
1294 if (shader && shader->NumAtomicBuffers) {
1295 for (unsigned i = 0; i < shader->NumAtomicBuffers; i++) {
1296 struct gl_atomic_buffer_binding *binding =
1297 &ctx->AtomicBufferBindings[shader->AtomicBuffers[i]->Binding];
1298 struct intel_buffer_object *intel_bo =
1299 intel_buffer_object(binding->BufferObject);
1300 drm_intel_bo *bo = intel_bufferobj_buffer(
1301 brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);
1302
1303 brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
1304 binding->Offset, BRW_SURFACEFORMAT_RAW,
1305 bo->size - binding->Offset, 1, true);
1306 }
1307
1308 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1309 }
1310 }
1311
1312 static void
1313 brw_upload_wm_abo_surfaces(struct brw_context *brw)
1314 {
1315 struct gl_context *ctx = &brw->ctx;
1316 /* _NEW_PROGRAM */
1317 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1318
1319 if (prog) {
1320 /* BRW_NEW_FS_PROG_DATA */
1321 brw_upload_abo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1322 &brw->wm.base, &brw->wm.prog_data->base);
1323 }
1324 }
1325
1326 const struct brw_tracked_state brw_wm_abo_surfaces = {
1327 .dirty = {
1328 .mesa = _NEW_PROGRAM,
1329 .brw = BRW_NEW_ATOMIC_BUFFER |
1330 BRW_NEW_BLORP |
1331 BRW_NEW_BATCH |
1332 BRW_NEW_FS_PROG_DATA,
1333 },
1334 .emit = brw_upload_wm_abo_surfaces,
1335 };
1336
1337 static void
1338 brw_upload_cs_abo_surfaces(struct brw_context *brw)
1339 {
1340 struct gl_context *ctx = &brw->ctx;
1341 /* _NEW_PROGRAM */
1342 struct gl_shader_program *prog =
1343 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1344
1345 if (prog) {
1346 /* BRW_NEW_CS_PROG_DATA */
1347 brw_upload_abo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1348 &brw->cs.base, &brw->cs.prog_data->base);
1349 }
1350 }
1351
1352 const struct brw_tracked_state brw_cs_abo_surfaces = {
1353 .dirty = {
1354 .mesa = _NEW_PROGRAM,
1355 .brw = BRW_NEW_ATOMIC_BUFFER |
1356 BRW_NEW_BLORP |
1357 BRW_NEW_BATCH |
1358 BRW_NEW_CS_PROG_DATA,
1359 },
1360 .emit = brw_upload_cs_abo_surfaces,
1361 };
1362
1363 static void
1364 brw_upload_cs_image_surfaces(struct brw_context *brw)
1365 {
1366 struct gl_context *ctx = &brw->ctx;
1367 /* _NEW_PROGRAM */
1368 struct gl_shader_program *prog =
1369 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1370
1371 if (prog) {
1372 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1373 brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1374 &brw->cs.base, &brw->cs.prog_data->base);
1375 }
1376 }
1377
1378 const struct brw_tracked_state brw_cs_image_surfaces = {
1379 .dirty = {
1380 .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
1381 .brw = BRW_NEW_BATCH |
1382 BRW_NEW_BLORP |
1383 BRW_NEW_CS_PROG_DATA |
1384 BRW_NEW_IMAGE_UNITS
1385 },
1386 .emit = brw_upload_cs_image_surfaces,
1387 };
1388
1389 static uint32_t
1390 get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
1391 {
1392 const struct brw_device_info *devinfo = brw->intelScreen->devinfo;
1393 uint32_t hw_format = brw_format_for_mesa_format(format);
1394 if (access == GL_WRITE_ONLY) {
1395 return hw_format;
1396 } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
1397 /* Typed surface reads support a very limited subset of the shader
1398 * image formats. Translate it into the closest format the
1399 * hardware supports.
1400 */
1401 return isl_lower_storage_image_format(devinfo, hw_format);
1402 } else {
1403 /* The hardware doesn't actually support a typed format that we can use
1404 * so we have to fall back to untyped read/write messages.
1405 */
1406 return BRW_SURFACEFORMAT_RAW;
1407 }
1408 }
1409
1410 static void
1411 update_default_image_param(struct brw_context *brw,
1412 struct gl_image_unit *u,
1413 unsigned surface_idx,
1414 struct brw_image_param *param)
1415 {
1416 memset(param, 0, sizeof(*param));
1417 param->surface_idx = surface_idx;
1418 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
1419 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
1420 * detailed explanation of these parameters.
1421 */
1422 param->swizzling[0] = 0xff;
1423 param->swizzling[1] = 0xff;
1424 }
1425
1426 static void
1427 update_buffer_image_param(struct brw_context *brw,
1428 struct gl_image_unit *u,
1429 unsigned surface_idx,
1430 struct brw_image_param *param)
1431 {
1432 struct gl_buffer_object *obj = u->TexObj->BufferObject;
1433 const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
1434 update_default_image_param(brw, u, surface_idx, param);
1435
1436 param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
1437 param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
1438 }
1439
1440 static void
1441 update_texture_image_param(struct brw_context *brw,
1442 struct gl_image_unit *u,
1443 unsigned surface_idx,
1444 struct brw_image_param *param)
1445 {
1446 struct intel_mipmap_tree *mt = intel_texture_object(u->TexObj)->mt;
1447
1448 update_default_image_param(brw, u, surface_idx, param);
1449
1450 param->size[0] = minify(mt->logical_width0, u->Level);
1451 param->size[1] = minify(mt->logical_height0, u->Level);
1452 param->size[2] = (!u->Layered ? 1 :
1453 u->TexObj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1454 u->TexObj->Target == GL_TEXTURE_3D ?
1455 minify(mt->logical_depth0, u->Level) :
1456 mt->logical_depth0);
1457
1458 intel_miptree_get_image_offset(mt, u->Level, u->_Layer,
1459 &param->offset[0],
1460 &param->offset[1]);
1461
1462 param->stride[0] = mt->cpp;
1463 param->stride[1] = mt->pitch / mt->cpp;
1464 param->stride[2] =
1465 brw_miptree_get_horizontal_slice_pitch(brw, mt, u->Level);
1466 param->stride[3] =
1467 brw_miptree_get_vertical_slice_pitch(brw, mt, u->Level);
1468
1469 if (mt->tiling == I915_TILING_X) {
1470 /* An X tile is a rectangular block of 512x8 bytes. */
1471 param->tiling[0] = _mesa_logbase2(512 / mt->cpp);
1472 param->tiling[1] = _mesa_logbase2(8);
1473
1474 if (brw->has_swizzling) {
1475 /* Right shifts required to swizzle bits 9 and 10 of the memory
1476 * address with bit 6.
1477 */
1478 param->swizzling[0] = 3;
1479 param->swizzling[1] = 4;
1480 }
1481 } else if (mt->tiling == I915_TILING_Y) {
1482 /* The layout of a Y-tiled surface in memory isn't really fundamentally
1483 * different from the layout of an X-tiled surface; we simply pretend that
1484 * the surface is broken up into a number of smaller 16Bx32 tiles, each
1485 * one arranged in X-major order just as is the case for X-tiling.
1486 */
1487 param->tiling[0] = _mesa_logbase2(16 / mt->cpp);
1488 param->tiling[1] = _mesa_logbase2(32);
1489
1490 if (brw->has_swizzling) {
1491 /* Right shift required to swizzle bit 9 of the memory address with
1492 * bit 6.
1493 */
1494 param->swizzling[0] = 3;
1495 }
1496 }
1497
1498 /* 3D textures are arranged in 2D in memory with 2^lod slices per row. The
1499 * address calculation algorithm (emit_address_calculation() in
1500 * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
1501 * modulus equal to the LOD.
1502 */
1503 param->tiling[2] = (u->TexObj->Target == GL_TEXTURE_3D ? u->Level :
1504 0);
1505 }
1506
1507 static void
1508 update_image_surface(struct brw_context *brw,
1509 struct gl_image_unit *u,
1510 GLenum access,
1511 unsigned surface_idx,
1512 uint32_t *surf_offset,
1513 struct brw_image_param *param)
1514 {
1515 if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
1516 struct gl_texture_object *obj = u->TexObj;
1517 const unsigned format = get_image_format(brw, u->_ActualFormat, access);
1518
1519 if (obj->Target == GL_TEXTURE_BUFFER) {
1520 struct intel_buffer_object *intel_obj =
1521 intel_buffer_object(obj->BufferObject);
1522 const unsigned texel_size = (format == BRW_SURFACEFORMAT_RAW ? 1 :
1523 _mesa_get_format_bytes(u->_ActualFormat));
1524
1525 brw_emit_buffer_surface_state(
1526 brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
1527 format, intel_obj->Base.Size, texel_size,
1528 access != GL_READ_ONLY);
1529
1530 update_buffer_image_param(brw, u, surface_idx, param);
1531
1532 } else {
1533 struct intel_texture_object *intel_obj = intel_texture_object(obj);
1534 struct intel_mipmap_tree *mt = intel_obj->mt;
1535
1536 if (format == BRW_SURFACEFORMAT_RAW) {
1537 brw_emit_buffer_surface_state(
1538 brw, surf_offset, mt->bo, mt->offset,
1539 format, mt->bo->size - mt->offset, 1 /* pitch */,
1540 access != GL_READ_ONLY);
1541
1542 } else {
1543 const unsigned num_layers = (!u->Layered ? 1 :
1544 obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1545 mt->logical_depth0);
1546
1547 struct isl_view view = {
1548 .format = format,
1549 .base_level = obj->MinLevel + u->Level,
1550 .levels = 1,
1551 .base_array_layer = obj->MinLayer + u->_Layer,
1552 .array_len = num_layers,
1553 .channel_select = {
1554 ISL_CHANNEL_SELECT_RED,
1555 ISL_CHANNEL_SELECT_GREEN,
1556 ISL_CHANNEL_SELECT_BLUE,
1557 ISL_CHANNEL_SELECT_ALPHA,
1558 },
1559 .usage = ISL_SURF_USAGE_STORAGE_BIT,
1560 };
1561
1562 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
1563
1564 brw_emit_surface_state(brw, mt, mt->target, view,
1565 surface_state_infos[brw->gen].tex_mocs,
1566 surf_offset, surf_index,
1567 I915_GEM_DOMAIN_SAMPLER,
1568 access == GL_READ_ONLY ? 0 :
1569 I915_GEM_DOMAIN_SAMPLER);
1570 }
1571
1572 update_texture_image_param(brw, u, surface_idx, param);
1573 }
1574
1575 } else {
1576 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
1577 update_default_image_param(brw, u, surface_idx, param);
1578 }
1579 }
1580
1581 void
1582 brw_upload_image_surfaces(struct brw_context *brw,
1583 struct gl_linked_shader *shader,
1584 struct brw_stage_state *stage_state,
1585 struct brw_stage_prog_data *prog_data)
1586 {
1587 struct gl_context *ctx = &brw->ctx;
1588
1589 if (shader && shader->NumImages) {
1590 for (unsigned i = 0; i < shader->NumImages; i++) {
1591 struct gl_image_unit *u = &ctx->ImageUnits[shader->ImageUnits[i]];
1592 const unsigned surf_idx = prog_data->binding_table.image_start + i;
1593
1594 update_image_surface(brw, u, shader->ImageAccess[i],
1595 surf_idx,
1596 &stage_state->surf_offset[surf_idx],
1597 &prog_data->image_param[i]);
1598 }
1599
1600 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1601 /* This may have changed the image metadata that depends on the context
1602 * image unit state and is passed to the program as uniforms; make sure
1603 * that push and pull constants are re-uploaded.
1604 */
1605 brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
1606 }
1607 }
1608
1609 static void
1610 brw_upload_wm_image_surfaces(struct brw_context *brw)
1611 {
1612 struct gl_context *ctx = &brw->ctx;
1613 /* BRW_NEW_FRAGMENT_PROGRAM */
1614 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1615
1616 if (prog) {
1617 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1618 brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1619 &brw->wm.base, &brw->wm.prog_data->base);
1620 }
1621 }
1622
1623 const struct brw_tracked_state brw_wm_image_surfaces = {
1624 .dirty = {
1625 .mesa = _NEW_TEXTURE,
1626 .brw = BRW_NEW_BATCH |
1627 BRW_NEW_BLORP |
1628 BRW_NEW_FRAGMENT_PROGRAM |
1629 BRW_NEW_FS_PROG_DATA |
1630 BRW_NEW_IMAGE_UNITS
1631 },
1632 .emit = brw_upload_wm_image_surfaces,
1633 };
1634
1635 void
1636 gen4_init_vtable_surface_functions(struct brw_context *brw)
1637 {
1638 brw->vtbl.update_renderbuffer_surface = gen4_update_renderbuffer_surface;
1639 brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
1640 }
1641
1642 void
1643 gen6_init_vtable_surface_functions(struct brw_context *brw)
1644 {
1645 gen4_init_vtable_surface_functions(brw);
1646 brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
1647 }
1648
1649 static void
1650 brw_upload_cs_work_groups_surface(struct brw_context *brw)
1651 {
1652 struct gl_context *ctx = &brw->ctx;
1653 /* _NEW_PROGRAM */
1654 struct gl_shader_program *prog =
1655 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1656
1657 if (prog && brw->cs.prog_data->uses_num_work_groups) {
1658 const unsigned surf_idx =
1659 brw->cs.prog_data->binding_table.work_groups_start;
1660 uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
1661 drm_intel_bo *bo;
1662 uint32_t bo_offset;
1663
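/* If the work group counts are not already in a buffer object, upload the
 * three GLuint values so the shader can read them through this surface.
 */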
1664 if (brw->compute.num_work_groups_bo == NULL) {
1665 bo = NULL;
1666 intel_upload_data(brw,
1667 (void *)brw->compute.num_work_groups,
1668 3 * sizeof(GLuint),
1669 sizeof(GLuint),
1670 &bo,
1671 &bo_offset);
1672 } else {
1673 bo = brw->compute.num_work_groups_bo;
1674 bo_offset = brw->compute.num_work_groups_offset;
1675 }
1676
1677 brw_emit_buffer_surface_state(brw, surf_offset,
1678 bo, bo_offset,
1679 BRW_SURFACEFORMAT_RAW,
1680 3 * sizeof(GLuint), 1, true);
1681 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1682 }
1683 }
1684
1685 const struct brw_tracked_state brw_cs_work_groups_surface = {
1686 .dirty = {
1687 .brw = BRW_NEW_BLORP |
1688 BRW_NEW_CS_WORK_GROUPS
1689 },
1690 .emit = brw_upload_cs_work_groups_surface,
1691 };