i965: Disable hardware blending if advanced blending is in use.
[mesa.git] / src/mesa/drivers/dri/i965/brw_wm_surface_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "main/context.h"
34 #include "main/blend.h"
35 #include "main/mtypes.h"
36 #include "main/samplerobj.h"
37 #include "main/shaderimage.h"
38 #include "main/teximage.h"
39 #include "program/prog_parameter.h"
40 #include "program/prog_instruction.h"
41 #include "main/framebuffer.h"
42 #include "main/shaderapi.h"
43
44 #include "isl/isl.h"
45
46 #include "intel_mipmap_tree.h"
47 #include "intel_batchbuffer.h"
48 #include "intel_tex.h"
49 #include "intel_fbo.h"
50 #include "intel_buffer_objects.h"
51
52 #include "brw_context.h"
53 #include "brw_state.h"
54 #include "brw_defines.h"
55 #include "brw_wm.h"
56
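/* Per-hardware-generation description of how RENDER_SURFACE_STATE is laid
 * out: its size and required alignment, which DWORDs need relocations for
 * the main and auxiliary surfaces, and the MOCS values used for texture
 * and render-target accesses.
 */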
57 struct surface_state_info {
58 unsigned num_dwords;
59 unsigned ss_align; /* Required alignment of RENDER_SURFACE_STATE in bytes */
60 unsigned reloc_dw;
61 unsigned aux_reloc_dw;
62 unsigned tex_mocs;
63 unsigned rb_mocs;
64 };
65
66 static const struct surface_state_info surface_state_infos[] = {
67 [4] = {6, 32, 1, 0},
68 [5] = {6, 32, 1, 0},
69 [6] = {6, 32, 1, 0},
70 [7] = {8, 32, 1, 6, GEN7_MOCS_L3, GEN7_MOCS_L3},
71 [8] = {13, 64, 8, 10, BDW_MOCS_WB, BDW_MOCS_PTE},
72 [9] = {16, 64, 8, 10, SKL_MOCS_WB, SKL_MOCS_PTE},
73 };
74
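/* Emit a RENDER_SURFACE_STATE for the given miptree using ISL.  If the
 * requested target's dimension layout doesn't match the miptree layout,
 * fall back to addressing a single level/layer via the tile offset
 * mechanism.  Also hooks up any auxiliary (MCS) surface and clear color,
 * and emits the relocations for the surface and aux addresses.
 */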
75 static void
76 brw_emit_surface_state(struct brw_context *brw,
77 struct intel_mipmap_tree *mt,
78 GLenum target, struct isl_view view,
79 uint32_t mocs, uint32_t *surf_offset, int surf_index,
80 unsigned read_domains, unsigned write_domains)
81 {
82 const struct surface_state_info ss_info = surface_state_infos[brw->gen];
83 uint32_t tile_x = 0, tile_y = 0;
84 uint32_t offset = mt->offset;
85
86 struct isl_surf surf;
87 intel_miptree_get_isl_surf(brw, mt, &surf);
88
89 surf.dim = get_isl_surf_dim(target);
90
91 const enum isl_dim_layout dim_layout =
92 get_isl_dim_layout(brw->intelScreen->devinfo, mt->tiling, target);
93
94 if (surf.dim_layout != dim_layout) {
95 /* The layout of the specified texture target is not compatible with the
96        * actual layout of the miptree structure in memory -- you're entering
97        * dangerous territory.  This can only possibly work if you only intended
98 * to access a single level and slice of the texture, and the hardware
99 * supports the tile offset feature in order to allow non-tile-aligned
100 * base offsets, since we'll have to point the hardware to the first
101 * texel of the level instead of relying on the usual base level/layer
102 * controls.
103 */
104 assert(brw->has_surface_tile_offset);
105 assert(view.levels == 1 && view.array_len == 1);
106
107 offset += intel_miptree_get_tile_offsets(mt, view.base_level,
108 view.base_array_layer,
109 &tile_x, &tile_y);
110
111 /* Minify the logical dimensions of the texture. */
112 const unsigned l = view.base_level - mt->first_level;
113 surf.logical_level0_px.width = minify(surf.logical_level0_px.width, l);
114 surf.logical_level0_px.height = surf.dim <= ISL_SURF_DIM_1D ? 1 :
115 minify(surf.logical_level0_px.height, l);
116 surf.logical_level0_px.depth = surf.dim <= ISL_SURF_DIM_2D ? 1 :
117 minify(surf.logical_level0_px.depth, l);
118
119 /* Only the base level and layer can be addressed with the overridden
120 * layout.
121 */
122 surf.logical_level0_px.array_len = 1;
123 surf.levels = 1;
124 surf.dim_layout = dim_layout;
125
126 /* The requested slice of the texture is now at the base level and
127 * layer.
128 */
129 view.base_level = 0;
130 view.base_array_layer = 0;
131 }
132
133 union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };
134
135 struct isl_surf *aux_surf = NULL, aux_surf_s;
136 uint64_t aux_offset = 0;
137 enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE;
138 if (mt->mcs_mt &&
139 ((view.usage & ISL_SURF_USAGE_RENDER_TARGET_BIT) ||
140 mt->fast_clear_state != INTEL_FAST_CLEAR_STATE_RESOLVED)) {
141 intel_miptree_get_aux_isl_surf(brw, mt, &aux_surf_s, &aux_usage);
142 aux_surf = &aux_surf_s;
143 assert(mt->mcs_mt->offset == 0);
144 aux_offset = mt->mcs_mt->bo->offset64;
145
146 /* We only really need a clear color if we also have an auxiliary
147        * surface.  Without one, it does nothing.
148 */
149 clear_color = intel_miptree_get_isl_clear_color(brw, mt);
150 }
151
152 uint32_t *dw = __brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
153 ss_info.num_dwords * 4, ss_info.ss_align,
154 surf_index, surf_offset);
155
156 isl_surf_fill_state(&brw->isl_dev, dw, .surf = &surf, .view = &view,
157 .address = mt->bo->offset64 + offset,
158 .aux_surf = aux_surf, .aux_usage = aux_usage,
159 .aux_address = aux_offset,
160 .mocs = mocs, .clear_color = clear_color,
161 .x_offset_sa = tile_x, .y_offset_sa = tile_y);
162
163 drm_intel_bo_emit_reloc(brw->batch.bo,
164 *surf_offset + 4 * ss_info.reloc_dw,
165 mt->bo, offset,
166 read_domains, write_domains);
167
168 if (aux_surf) {
169 /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
170 * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
171 * contain other control information. Since buffer addresses are always
172 * on 4k boundaries (and thus have their lower 12 bits zero), we can use
173 * an ordinary reloc to do the necessary address translation.
174 */
175 assert((aux_offset & 0xfff) == 0);
176 drm_intel_bo_emit_reloc(brw->batch.bo,
177 *surf_offset + 4 * ss_info.aux_reloc_dw,
178 mt->mcs_mt->bo, dw[ss_info.aux_reloc_dw] & 0xfff,
179 read_domains, write_domains);
180 }
181 }
182
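/* Build a SURFACE_STATE for a color renderbuffer used as a render target,
 * going through the common ISL-based brw_emit_surface_state() path.
 */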
183 uint32_t
184 brw_update_renderbuffer_surface(struct brw_context *brw,
185 struct gl_renderbuffer *rb,
186 bool layered, unsigned unit /* unused */,
187 uint32_t surf_index)
188 {
189 struct gl_context *ctx = &brw->ctx;
190 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
191 struct intel_mipmap_tree *mt = irb->mt;
192
193 assert(brw_render_target_supported(brw, rb));
194 intel_miptree_used_for_rendering(mt);
195
196 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
197 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
198 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
199 __func__, _mesa_get_format_name(rb_format));
200 }
201
202 const unsigned layer_multiplier =
203 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
204 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
205 MAX2(irb->mt->num_samples, 1) : 1;
206
207 struct isl_view view = {
208 .format = brw->render_target_format[rb_format],
209 .base_level = irb->mt_level - irb->mt->first_level,
210 .levels = 1,
211 .base_array_layer = irb->mt_layer / layer_multiplier,
212 .array_len = MAX2(irb->layer_count, 1),
213 .channel_select = {
214 ISL_CHANNEL_SELECT_RED,
215 ISL_CHANNEL_SELECT_GREEN,
216 ISL_CHANNEL_SELECT_BLUE,
217 ISL_CHANNEL_SELECT_ALPHA,
218 },
219 .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
220 };
221
222 uint32_t offset;
223 brw_emit_surface_state(brw, mt, mt->target, view,
224 surface_state_infos[brw->gen].rb_mocs,
225 &offset, surf_index,
226 I915_GEM_DOMAIN_RENDER,
227 I915_GEM_DOMAIN_RENDER);
228 return offset;
229 }
230
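/* Translate a GL texture target into the corresponding BRW_SURFACE_* surface
 * type used in SURFACE_STATE.
 */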
231 GLuint
232 translate_tex_target(GLenum target)
233 {
234 switch (target) {
235 case GL_TEXTURE_1D:
236 case GL_TEXTURE_1D_ARRAY_EXT:
237 return BRW_SURFACE_1D;
238
239 case GL_TEXTURE_RECTANGLE_NV:
240 return BRW_SURFACE_2D;
241
242 case GL_TEXTURE_2D:
243 case GL_TEXTURE_2D_ARRAY_EXT:
244 case GL_TEXTURE_EXTERNAL_OES:
245 case GL_TEXTURE_2D_MULTISAMPLE:
246 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
247 return BRW_SURFACE_2D;
248
249 case GL_TEXTURE_3D:
250 return BRW_SURFACE_3D;
251
252 case GL_TEXTURE_CUBE_MAP:
253 case GL_TEXTURE_CUBE_MAP_ARRAY:
254 return BRW_SURFACE_CUBE;
255
256 default:
257 unreachable("not reached");
258 }
259 }
260
261 uint32_t
262 brw_get_surface_tiling_bits(uint32_t tiling)
263 {
264 switch (tiling) {
265 case I915_TILING_X:
266 return BRW_SURFACE_TILED;
267 case I915_TILING_Y:
268 return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
269 default:
270 return 0;
271 }
272 }
273
274
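/* Translate a sample count into the SURFACE_STATE multisample count field:
 * anything above a single sample is encoded as MULTISAMPLECOUNT_4.
 */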
275 uint32_t
276 brw_get_surface_num_multisamples(unsigned num_samples)
277 {
278 if (num_samples > 1)
279 return BRW_SURFACE_MULTISAMPLECOUNT_4;
280 else
281 return BRW_SURFACE_MULTISAMPLECOUNT_1;
282 }
283
284 /**
285 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
286 * swizzling.
287 */
288 int
289 brw_get_texture_swizzle(const struct gl_context *ctx,
290 const struct gl_texture_object *t)
291 {
292 const struct gl_texture_image *img = t->Image[0][t->BaseLevel];
293
294 int swizzles[SWIZZLE_NIL + 1] = {
295 SWIZZLE_X,
296 SWIZZLE_Y,
297 SWIZZLE_Z,
298 SWIZZLE_W,
299 SWIZZLE_ZERO,
300 SWIZZLE_ONE,
301 SWIZZLE_NIL
302 };
303
304 if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
305 img->_BaseFormat == GL_DEPTH_STENCIL) {
306 GLenum depth_mode = t->DepthMode;
307
308 /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
309 * with depth component data specified with a sized internal format.
310 * Otherwise, it's left at the old default, GL_LUMINANCE.
311 */
312 if (_mesa_is_gles3(ctx) &&
313 img->InternalFormat != GL_DEPTH_COMPONENT &&
314 img->InternalFormat != GL_DEPTH_STENCIL) {
315 depth_mode = GL_RED;
316 }
317
318 switch (depth_mode) {
319 case GL_ALPHA:
320 swizzles[0] = SWIZZLE_ZERO;
321 swizzles[1] = SWIZZLE_ZERO;
322 swizzles[2] = SWIZZLE_ZERO;
323 swizzles[3] = SWIZZLE_X;
324 break;
325 case GL_LUMINANCE:
326 swizzles[0] = SWIZZLE_X;
327 swizzles[1] = SWIZZLE_X;
328 swizzles[2] = SWIZZLE_X;
329 swizzles[3] = SWIZZLE_ONE;
330 break;
331 case GL_INTENSITY:
332 swizzles[0] = SWIZZLE_X;
333 swizzles[1] = SWIZZLE_X;
334 swizzles[2] = SWIZZLE_X;
335 swizzles[3] = SWIZZLE_X;
336 break;
337 case GL_RED:
338 swizzles[0] = SWIZZLE_X;
339 swizzles[1] = SWIZZLE_ZERO;
340 swizzles[2] = SWIZZLE_ZERO;
341 swizzles[3] = SWIZZLE_ONE;
342 break;
343 }
344 }
345
346 GLenum datatype = _mesa_get_format_datatype(img->TexFormat);
347
348 /* If the texture's format is alpha-only, force R, G, and B to
349 * 0.0. Similarly, if the texture's format has no alpha channel,
350  * force the alpha value read to 1.0. This allows the
351 * implementation to use an RGBA texture for any of these formats
352 * without leaking any unexpected values.
353 */
354 switch (img->_BaseFormat) {
355 case GL_ALPHA:
356 swizzles[0] = SWIZZLE_ZERO;
357 swizzles[1] = SWIZZLE_ZERO;
358 swizzles[2] = SWIZZLE_ZERO;
359 break;
360 case GL_LUMINANCE:
361 if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
362 swizzles[0] = SWIZZLE_X;
363 swizzles[1] = SWIZZLE_X;
364 swizzles[2] = SWIZZLE_X;
365 swizzles[3] = SWIZZLE_ONE;
366 }
367 break;
368 case GL_LUMINANCE_ALPHA:
369 if (datatype == GL_SIGNED_NORMALIZED) {
370 swizzles[0] = SWIZZLE_X;
371 swizzles[1] = SWIZZLE_X;
372 swizzles[2] = SWIZZLE_X;
373 swizzles[3] = SWIZZLE_W;
374 }
375 break;
376 case GL_INTENSITY:
377 if (datatype == GL_SIGNED_NORMALIZED) {
378 swizzles[0] = SWIZZLE_X;
379 swizzles[1] = SWIZZLE_X;
380 swizzles[2] = SWIZZLE_X;
381 swizzles[3] = SWIZZLE_X;
382 }
383 break;
384 case GL_RED:
385 case GL_RG:
386 case GL_RGB:
387 if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0)
388 swizzles[3] = SWIZZLE_ONE;
389 break;
390 }
391
392 return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
393 swizzles[GET_SWZ(t->_Swizzle, 1)],
394 swizzles[GET_SWZ(t->_Swizzle, 2)],
395 swizzles[GET_SWZ(t->_Swizzle, 3)]);
396 }
397
398 /**
399  * Convert a swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
400 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED). The mappings are
401 *
402 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
403 * 0 1 2 3 4 5
404 * 4 5 6 7 0 1
405 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
406 *
407 * which is simply adding 4 then modding by 8 (or anding with 7).
408 *
409 * We then may need to apply workarounds for textureGather hardware bugs.
410 */
411 static unsigned
412 swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
413 {
414 unsigned scs = (swizzle + 4) & 7;
415
416 return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
417 }
418
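/* Construct a SURFACE_STATE for one bound texture unit.  Handles buffer
 * textures, multi-planar miptrees, the gen6/7 gather4 format work-arounds,
 * stencil texturing and texture views before emitting the surface through
 * brw_emit_surface_state().
 */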
419 void
420 brw_update_texture_surface(struct gl_context *ctx,
421 unsigned unit,
422 uint32_t *surf_offset,
423 bool for_gather,
424 uint32_t plane)
425 {
426 struct brw_context *brw = brw_context(ctx);
427 struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
428
429 if (obj->Target == GL_TEXTURE_BUFFER) {
430 brw_update_buffer_texture_surface(ctx, unit, surf_offset);
431
432 } else {
433 struct intel_texture_object *intel_obj = intel_texture_object(obj);
434 struct intel_mipmap_tree *mt = intel_obj->mt;
435
436 if (plane > 0) {
437 if (mt->plane[plane - 1] == NULL)
438 return;
439 mt = mt->plane[plane - 1];
440 }
441
442 struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
443 /* If this is a view with restricted NumLayers, then our effective depth
444 * is not just the miptree depth.
445 */
446 const unsigned view_num_layers =
447 (obj->Immutable && obj->Target != GL_TEXTURE_3D) ? obj->NumLayers :
448 mt->logical_depth0;
449
450 /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
451 * texturing functions that return a float, as our code generation always
452 * selects the .x channel (which would always be 0).
453 */
454 struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
455 const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
456 (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
457 firstImage->_BaseFormat == GL_DEPTH_STENCIL);
458 const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
459 brw_get_texture_swizzle(&brw->ctx, obj));
460
461 mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
462 unsigned format = translate_tex_format(brw, mesa_fmt,
463 sampler->sRGBDecode);
464
465 /* Implement gen6 and gen7 gather work-around */
466 bool need_green_to_blue = false;
467 if (for_gather) {
468 if (brw->gen == 7 && format == BRW_SURFACEFORMAT_R32G32_FLOAT) {
469 format = BRW_SURFACEFORMAT_R32G32_FLOAT_LD;
470 need_green_to_blue = brw->is_haswell;
471 } else if (brw->gen == 6) {
472 /* Sandybridge's gather4 message is broken for integer formats.
473 * To work around this, we pretend the surface is UNORM for
474 * 8 or 16-bit formats, and emit shader instructions to recover
475 * the real INT/UINT value. For 32-bit formats, we pretend
476 * the surface is FLOAT, and simply reinterpret the resulting
477 * bits.
478 */
479 switch (format) {
480 case BRW_SURFACEFORMAT_R8_SINT:
481 case BRW_SURFACEFORMAT_R8_UINT:
482 format = BRW_SURFACEFORMAT_R8_UNORM;
483 break;
484
485 case BRW_SURFACEFORMAT_R16_SINT:
486 case BRW_SURFACEFORMAT_R16_UINT:
487 format = BRW_SURFACEFORMAT_R16_UNORM;
488 break;
489
490 case BRW_SURFACEFORMAT_R32_SINT:
491 case BRW_SURFACEFORMAT_R32_UINT:
492 format = BRW_SURFACEFORMAT_R32_FLOAT;
493 break;
494
495 default:
496 break;
497 }
498 }
499 }
500
501 if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
502 assert(brw->gen >= 8);
503 mt = mt->stencil_mt;
504 format = BRW_SURFACEFORMAT_R8_UINT;
505 }
506
507 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
508
509 struct isl_view view = {
510 .format = format,
511 .base_level = obj->MinLevel + obj->BaseLevel,
512 .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
513 .base_array_layer = obj->MinLayer,
514 .array_len = view_num_layers,
515 .channel_select = {
516 swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
517 swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
518 swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
519 swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
520 },
521 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
522 };
523
524 if (obj->Target == GL_TEXTURE_CUBE_MAP ||
525 obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
526 view.usage |= ISL_SURF_USAGE_CUBE_BIT;
527
528 brw_emit_surface_state(brw, mt, mt->target, view,
529 surface_state_infos[brw->gen].tex_mocs,
530 surf_offset, surf_index,
531 I915_GEM_DOMAIN_SAMPLER, 0);
532 }
533 }
534
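/* Emit a SURFACE_STATE for a buffer of the given size, format and stride
 * using isl_buffer_fill_state(), plus the relocation for its address.  A
 * NULL bo produces a surface with a zero base address.
 */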
535 void
536 brw_emit_buffer_surface_state(struct brw_context *brw,
537 uint32_t *out_offset,
538 drm_intel_bo *bo,
539 unsigned buffer_offset,
540 unsigned surface_format,
541 unsigned buffer_size,
542 unsigned pitch,
543 bool rw)
544 {
545 const struct surface_state_info ss_info = surface_state_infos[brw->gen];
546
547 uint32_t *dw = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
548 ss_info.num_dwords * 4, ss_info.ss_align,
549 out_offset);
550
551 isl_buffer_fill_state(&brw->isl_dev, dw,
552 .address = (bo ? bo->offset64 : 0) + buffer_offset,
553 .size = buffer_size,
554 .format = surface_format,
555 .stride = pitch,
556 .mocs = ss_info.tex_mocs);
557
558 if (bo) {
559 drm_intel_bo_emit_reloc(brw->batch.bo,
560 *out_offset + 4 * ss_info.reloc_dw,
561 bo, buffer_offset,
562 I915_GEM_DOMAIN_SAMPLER,
563 (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
564 }
565 }
566
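/* Set up the surface for a buffer texture (GL_TEXTURE_BUFFER), clamping the
 * size to the underlying buffer object and translating the texel format.
 */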
567 void
568 brw_update_buffer_texture_surface(struct gl_context *ctx,
569 unsigned unit,
570 uint32_t *surf_offset)
571 {
572 struct brw_context *brw = brw_context(ctx);
573 struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
574 struct intel_buffer_object *intel_obj =
575 intel_buffer_object(tObj->BufferObject);
576 uint32_t size = tObj->BufferSize;
577 drm_intel_bo *bo = NULL;
578 mesa_format format = tObj->_BufferObjectFormat;
579 uint32_t brw_format = brw_format_for_mesa_format(format);
580 int texel_size = _mesa_get_format_bytes(format);
581
582 if (intel_obj) {
583 size = MIN2(size, intel_obj->Base.Size);
584 bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size);
585 }
586
587 if (brw_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
588 _mesa_problem(NULL, "bad format %s for texture buffer\n",
589 _mesa_get_format_name(format));
590 }
591
592 brw_emit_buffer_surface_state(brw, surf_offset, bo,
593 tObj->BufferOffset,
594 brw_format,
595 size,
596 texel_size,
597 false /* rw */);
598 }
599
600 /**
601 * Create the constant buffer surface. Vertex/fragment shader constants will be
602 * read from this buffer with Data Port Read instructions/messages.
603 */
604 void
605 brw_create_constant_surface(struct brw_context *brw,
606 drm_intel_bo *bo,
607 uint32_t offset,
608 uint32_t size,
609 uint32_t *out_offset)
610 {
611 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
612 BRW_SURFACEFORMAT_R32G32B32A32_FLOAT,
613 size, 1, false);
614 }
615
616 /**
617 * Create the buffer surface. Shader buffer variables will be
618  * read from / written to this buffer with Data Port Read/Write
619 * instructions/messages.
620 */
621 void
622 brw_create_buffer_surface(struct brw_context *brw,
623 drm_intel_bo *bo,
624 uint32_t offset,
625 uint32_t size,
626 uint32_t *out_offset)
627 {
628 /* Use a raw surface so we can reuse existing untyped read/write/atomic
629 * messages. We need these specifically for the fragment shader since they
630     * include a pixel mask header that we need in order to ensure correct
631     * behavior with helper invocations, which cannot write to the buffer.
632 */
633 brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
634 BRW_SURFACEFORMAT_RAW,
635 size, 1, true);
636 }
637
638 /**
639 * Set up a binding table entry for use by stream output logic (transform
640 * feedback).
641 *
642 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
643 */
644 void
645 brw_update_sol_surface(struct brw_context *brw,
646 struct gl_buffer_object *buffer_obj,
647 uint32_t *out_offset, unsigned num_vector_components,
648 unsigned stride_dwords, unsigned offset_dwords)
649 {
650 struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
651 uint32_t offset_bytes = 4 * offset_dwords;
652 drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
653 offset_bytes,
654 buffer_obj->Size - offset_bytes);
655 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
656 out_offset);
657 uint32_t pitch_minus_1 = 4*stride_dwords - 1;
658 size_t size_dwords = buffer_obj->Size / 4;
659 uint32_t buffer_size_minus_1, width, height, depth, surface_format;
660
661 /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
662 * too big to map using a single binding table entry?
663 */
664 assert((size_dwords - offset_dwords) / stride_dwords
665 <= BRW_MAX_NUM_BUFFER_ENTRIES);
666
667 if (size_dwords > offset_dwords + num_vector_components) {
668 /* There is room for at least 1 transform feedback output in the buffer.
669 * Compute the number of additional transform feedback outputs the
670 * buffer has room for.
671 */
672 buffer_size_minus_1 =
673 (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
674 } else {
675 /* There isn't even room for a single transform feedback output in the
676 * buffer. We can't configure the binding table entry to prevent output
677 * entirely; we'll have to rely on the geometry shader to detect
678 * overflow. But to minimize the damage in case of a bug, set up the
679 * binding table entry to just allow a single output.
680 */
681 buffer_size_minus_1 = 0;
682 }
683 width = buffer_size_minus_1 & 0x7f;
684 height = (buffer_size_minus_1 & 0xfff80) >> 7;
685 depth = (buffer_size_minus_1 & 0x7f00000) >> 20;
686
687 switch (num_vector_components) {
688 case 1:
689 surface_format = BRW_SURFACEFORMAT_R32_FLOAT;
690 break;
691 case 2:
692 surface_format = BRW_SURFACEFORMAT_R32G32_FLOAT;
693 break;
694 case 3:
695 surface_format = BRW_SURFACEFORMAT_R32G32B32_FLOAT;
696 break;
697 case 4:
698 surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;
699 break;
700 default:
701 unreachable("Invalid vector size for transform feedback output");
702 }
703
704 surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
705 BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
706 surface_format << BRW_SURFACE_FORMAT_SHIFT |
707 BRW_SURFACE_RC_READ_WRITE;
708 surf[1] = bo->offset64 + offset_bytes; /* reloc */
709 surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
710 height << BRW_SURFACE_HEIGHT_SHIFT);
711 surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
712 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
713 surf[4] = 0;
714 surf[5] = 0;
715
716 /* Emit relocation to surface contents. */
717 drm_intel_bo_emit_reloc(brw->batch.bo,
718 *out_offset + 4,
719 bo, offset_bytes,
720 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
721 }
722
723 /* Creates a new WM constant buffer reflecting the current fragment program's
724 * constants, if needed by the fragment program.
725 *
726 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
727 * state atom.
728 */
729 static void
730 brw_upload_wm_pull_constants(struct brw_context *brw)
731 {
732 struct brw_stage_state *stage_state = &brw->wm.base;
733 /* BRW_NEW_FRAGMENT_PROGRAM */
734 struct brw_fragment_program *fp =
735 (struct brw_fragment_program *) brw->fragment_program;
736 /* BRW_NEW_FS_PROG_DATA */
737 struct brw_stage_prog_data *prog_data = &brw->wm.prog_data->base;
738
739 _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
740 /* _NEW_PROGRAM_CONSTANTS */
741 brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program.Base,
742 stage_state, prog_data);
743 }
744
745 const struct brw_tracked_state brw_wm_pull_constants = {
746 .dirty = {
747 .mesa = _NEW_PROGRAM_CONSTANTS,
748 .brw = BRW_NEW_BATCH |
749 BRW_NEW_BLORP |
750 BRW_NEW_FRAGMENT_PROGRAM |
751 BRW_NEW_FS_PROG_DATA,
752 },
753 .emit = brw_upload_wm_pull_constants,
754 };
755
756 /**
757 * Creates a null renderbuffer surface.
758 *
759 * This is used when the shader doesn't write to any color output. An FB
760 * write to target 0 will still be emitted, because that's how the thread is
761 * terminated (and computed depth is returned), so we need to have the
762  * hardware discard the target 0 color output.
763 */
764 static void
765 brw_emit_null_surface_state(struct brw_context *brw,
766 unsigned width,
767 unsigned height,
768 unsigned samples,
769 uint32_t *out_offset)
770 {
771    /* From the Sandy Bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
772 * Notes):
773 *
774 * A null surface will be used in instances where an actual surface is
775 * not bound. When a write message is generated to a null surface, no
776 * actual surface is written to. When a read message (including any
777 * sampling engine message) is generated to a null surface, the result
778 * is all zeros. Note that a null surface type is allowed to be used
779     *     with all messages, even if it is not specifically indicated as
780 * supported. All of the remaining fields in surface state are ignored
781 * for null surfaces, with the following exceptions:
782 *
783 * - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
784 * depth buffer’s corresponding state for all render target surfaces,
785 * including null.
786 *
787 * - Surface Format must be R8G8B8A8_UNORM.
788 */
789 unsigned surface_type = BRW_SURFACE_NULL;
790 drm_intel_bo *bo = NULL;
791 unsigned pitch_minus_1 = 0;
792 uint32_t multisampling_state = 0;
793 uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
794 out_offset);
795
796 if (samples > 1) {
797 /* On Gen6, null render targets seem to cause GPU hangs when
798        * multisampling.  So work around this problem by rendering into a dummy
799        * color buffer.
800 *
801 * To decrease the amount of memory needed by the workaround buffer, we
802 * set its pitch to 128 bytes (the width of a Y tile). This means that
803 * the amount of memory needed for the workaround buffer is
804 * (width_in_tiles + height_in_tiles - 1) tiles.
805 *
806 * Note that since the workaround buffer will be interpreted by the
807 * hardware as an interleaved multisampled buffer, we need to compute
808 * width_in_tiles and height_in_tiles by dividing the width and height
809 * by 16 rather than the normal Y-tile size of 32.
810 */
811 unsigned width_in_tiles = ALIGN(width, 16) / 16;
812 unsigned height_in_tiles = ALIGN(height, 16) / 16;
813 unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
814 brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
815 size_needed);
816 bo = brw->wm.multisampled_null_render_target_bo;
817 surface_type = BRW_SURFACE_2D;
818 pitch_minus_1 = 127;
819 multisampling_state = brw_get_surface_num_multisamples(samples);
820 }
821
822 surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
823 BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
824 if (brw->gen < 6) {
825 surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
826 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
827 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
828 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
829 }
830 surf[1] = bo ? bo->offset64 : 0;
831 surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
832 (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
833
834    /* From the Sandy Bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
835 * Notes):
836 *
837 * If Surface Type is SURFTYPE_NULL, this field must be TRUE
838 */
839 surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
840 pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
841 surf[4] = multisampling_state;
842 surf[5] = 0;
843
844 if (bo) {
845 drm_intel_bo_emit_reloc(brw->batch.bo,
846 *out_offset + 4,
847 bo, 0,
848 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
849 }
850 }
851
852 /**
853 * Sets up a surface state structure to point at the given region.
854 * While it is only used for the front/back buffer currently, it should be
855  * usable for further buffers when doing ARB_draw_buffers support.
856 */
857 static uint32_t
858 gen4_update_renderbuffer_surface(struct brw_context *brw,
859 struct gl_renderbuffer *rb,
860 bool layered, unsigned unit,
861 uint32_t surf_index)
862 {
863 struct gl_context *ctx = &brw->ctx;
864 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
865 struct intel_mipmap_tree *mt = irb->mt;
866 uint32_t *surf;
867 uint32_t tile_x, tile_y;
868 uint32_t format = 0;
869 uint32_t offset;
870 /* _NEW_BUFFERS */
871 mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
872 /* BRW_NEW_FS_PROG_DATA */
873
874 assert(!layered);
875
876 if (rb->TexImage && !brw->has_surface_tile_offset) {
877 intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);
878
879 if (tile_x != 0 || tile_y != 0) {
880 /* Original gen4 hardware couldn't draw to a non-tile-aligned
881           * destination in a miptree unless you actually set up your renderbuffer
882 * as a miptree and used the fragile lod/array_index/etc. controls to
883 * select the image. So, instead, we just make a new single-level
884 * miptree and render into that.
885 */
886 intel_renderbuffer_move_to_temp(brw, irb, false);
887 mt = irb->mt;
888 }
889 }
890
891 intel_miptree_used_for_rendering(irb->mt);
892
893 surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32, &offset);
894
895 format = brw->render_target_format[rb_format];
896 if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
897 _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
898 __func__, _mesa_get_format_name(rb_format));
899 }
900
901 surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
902 format << BRW_SURFACE_FORMAT_SHIFT);
903
904 /* reloc */
905 assert(mt->offset % mt->cpp == 0);
906 surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
907 mt->bo->offset64 + mt->offset);
908
909 surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
910 (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);
911
912 surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
913 (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);
914
915 surf[4] = brw_get_surface_num_multisamples(mt->num_samples);
916
917 assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
918 /* Note that the low bits of these fields are missing, so
919 * there's the possibility of getting in trouble.
920 */
921 assert(tile_x % 4 == 0);
922 assert(tile_y % 2 == 0);
923 surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
924 (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
925 (mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));
926
927 if (brw->gen < 6) {
928 /* _NEW_COLOR */
929 if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
930 (ctx->Color.BlendEnabled & (1 << unit)))
931 surf[0] |= BRW_SURFACE_BLEND_ENABLED;
932
933 if (!ctx->Color.ColorMask[unit][0])
934 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
935 if (!ctx->Color.ColorMask[unit][1])
936 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
937 if (!ctx->Color.ColorMask[unit][2])
938 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;
939
940       /* Disable writes to the alpha component when the
941 * renderbuffer is XRGB.
942 */
943 if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
944 !ctx->Color.ColorMask[unit][3]) {
945 surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
946 }
947 }
948
949 drm_intel_bo_emit_reloc(brw->batch.bo,
950 offset + 4,
951 mt->bo,
952 surf[1] - mt->bo->offset64,
953 I915_GEM_DOMAIN_RENDER,
954 I915_GEM_DOMAIN_RENDER);
955
956 return offset;
957 }
958
959 /**
960 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
961 */
962 void
963 brw_update_renderbuffer_surfaces(struct brw_context *brw,
964 const struct gl_framebuffer *fb,
965 uint32_t render_target_start,
966 uint32_t *surf_offset)
967 {
968 GLuint i;
969 const unsigned int w = _mesa_geometric_width(fb);
970 const unsigned int h = _mesa_geometric_height(fb);
971 const unsigned int s = _mesa_geometric_samples(fb);
972
973 /* Update surfaces for drawing buffers */
974 if (fb->_NumColorDrawBuffers >= 1) {
975 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
976 const uint32_t surf_index = render_target_start + i;
977
978 if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
979 surf_offset[surf_index] =
980 brw->vtbl.update_renderbuffer_surface(
981 brw, fb->_ColorDrawBuffers[i],
982 _mesa_geometric_layers(fb) > 0, i, surf_index);
983 } else {
984 brw->vtbl.emit_null_surface_state(brw, w, h, s,
985 &surf_offset[surf_index]);
986 }
987 }
988 } else {
989 const uint32_t surf_index = render_target_start;
990 brw->vtbl.emit_null_surface_state(brw, w, h, s,
991 &surf_offset[surf_index]);
992 }
993 }
994
995 static void
996 update_renderbuffer_surfaces(struct brw_context *brw)
997 {
998 const struct gl_context *ctx = &brw->ctx;
999
1000 /* _NEW_BUFFERS | _NEW_COLOR */
1001 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1002 brw_update_renderbuffer_surfaces(
1003 brw, fb,
1004 brw->wm.prog_data->binding_table.render_target_start,
1005 brw->wm.base.surf_offset);
1006 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1007 }
1008
1009 const struct brw_tracked_state brw_renderbuffer_surfaces = {
1010 .dirty = {
1011 .mesa = _NEW_BUFFERS |
1012 _NEW_COLOR,
1013 .brw = BRW_NEW_BATCH |
1014 BRW_NEW_BLORP |
1015 BRW_NEW_FS_PROG_DATA,
1016 },
1017 .emit = update_renderbuffer_surfaces,
1018 };
1019
1020 const struct brw_tracked_state gen6_renderbuffer_surfaces = {
1021 .dirty = {
1022 .mesa = _NEW_BUFFERS,
1023 .brw = BRW_NEW_BATCH |
1024 BRW_NEW_BLORP,
1025 },
1026 .emit = update_renderbuffer_surfaces,
1027 };
1028
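/* Construct SURFACE_STATE objects that let the fragment shader sample the
 * current draw buffers, used to implement framebuffer fetch when the
 * coherent MESA_shader_framebuffer_fetch path isn't available.
 */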
1029 static void
1030 update_renderbuffer_read_surfaces(struct brw_context *brw)
1031 {
1032 const struct gl_context *ctx = &brw->ctx;
1033
1034 /* BRW_NEW_FRAGMENT_PROGRAM */
1035 if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
1036 brw->fragment_program &&
1037 brw->fragment_program->Base.OutputsRead) {
1038 /* _NEW_BUFFERS */
1039 const struct gl_framebuffer *fb = ctx->DrawBuffer;
1040
1041 for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
1042 struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
1043 const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
1044 const unsigned surf_index =
1045 brw->wm.prog_data->binding_table.render_target_read_start + i;
1046 uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];
1047
1048 if (irb) {
1049 const unsigned format = brw->render_target_format[
1050 _mesa_get_render_format(ctx, intel_rb_format(irb))];
1051 assert(isl_format_supports_sampling(brw->intelScreen->devinfo,
1052 format));
1053
1054 /* Override the target of the texture if the render buffer is a
1055 * single slice of a 3D texture (since the minimum array element
1056 * field of the surface state structure is ignored by the sampler
1057 * unit for 3D textures on some hardware), or if the render buffer
1058 * is a 1D array (since shaders always provide the array index
1059 * coordinate at the Z component to avoid state-dependent
1060 * recompiles when changing the texture target of the
1061 * framebuffer).
1062 */
1063 const GLenum target =
1064 (irb->mt->target == GL_TEXTURE_3D &&
1065 irb->layer_count == 1) ? GL_TEXTURE_2D :
1066 irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
1067 irb->mt->target;
1068
1069 /* intel_renderbuffer::mt_layer is expressed in sample units for
1070 * the UMS and CMS multisample layouts, but
1071 * intel_renderbuffer::layer_count is expressed in units of whole
1072 * logical layers regardless of the multisample layout.
1073 */
1074 const unsigned mt_layer_unit =
1075 (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
1076 irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
1077 MAX2(irb->mt->num_samples, 1) : 1;
1078
1079 const struct isl_view view = {
1080 .format = format,
1081 .base_level = irb->mt_level - irb->mt->first_level,
1082 .levels = 1,
1083 .base_array_layer = irb->mt_layer / mt_layer_unit,
1084 .array_len = irb->layer_count,
1085 .channel_select = {
1086 ISL_CHANNEL_SELECT_RED,
1087 ISL_CHANNEL_SELECT_GREEN,
1088 ISL_CHANNEL_SELECT_BLUE,
1089 ISL_CHANNEL_SELECT_ALPHA,
1090 },
1091 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
1092 };
1093
1094 brw_emit_surface_state(brw, irb->mt, target, view,
1095 surface_state_infos[brw->gen].tex_mocs,
1096 surf_offset, surf_index,
1097 I915_GEM_DOMAIN_SAMPLER, 0);
1098
1099 } else {
1100 brw->vtbl.emit_null_surface_state(
1101 brw, _mesa_geometric_width(fb), _mesa_geometric_height(fb),
1102 _mesa_geometric_samples(fb), surf_offset);
1103 }
1104 }
1105
1106 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1107 }
1108 }
1109
1110 const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
1111 .dirty = {
1112 .mesa = _NEW_BUFFERS,
1113 .brw = BRW_NEW_BATCH |
1114 BRW_NEW_FRAGMENT_PROGRAM,
1115 },
1116 .emit = update_renderbuffer_read_surfaces,
1117 };
1118
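/* Emit texture surface state for every sampler used by the given program,
 * writing the offsets into either the normal, gather or per-plane section
 * of the stage's binding table.
 */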
1119 static void
1120 update_stage_texture_surfaces(struct brw_context *brw,
1121 const struct gl_program *prog,
1122 struct brw_stage_state *stage_state,
1123 bool for_gather, uint32_t plane)
1124 {
1125 if (!prog)
1126 return;
1127
1128 struct gl_context *ctx = &brw->ctx;
1129
1130 uint32_t *surf_offset = stage_state->surf_offset;
1131
1132 /* BRW_NEW_*_PROG_DATA */
1133 if (for_gather)
1134 surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
1135 else
1136 surf_offset += stage_state->prog_data->binding_table.plane_start[plane];
1137
1138 unsigned num_samplers = util_last_bit(prog->SamplersUsed);
1139 for (unsigned s = 0; s < num_samplers; s++) {
1140 surf_offset[s] = 0;
1141
1142 if (prog->SamplersUsed & (1 << s)) {
1143 const unsigned unit = prog->SamplerUnits[s];
1144
1145 /* _NEW_TEXTURE */
1146 if (ctx->Texture.Unit[unit]._Current) {
1147 brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather, plane);
1148 }
1149 }
1150 }
1151 }
1152
1153
1154 /**
1155 * Construct SURFACE_STATE objects for enabled textures.
1156 */
1157 static void
1158 brw_update_texture_surfaces(struct brw_context *brw)
1159 {
1160 /* BRW_NEW_VERTEX_PROGRAM */
1161 struct gl_program *vs = (struct gl_program *) brw->vertex_program;
1162
1163 /* BRW_NEW_TESS_PROGRAMS */
1164 struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
1165 struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;
1166
1167 /* BRW_NEW_GEOMETRY_PROGRAM */
1168 struct gl_program *gs = (struct gl_program *) brw->geometry_program;
1169
1170 /* BRW_NEW_FRAGMENT_PROGRAM */
1171 struct gl_program *fs = (struct gl_program *) brw->fragment_program;
1172
1173 /* _NEW_TEXTURE */
1174 update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
1175 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
1176 update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
1177 update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
1178 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);
1179
1180    /* Emit an alternate set of surface state for gather.  This
1181     * allows the surface format to be overridden for only the
1182     * gather4 messages. */
1183 if (brw->gen < 8) {
1184 if (vs && vs->UsesGather)
1185 update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
1186 if (tcs && tcs->UsesGather)
1187 update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
1188 if (tes && tes->UsesGather)
1189 update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
1190 if (gs && gs->UsesGather)
1191 update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
1192 if (fs && fs->UsesGather)
1193 update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
1194 }
1195
1196 if (fs) {
1197 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
1198 update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
1199 }
1200
1201 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1202 }
1203
1204 const struct brw_tracked_state brw_texture_surfaces = {
1205 .dirty = {
1206 .mesa = _NEW_TEXTURE,
1207 .brw = BRW_NEW_BATCH |
1208 BRW_NEW_BLORP |
1209 BRW_NEW_FRAGMENT_PROGRAM |
1210 BRW_NEW_FS_PROG_DATA |
1211 BRW_NEW_GEOMETRY_PROGRAM |
1212 BRW_NEW_GS_PROG_DATA |
1213 BRW_NEW_TESS_PROGRAMS |
1214 BRW_NEW_TCS_PROG_DATA |
1215 BRW_NEW_TES_PROG_DATA |
1216 BRW_NEW_TEXTURE_BUFFER |
1217 BRW_NEW_VERTEX_PROGRAM |
1218 BRW_NEW_VS_PROG_DATA,
1219 },
1220 .emit = brw_update_texture_surfaces,
1221 };
1222
1223 static void
1224 brw_update_cs_texture_surfaces(struct brw_context *brw)
1225 {
1226 /* BRW_NEW_COMPUTE_PROGRAM */
1227 struct gl_program *cs = (struct gl_program *) brw->compute_program;
1228
1229 /* _NEW_TEXTURE */
1230 update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);
1231
1232    /* Emit an alternate set of surface state for gather.  This
1233     * allows the surface format to be overridden for only the
1234     * gather4 messages.
1235 */
1236 if (brw->gen < 8) {
1237 if (cs && cs->UsesGather)
1238 update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
1239 }
1240
1241 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1242 }
1243
1244 const struct brw_tracked_state brw_cs_texture_surfaces = {
1245 .dirty = {
1246 .mesa = _NEW_TEXTURE,
1247 .brw = BRW_NEW_BATCH |
1248 BRW_NEW_BLORP |
1249 BRW_NEW_COMPUTE_PROGRAM,
1250 },
1251 .emit = brw_update_cs_texture_surfaces,
1252 };
1253
1254
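/* Create constant buffer surfaces for all uniform blocks and raw buffer
 * surfaces for all shader storage blocks bound to the given shader stage.
 * Unbound blocks get a null surface.
 */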
1255 void
1256 brw_upload_ubo_surfaces(struct brw_context *brw,
1257 struct gl_linked_shader *shader,
1258 struct brw_stage_state *stage_state,
1259 struct brw_stage_prog_data *prog_data)
1260 {
1261 struct gl_context *ctx = &brw->ctx;
1262
1263 if (!shader)
1264 return;
1265
1266 uint32_t *ubo_surf_offsets =
1267 &stage_state->surf_offset[prog_data->binding_table.ubo_start];
1268
1269 for (int i = 0; i < shader->NumUniformBlocks; i++) {
1270 struct gl_uniform_buffer_binding *binding =
1271 &ctx->UniformBufferBindings[shader->UniformBlocks[i]->Binding];
1272
1273 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1274 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
1275 } else {
1276 struct intel_buffer_object *intel_bo =
1277 intel_buffer_object(binding->BufferObject);
1278 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1279 if (!binding->AutomaticSize)
1280 size = MIN2(size, binding->Size);
1281 drm_intel_bo *bo =
1282 intel_bufferobj_buffer(brw, intel_bo,
1283 binding->Offset,
1284 size);
1285 brw_create_constant_surface(brw, bo, binding->Offset,
1286 size,
1287 &ubo_surf_offsets[i]);
1288 }
1289 }
1290
1291 uint32_t *ssbo_surf_offsets =
1292 &stage_state->surf_offset[prog_data->binding_table.ssbo_start];
1293
1294 for (int i = 0; i < shader->NumShaderStorageBlocks; i++) {
1295 struct gl_shader_storage_buffer_binding *binding =
1296 &ctx->ShaderStorageBufferBindings[shader->ShaderStorageBlocks[i]->Binding];
1297
1298 if (binding->BufferObject == ctx->Shared->NullBufferObj) {
1299 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
1300 } else {
1301 struct intel_buffer_object *intel_bo =
1302 intel_buffer_object(binding->BufferObject);
1303 GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
1304 if (!binding->AutomaticSize)
1305 size = MIN2(size, binding->Size);
1306 drm_intel_bo *bo =
1307 intel_bufferobj_buffer(brw, intel_bo,
1308 binding->Offset,
1309 size);
1310 brw_create_buffer_surface(brw, bo, binding->Offset,
1311 size,
1312 &ssbo_surf_offsets[i]);
1313 }
1314 }
1315
1316 if (shader->NumUniformBlocks || shader->NumShaderStorageBlocks)
1317 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1318 }
1319
1320 static void
1321 brw_upload_wm_ubo_surfaces(struct brw_context *brw)
1322 {
1323 struct gl_context *ctx = &brw->ctx;
1324 /* _NEW_PROGRAM */
1325 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1326
1327 if (!prog)
1328 return;
1329
1330 /* BRW_NEW_FS_PROG_DATA */
1331 brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1332 &brw->wm.base, &brw->wm.prog_data->base);
1333 }
1334
1335 const struct brw_tracked_state brw_wm_ubo_surfaces = {
1336 .dirty = {
1337 .mesa = _NEW_PROGRAM,
1338 .brw = BRW_NEW_BATCH |
1339 BRW_NEW_BLORP |
1340 BRW_NEW_FS_PROG_DATA |
1341 BRW_NEW_UNIFORM_BUFFER,
1342 },
1343 .emit = brw_upload_wm_ubo_surfaces,
1344 };
1345
1346 static void
1347 brw_upload_cs_ubo_surfaces(struct brw_context *brw)
1348 {
1349 struct gl_context *ctx = &brw->ctx;
1350 /* _NEW_PROGRAM */
1351 struct gl_shader_program *prog =
1352 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1353
1354 if (!prog)
1355 return;
1356
1357 /* BRW_NEW_CS_PROG_DATA */
1358 brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1359 &brw->cs.base, &brw->cs.prog_data->base);
1360 }
1361
1362 const struct brw_tracked_state brw_cs_ubo_surfaces = {
1363 .dirty = {
1364 .mesa = _NEW_PROGRAM,
1365 .brw = BRW_NEW_BATCH |
1366 BRW_NEW_BLORP |
1367 BRW_NEW_CS_PROG_DATA |
1368 BRW_NEW_UNIFORM_BUFFER,
1369 },
1370 .emit = brw_upload_cs_ubo_surfaces,
1371 };
1372
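/* Create raw read/write buffer surfaces for the atomic counter buffers
 * bound to the given shader stage.
 */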
1373 void
1374 brw_upload_abo_surfaces(struct brw_context *brw,
1375 struct gl_linked_shader *shader,
1376 struct brw_stage_state *stage_state,
1377 struct brw_stage_prog_data *prog_data)
1378 {
1379 struct gl_context *ctx = &brw->ctx;
1380 uint32_t *surf_offsets =
1381 &stage_state->surf_offset[prog_data->binding_table.abo_start];
1382
1383 if (shader && shader->NumAtomicBuffers) {
1384 for (unsigned i = 0; i < shader->NumAtomicBuffers; i++) {
1385 struct gl_atomic_buffer_binding *binding =
1386 &ctx->AtomicBufferBindings[shader->AtomicBuffers[i]->Binding];
1387 struct intel_buffer_object *intel_bo =
1388 intel_buffer_object(binding->BufferObject);
1389 drm_intel_bo *bo = intel_bufferobj_buffer(
1390 brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);
1391
1392 brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
1393 binding->Offset, BRW_SURFACEFORMAT_RAW,
1394 bo->size - binding->Offset, 1, true);
1395 }
1396
1397 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1398 }
1399 }
1400
1401 static void
1402 brw_upload_wm_abo_surfaces(struct brw_context *brw)
1403 {
1404 struct gl_context *ctx = &brw->ctx;
1405 /* _NEW_PROGRAM */
1406 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1407
1408 if (prog) {
1409 /* BRW_NEW_FS_PROG_DATA */
1410 brw_upload_abo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1411 &brw->wm.base, &brw->wm.prog_data->base);
1412 }
1413 }
1414
1415 const struct brw_tracked_state brw_wm_abo_surfaces = {
1416 .dirty = {
1417 .mesa = _NEW_PROGRAM,
1418 .brw = BRW_NEW_ATOMIC_BUFFER |
1419 BRW_NEW_BLORP |
1420 BRW_NEW_BATCH |
1421 BRW_NEW_FS_PROG_DATA,
1422 },
1423 .emit = brw_upload_wm_abo_surfaces,
1424 };
1425
1426 static void
1427 brw_upload_cs_abo_surfaces(struct brw_context *brw)
1428 {
1429 struct gl_context *ctx = &brw->ctx;
1430 /* _NEW_PROGRAM */
1431 struct gl_shader_program *prog =
1432 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1433
1434 if (prog) {
1435 /* BRW_NEW_CS_PROG_DATA */
1436 brw_upload_abo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1437 &brw->cs.base, &brw->cs.prog_data->base);
1438 }
1439 }
1440
1441 const struct brw_tracked_state brw_cs_abo_surfaces = {
1442 .dirty = {
1443 .mesa = _NEW_PROGRAM,
1444 .brw = BRW_NEW_ATOMIC_BUFFER |
1445 BRW_NEW_BLORP |
1446 BRW_NEW_BATCH |
1447 BRW_NEW_CS_PROG_DATA,
1448 },
1449 .emit = brw_upload_cs_abo_surfaces,
1450 };
1451
1452 static void
1453 brw_upload_cs_image_surfaces(struct brw_context *brw)
1454 {
1455 struct gl_context *ctx = &brw->ctx;
1456 /* _NEW_PROGRAM */
1457 struct gl_shader_program *prog =
1458 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1459
1460 if (prog) {
1461 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1462 brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
1463 &brw->cs.base, &brw->cs.prog_data->base);
1464 }
1465 }
1466
1467 const struct brw_tracked_state brw_cs_image_surfaces = {
1468 .dirty = {
1469 .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
1470 .brw = BRW_NEW_BATCH |
1471 BRW_NEW_BLORP |
1472 BRW_NEW_CS_PROG_DATA |
1473 BRW_NEW_IMAGE_UNITS
1474 },
1475 .emit = brw_upload_cs_image_surfaces,
1476 };
1477
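/* Pick the surface format to use for a shader image: the real format for
 * write-only access, the closest typed format the hardware can read, or
 * RAW if untyped messages are the only option.
 */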
1478 static uint32_t
1479 get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
1480 {
1481 const struct brw_device_info *devinfo = brw->intelScreen->devinfo;
1482 uint32_t hw_format = brw_format_for_mesa_format(format);
1483 if (access == GL_WRITE_ONLY) {
1484 return hw_format;
1485 } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
1486 /* Typed surface reads support a very limited subset of the shader
1487 * image formats. Translate it into the closest format the
1488 * hardware supports.
1489 */
1490 return isl_lower_storage_image_format(devinfo, hw_format);
1491 } else {
1492       /* The hardware doesn't actually support a typed format that we can use,
1493 * so we have to fall back to untyped read/write messages.
1494 */
1495 return BRW_SURFACEFORMAT_RAW;
1496 }
1497 }
1498
1499 static void
1500 update_default_image_param(struct brw_context *brw,
1501 struct gl_image_unit *u,
1502 unsigned surface_idx,
1503 struct brw_image_param *param)
1504 {
1505 memset(param, 0, sizeof(*param));
1506 param->surface_idx = surface_idx;
1507 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
1508 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
1509 * detailed explanation of these parameters.
1510 */
1511 param->swizzling[0] = 0xff;
1512 param->swizzling[1] = 0xff;
1513 }
1514
1515 static void
1516 update_buffer_image_param(struct brw_context *brw,
1517 struct gl_image_unit *u,
1518 unsigned surface_idx,
1519 struct brw_image_param *param)
1520 {
1521 struct gl_buffer_object *obj = u->TexObj->BufferObject;
1522 const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
1523 update_default_image_param(brw, u, surface_idx, param);
1524
1525 param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
1526 param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
1527 }
1528
1529 static void
1530 update_texture_image_param(struct brw_context *brw,
1531 struct gl_image_unit *u,
1532 unsigned surface_idx,
1533 struct brw_image_param *param)
1534 {
1535 struct intel_mipmap_tree *mt = intel_texture_object(u->TexObj)->mt;
1536
1537 update_default_image_param(brw, u, surface_idx, param);
1538
1539 param->size[0] = minify(mt->logical_width0, u->Level);
1540 param->size[1] = minify(mt->logical_height0, u->Level);
1541 param->size[2] = (!u->Layered ? 1 :
1542 u->TexObj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1543 u->TexObj->Target == GL_TEXTURE_3D ?
1544 minify(mt->logical_depth0, u->Level) :
1545 mt->logical_depth0);
1546
1547 intel_miptree_get_image_offset(mt, u->Level, u->_Layer,
1548 &param->offset[0],
1549 &param->offset[1]);
1550
1551 param->stride[0] = mt->cpp;
1552 param->stride[1] = mt->pitch / mt->cpp;
1553 param->stride[2] =
1554 brw_miptree_get_horizontal_slice_pitch(brw, mt, u->Level);
1555 param->stride[3] =
1556 brw_miptree_get_vertical_slice_pitch(brw, mt, u->Level);
1557
1558 if (mt->tiling == I915_TILING_X) {
1559 /* An X tile is a rectangular block of 512x8 bytes. */
1560 param->tiling[0] = _mesa_logbase2(512 / mt->cpp);
1561 param->tiling[1] = _mesa_logbase2(8);
1562
1563 if (brw->has_swizzling) {
1564 /* Right shifts required to swizzle bits 9 and 10 of the memory
1565 * address with bit 6.
1566 */
1567 param->swizzling[0] = 3;
1568 param->swizzling[1] = 4;
1569 }
1570 } else if (mt->tiling == I915_TILING_Y) {
1571       /* The layout of a Y-tiled surface in memory isn't really fundamentally
1572        * different from the layout of an X-tiled surface; we simply pretend that
1573        * the surface is broken up into a number of smaller 16Bx32 tiles, each
1574        * one arranged in X-major order just as is the case for X-tiling.
1575 */
1576 param->tiling[0] = _mesa_logbase2(16 / mt->cpp);
1577 param->tiling[1] = _mesa_logbase2(32);
1578
1579 if (brw->has_swizzling) {
1580 /* Right shift required to swizzle bit 9 of the memory address with
1581 * bit 6.
1582 */
1583 param->swizzling[0] = 3;
1584 }
1585 }
1586
1587 /* 3D textures are arranged in 2D in memory with 2^lod slices per row. The
1588 * address calculation algorithm (emit_address_calculation() in
1589 * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
1590 * modulus equal to the LOD.
1591 */
1592 param->tiling[2] = (u->TexObj->Target == GL_TEXTURE_3D ? u->Level :
1593 0);
1594 }
1595
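/* Emit the surface state and fill in the brw_image_param metadata for a
 * single image unit, handling buffer textures, raw fallback formats and
 * ordinary texture images; invalid units get a null surface.
 */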
1596 static void
1597 update_image_surface(struct brw_context *brw,
1598 struct gl_image_unit *u,
1599 GLenum access,
1600 unsigned surface_idx,
1601 uint32_t *surf_offset,
1602 struct brw_image_param *param)
1603 {
1604 if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
1605 struct gl_texture_object *obj = u->TexObj;
1606 const unsigned format = get_image_format(brw, u->_ActualFormat, access);
1607
1608 if (obj->Target == GL_TEXTURE_BUFFER) {
1609 struct intel_buffer_object *intel_obj =
1610 intel_buffer_object(obj->BufferObject);
1611 const unsigned texel_size = (format == BRW_SURFACEFORMAT_RAW ? 1 :
1612 _mesa_get_format_bytes(u->_ActualFormat));
1613
1614 brw_emit_buffer_surface_state(
1615 brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
1616 format, intel_obj->Base.Size, texel_size,
1617 access != GL_READ_ONLY);
1618
1619 update_buffer_image_param(brw, u, surface_idx, param);
1620
1621 } else {
1622 struct intel_texture_object *intel_obj = intel_texture_object(obj);
1623 struct intel_mipmap_tree *mt = intel_obj->mt;
1624
1625 if (format == BRW_SURFACEFORMAT_RAW) {
1626 brw_emit_buffer_surface_state(
1627 brw, surf_offset, mt->bo, mt->offset,
1628 format, mt->bo->size - mt->offset, 1 /* pitch */,
1629 access != GL_READ_ONLY);
1630
1631 } else {
1632 const unsigned num_layers = (!u->Layered ? 1 :
1633 obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
1634 mt->logical_depth0);
1635
1636 struct isl_view view = {
1637 .format = format,
1638 .base_level = obj->MinLevel + u->Level,
1639 .levels = 1,
1640 .base_array_layer = obj->MinLayer + u->_Layer,
1641 .array_len = num_layers,
1642 .channel_select = {
1643 ISL_CHANNEL_SELECT_RED,
1644 ISL_CHANNEL_SELECT_GREEN,
1645 ISL_CHANNEL_SELECT_BLUE,
1646 ISL_CHANNEL_SELECT_ALPHA,
1647 },
1648 .usage = ISL_SURF_USAGE_STORAGE_BIT,
1649 };
1650
1651 const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
1652
1653 brw_emit_surface_state(brw, mt, mt->target, view,
1654 surface_state_infos[brw->gen].tex_mocs,
1655 surf_offset, surf_index,
1656 I915_GEM_DOMAIN_SAMPLER,
1657 access == GL_READ_ONLY ? 0 :
1658 I915_GEM_DOMAIN_SAMPLER);
1659 }
1660
1661 update_texture_image_param(brw, u, surface_idx, param);
1662 }
1663
1664 } else {
1665 brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
1666 update_default_image_param(brw, u, surface_idx, param);
1667 }
1668 }
1669
1670 void
1671 brw_upload_image_surfaces(struct brw_context *brw,
1672 struct gl_linked_shader *shader,
1673 struct brw_stage_state *stage_state,
1674 struct brw_stage_prog_data *prog_data)
1675 {
1676 struct gl_context *ctx = &brw->ctx;
1677
1678 if (shader && shader->NumImages) {
1679 for (unsigned i = 0; i < shader->NumImages; i++) {
1680 struct gl_image_unit *u = &ctx->ImageUnits[shader->ImageUnits[i]];
1681 const unsigned surf_idx = prog_data->binding_table.image_start + i;
1682
1683 update_image_surface(brw, u, shader->ImageAccess[i],
1684 surf_idx,
1685 &stage_state->surf_offset[surf_idx],
1686 &prog_data->image_param[i]);
1687 }
1688
1689 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1690 /* This may have changed the image metadata dependent on the context
1691       * image unit state and passed to the program as uniforms, so make sure
1692 * that push and pull constants are reuploaded.
1693 */
1694 brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
1695 }
1696 }
1697
1698 static void
1699 brw_upload_wm_image_surfaces(struct brw_context *brw)
1700 {
1701 struct gl_context *ctx = &brw->ctx;
1702 /* BRW_NEW_FRAGMENT_PROGRAM */
1703 struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;
1704
1705 if (prog) {
1706 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1707 brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
1708 &brw->wm.base, &brw->wm.prog_data->base);
1709 }
1710 }
1711
1712 const struct brw_tracked_state brw_wm_image_surfaces = {
1713 .dirty = {
1714 .mesa = _NEW_TEXTURE,
1715 .brw = BRW_NEW_BATCH |
1716 BRW_NEW_BLORP |
1717 BRW_NEW_FRAGMENT_PROGRAM |
1718 BRW_NEW_FS_PROG_DATA |
1719 BRW_NEW_IMAGE_UNITS
1720 },
1721 .emit = brw_upload_wm_image_surfaces,
1722 };
1723
1724 void
1725 gen4_init_vtable_surface_functions(struct brw_context *brw)
1726 {
1727 brw->vtbl.update_renderbuffer_surface = gen4_update_renderbuffer_surface;
1728 brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
1729 }
1730
1731 void
1732 gen6_init_vtable_surface_functions(struct brw_context *brw)
1733 {
1734 gen4_init_vtable_surface_functions(brw);
1735 brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
1736 }
1737
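/* Upload a raw buffer surface holding gl_NumWorkGroups, either reusing the
 * buffer object that already holds the counts or uploading the CPU-side
 * values.
 */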
1738 static void
1739 brw_upload_cs_work_groups_surface(struct brw_context *brw)
1740 {
1741 struct gl_context *ctx = &brw->ctx;
1742 /* _NEW_PROGRAM */
1743 struct gl_shader_program *prog =
1744 ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
1745
1746 if (prog && brw->cs.prog_data->uses_num_work_groups) {
1747 const unsigned surf_idx =
1748 brw->cs.prog_data->binding_table.work_groups_start;
1749 uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
1750 drm_intel_bo *bo;
1751 uint32_t bo_offset;
1752
1753 if (brw->compute.num_work_groups_bo == NULL) {
1754 bo = NULL;
1755 intel_upload_data(brw,
1756 (void *)brw->compute.num_work_groups,
1757 3 * sizeof(GLuint),
1758 sizeof(GLuint),
1759 &bo,
1760 &bo_offset);
1761 } else {
1762 bo = brw->compute.num_work_groups_bo;
1763 bo_offset = brw->compute.num_work_groups_offset;
1764 }
1765
1766 brw_emit_buffer_surface_state(brw, surf_offset,
1767 bo, bo_offset,
1768 BRW_SURFACEFORMAT_RAW,
1769 3 * sizeof(GLuint), 1, true);
1770 brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
1771 }
1772 }
1773
1774 const struct brw_tracked_state brw_cs_work_groups_surface = {
1775 .dirty = {
1776 .brw = BRW_NEW_BLORP |
1777 BRW_NEW_CS_WORK_GROUPS
1778 },
1779 .emit = brw_upload_cs_work_groups_surface,
1780 };