/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
 /*
  * Authors:
  *   Keith Whitwell <keithw@vmware.com>
  */

#include "compiler/nir/nir.h"
#include "main/context.h"
#include "main/blend.h"
#include "main/mtypes.h"
#include "main/samplerobj.h"
#include "main/shaderimage.h"
#include "main/teximage.h"
#include "program/prog_parameter.h"
#include "program/prog_instruction.h"
#include "main/framebuffer.h"
#include "main/shaderapi.h"

#include "intel_mipmap_tree.h"
#include "intel_batchbuffer.h"
#include "intel_tex.h"
#include "intel_fbo.h"
#include "intel_buffer_objects.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"

enum {
   INTEL_RENDERBUFFER_LAYERED = 1 << 0,
   INTEL_AUX_BUFFER_DISABLED = 1 << 1,
};

uint32_t tex_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_WB,
   [9] = SKL_MOCS_WB,
};

uint32_t rb_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_PTE,
   [9] = SKL_MOCS_PTE,
};

static void
brw_emit_surface_state(struct brw_context *brw,
                       struct intel_mipmap_tree *mt, uint32_t flags,
                       GLenum target, struct isl_view view,
                       uint32_t mocs, uint32_t *surf_offset, int surf_index,
                       unsigned read_domains, unsigned write_domains)
{
   uint32_t tile_x = mt->level[0].slice[0].x_offset;
   uint32_t tile_y = mt->level[0].slice[0].y_offset;
   uint32_t offset = mt->offset;

   struct isl_surf surf;
   intel_miptree_get_isl_surf(brw, mt, &surf);

   surf.dim = get_isl_surf_dim(target);

   const enum isl_dim_layout dim_layout =
      get_isl_dim_layout(&brw->screen->devinfo, mt->tiling, target);

   if (surf.dim_layout != dim_layout) {
      /* The layout of the specified texture target is not compatible with the
       * actual layout of the miptree structure in memory -- You're entering
       * dangerous territory, this can only possibly work if you only intended
       * to access a single level and slice of the texture, and the hardware
       * supports the tile offset feature in order to allow non-tile-aligned
       * base offsets, since we'll have to point the hardware to the first
       * texel of the level instead of relying on the usual base level/layer
       * controls.
       */
      assert(brw->has_surface_tile_offset);
      assert(view.levels == 1 && view.array_len == 1);
      assert(tile_x == 0 && tile_y == 0);

      offset += intel_miptree_get_tile_offsets(mt, view.base_level,
                                               view.base_array_layer,
                                               &tile_x, &tile_y);

      /* Minify the logical dimensions of the texture. */
      const unsigned l = view.base_level - mt->first_level;
      surf.logical_level0_px.width = minify(surf.logical_level0_px.width, l);
      surf.logical_level0_px.height = surf.dim <= ISL_SURF_DIM_1D ? 1 :
         minify(surf.logical_level0_px.height, l);
      surf.logical_level0_px.depth = surf.dim <= ISL_SURF_DIM_2D ? 1 :
         minify(surf.logical_level0_px.depth, l);

      /* Only the base level and layer can be addressed with the overridden
       * layout.
       */
      surf.logical_level0_px.array_len = 1;
      surf.levels = 1;
      surf.dim_layout = dim_layout;

      /* The requested slice of the texture is now at the base level and
       * layer.
       */
      view.base_level = 0;
      view.base_array_layer = 0;
   }

   union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };

   struct brw_bo *aux_bo;
   struct isl_surf *aux_surf = NULL, aux_surf_s;
   uint64_t aux_offset = 0;
   enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE;
   if ((mt->mcs_buf || intel_miptree_sample_with_hiz(brw, mt)) &&
       !(flags & INTEL_AUX_BUFFER_DISABLED)) {
      intel_miptree_get_aux_isl_surf(brw, mt, &aux_surf_s, &aux_usage);
      aux_surf = &aux_surf_s;

      if (mt->mcs_buf) {
         aux_bo = mt->mcs_buf->bo;
         aux_offset = mt->mcs_buf->bo->offset64 + mt->mcs_buf->offset;
      } else {
         aux_bo = mt->hiz_buf->aux_base.bo;
         aux_offset = mt->hiz_buf->aux_base.bo->offset64;
      }

      /* We only really need a clear color if we also have an auxiliary
       * surface.  Without one, it does nothing.
       */
      clear_color = intel_miptree_get_isl_clear_color(brw, mt);
   }

   void *state = brw_state_batch(brw,
                                 brw->isl_dev.ss.size,
                                 brw->isl_dev.ss.align,
                                 surf_offset);

   isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
                       .address = mt->bo->offset64 + offset,
                       .aux_surf = aux_surf, .aux_usage = aux_usage,
                       .aux_address = aux_offset,
                       .mocs = mocs, .clear_color = clear_color,
                       .x_offset_sa = tile_x, .y_offset_sa = tile_y);

   brw_emit_reloc(&brw->batch, *surf_offset + brw->isl_dev.ss.addr_offset,
                  mt->bo, offset, read_domains, write_domains);

   if (aux_surf) {
      /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
       * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
       * contain other control information.  Since buffer addresses are always
       * on 4k boundaries (and thus have their lower 12 bits zero), we can use
       * an ordinary reloc to do the necessary address translation.
       */
      assert((aux_offset & 0xfff) == 0);
      uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
      brw_emit_reloc(&brw->batch,
                     *surf_offset + brw->isl_dev.ss.aux_addr_offset,
                     aux_bo, *aux_addr - aux_bo->offset64,
                     read_domains, write_domains);
   }
}
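
/* Worked example of the aux reloc arithmetic in brw_emit_surface_state()
 * above (illustrative values, not taken from real hardware state): suppose
 * aux_bo->offset64 == 0x00100000, the aux data starts 0x2000 bytes into
 * that BO, and isl packed auxiliary control bits 0x5 into the low 12 bits,
 * so *aux_addr == (0x00100000 + 0x2000) | 0x5 == 0x00102005.  The reloc
 * delta is then *aux_addr - aux_bo->offset64 == 0x2005.  When the kernel
 * later places the BO at a 4k-aligned address A, it writes A + 0x2005 ==
 * (A + 0x2000) | 0x5: because both A and the 0x2000 offset are 4k aligned,
 * the addition can never carry into the control bits.
 */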

static uint32_t
brw_update_renderbuffer_surface(struct brw_context *brw,
                                struct gl_renderbuffer *rb,
                                uint32_t flags, unsigned unit /* unused */,
                                uint32_t surf_index)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;

   assert(!(flags & INTEL_AUX_BUFFER_DISABLED));

   assert(brw_render_target_supported(brw, rb));

   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }

   const unsigned layer_multiplier =
      (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
       irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
      MAX2(irb->mt->num_samples, 1) : 1;

   struct isl_view view = {
      .format = brw->render_target_format[rb_format],
      .base_level = irb->mt_level - irb->mt->first_level,
      .levels = 1,
      .base_array_layer = irb->mt_layer / layer_multiplier,
      .array_len = MAX2(irb->layer_count, 1),
      .swizzle = ISL_SWIZZLE_IDENTITY,
      .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
   };

   uint32_t offset;
   brw_emit_surface_state(brw, mt, flags, mt->target, view,
                          rb_mocs[brw->gen],
                          &offset, surf_index,
                          I915_GEM_DOMAIN_RENDER,
                          I915_GEM_DOMAIN_RENDER);
   return offset;
}

GLuint
translate_tex_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_1D:
   case GL_TEXTURE_1D_ARRAY_EXT:
      return BRW_SURFACE_1D;

   case GL_TEXTURE_RECTANGLE_NV:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_2D:
   case GL_TEXTURE_2D_ARRAY_EXT:
   case GL_TEXTURE_EXTERNAL_OES:
   case GL_TEXTURE_2D_MULTISAMPLE:
   case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_3D:
      return BRW_SURFACE_3D;

   case GL_TEXTURE_CUBE_MAP:
   case GL_TEXTURE_CUBE_MAP_ARRAY:
      return BRW_SURFACE_CUBE;

   default:
      unreachable("not reached");
   }
}

uint32_t
brw_get_surface_tiling_bits(uint32_t tiling)
{
   switch (tiling) {
   case I915_TILING_X:
      return BRW_SURFACE_TILED;
   case I915_TILING_Y:
      return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
   default:
      return 0;
   }
}

uint32_t
brw_get_surface_num_multisamples(unsigned num_samples)
{
   if (num_samples > 1)
      return BRW_SURFACE_MULTISAMPLECOUNT_4;
   else
      return BRW_SURFACE_MULTISAMPLECOUNT_1;
}

/**
 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
 * swizzling.
 */
int
brw_get_texture_swizzle(const struct gl_context *ctx,
                        const struct gl_texture_object *t)
{
   const struct gl_texture_image *img = t->Image[0][t->BaseLevel];

   int swizzles[SWIZZLE_NIL + 1] = {
      SWIZZLE_X,
      SWIZZLE_Y,
      SWIZZLE_Z,
      SWIZZLE_W,
      SWIZZLE_ZERO,
      SWIZZLE_ONE,
      SWIZZLE_NIL
   };

   if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
       img->_BaseFormat == GL_DEPTH_STENCIL) {
      GLenum depth_mode = t->DepthMode;

      /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
       * with depth component data specified with a sized internal format.
       * Otherwise, it's left at the old default, GL_LUMINANCE.
       */
      if (_mesa_is_gles3(ctx) &&
          img->InternalFormat != GL_DEPTH_COMPONENT &&
          img->InternalFormat != GL_DEPTH_STENCIL) {
         depth_mode = GL_RED;
      }

      switch (depth_mode) {
      case GL_ALPHA:
         swizzles[0] = SWIZZLE_ZERO;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_LUMINANCE:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
         break;
      case GL_INTENSITY:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_RED:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_ONE;
         break;
      }
   }

   GLenum datatype = _mesa_get_format_datatype(img->TexFormat);

   /* If the texture's format is alpha-only, force R, G, and B to
    * 0.0. Similarly, if the texture's format has no alpha channel,
    * force the alpha value read to 1.0. This allows for the
    * implementation to use an RGBA texture for any of these formats
    * without leaking any unexpected values.
    */
   switch (img->_BaseFormat) {
   case GL_ALPHA:
      swizzles[0] = SWIZZLE_ZERO;
      swizzles[1] = SWIZZLE_ZERO;
      swizzles[2] = SWIZZLE_ZERO;
      break;
   case GL_LUMINANCE:
      if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
      }
      break;
   case GL_LUMINANCE_ALPHA:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_W;
      }
      break;
   case GL_INTENSITY:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
      }
      break;
   case GL_RED:
   case GL_RG:
   case GL_RGB:
      if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0)
         swizzles[3] = SWIZZLE_ONE;
      break;
   }

   return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
                        swizzles[GET_SWZ(t->_Swizzle, 1)],
                        swizzles[GET_SWZ(t->_Swizzle, 2)],
                        swizzles[GET_SWZ(t->_Swizzle, 3)]);
}
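
/* The double indexing in the return statement above composes the
 * application's swizzle with the format-derived swizzle.  Illustrative
 * example: sampling a depth texture whose DepthMode is GL_LUMINANCE gives
 * swizzles[] = {X, X, X, ONE}; if the application also sets a texture
 * swizzle of GL_GREEN for the red channel, GET_SWZ(t->_Swizzle, 0) is
 * SWIZZLE_Y, and the red channel resolves to swizzles[SWIZZLE_Y] ==
 * SWIZZLE_X -- the depth value, as expected.
 */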

/**
 * Convert a swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED).  The mappings are
 *
 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
 *   and
 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
 *
 * which is simply adding 4 then modding by 8 (or anding with 7).
 *
 * We then may need to apply workarounds for textureGather hardware bugs.
 */
static unsigned
swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
{
   unsigned scs = (swizzle + 4) & 7;

   return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
}
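
/* Worked example of the add-4-and-mask-by-7 mapping above, using Mesa's
 * SWIZZLE_* values (SWIZZLE_X..SWIZZLE_W are 0..3, SWIZZLE_ZERO is 4 and
 * SWIZZLE_ONE is 5):
 *
 *    SWIZZLE_X    (0): (0 + 4) & 7 == 4 == HSW_SCS_RED
 *    SWIZZLE_W    (3): (3 + 4) & 7 == 7 == HSW_SCS_ALPHA
 *    SWIZZLE_ZERO (4): (4 + 4) & 7 == 0 == HSW_SCS_ZERO
 *    SWIZZLE_ONE  (5): (5 + 4) & 7 == 1 == HSW_SCS_ONE
 */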

static unsigned
brw_find_matching_rb(const struct gl_framebuffer *fb,
                     const struct intel_mipmap_tree *mt)
{
   for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
      const struct intel_renderbuffer *irb =
         intel_renderbuffer(fb->_ColorDrawBuffers[i]);

      if (irb && irb->mt == mt)
         return i;
   }

   return fb->_NumColorDrawBuffers;
}

static bool
brw_texture_view_sane(const struct brw_context *brw,
                      const struct intel_mipmap_tree *mt,
                      const struct isl_view *view)
{
   /* There are special cases only for lossless compression. */
   if (!intel_miptree_is_lossless_compressed(brw, mt))
      return true;

   if (isl_format_supports_ccs_e(&brw->screen->devinfo, view->format))
      return true;

   /* Logic elsewhere needs to take care to resolve the color buffer prior
    * to sampling it as non-compressed.
    */
   if (intel_miptree_has_color_unresolved(mt, view->base_level, view->levels,
                                          view->base_array_layer,
                                          view->array_len))
      return false;

   const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
   const unsigned rb_index = brw_find_matching_rb(fb, mt);

   if (rb_index == fb->_NumColorDrawBuffers)
      return true;

   /* Underlying surface is compressed but it is sampled using a format that
    * the sampling engine doesn't support as compressed.  Compression must be
    * disabled for both sampling engine and data port in case the same surface
    * is used also as render target.
    */
   return brw->draw_aux_buffer_disabled[rb_index];
}

static bool
brw_disable_aux_surface(const struct brw_context *brw,
                        const struct intel_mipmap_tree *mt,
                        const struct isl_view *view)
{
   /* Nothing to disable. */
   if (!mt->mcs_buf)
      return false;

   const bool is_unresolved = intel_miptree_has_color_unresolved(
                                 mt, view->base_level, view->levels,
                                 view->base_array_layer, view->array_len);

   /* There are special cases only for lossless compression. */
   if (!intel_miptree_is_lossless_compressed(brw, mt))
      return !is_unresolved;

   const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
   const unsigned rb_index = brw_find_matching_rb(fb, mt);

   /* If we are drawing into this with compression enabled, then we must also
    * enable compression when texturing from it regardless of
    * fast_clear_state.  If we don't then, after the first draw call with
    * this setup, there will be data in the CCS which won't get picked up by
    * subsequent texturing operations as required by ARB_texture_barrier.
    * Since we don't want to re-emit the binding table or do a resolve
    * operation every draw call, the easiest thing to do is just enable
    * compression on the texturing side.  This is completely safe to do
    * since, if compressed texturing weren't allowed, we would have disabled
    * compression of render targets in whatever_that_function_is_called().
    */
   if (rb_index < fb->_NumColorDrawBuffers) {
      if (brw->draw_aux_buffer_disabled[rb_index]) {
         assert(!is_unresolved);
      }

      return brw->draw_aux_buffer_disabled[rb_index];
   }

   return !is_unresolved;
}

void
brw_update_texture_surface(struct gl_context *ctx,
                           unsigned unit,
                           uint32_t *surf_offset,
                           bool for_gather,
                           uint32_t plane)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;

   if (obj->Target == GL_TEXTURE_BUFFER) {
      brw_update_buffer_texture_surface(ctx, unit, surf_offset);

   } else {
      struct intel_texture_object *intel_obj = intel_texture_object(obj);
      struct intel_mipmap_tree *mt = intel_obj->mt;

      if (plane > 0) {
         if (mt->plane[plane - 1] == NULL)
            return;
         mt = mt->plane[plane - 1];
      }

      struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
      /* If this is a view with restricted NumLayers, then our effective depth
       * is not just the miptree depth.
       */
      const unsigned view_num_layers =
         (obj->Immutable && obj->Target != GL_TEXTURE_3D) ? obj->NumLayers :
                                                            mt->logical_depth0;

      /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
       * texturing functions that return a float, as our code generation always
       * selects the .x channel (which would always be 0).
       */
      struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
      const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
         (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
          firstImage->_BaseFormat == GL_DEPTH_STENCIL);
      const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
                                brw_get_texture_swizzle(&brw->ctx, obj));

      mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
      unsigned format = translate_tex_format(brw, mesa_fmt,
                                             sampler->sRGBDecode);

      /* Implement gen6 and gen7 gather work-around */
      bool need_green_to_blue = false;
      if (for_gather) {
         if (brw->gen == 7 && (format == ISL_FORMAT_R32G32_FLOAT ||
                               format == ISL_FORMAT_R32G32_SINT ||
                               format == ISL_FORMAT_R32G32_UINT)) {
            format = ISL_FORMAT_R32G32_FLOAT_LD;
            need_green_to_blue = brw->is_haswell;
         } else if (brw->gen == 6) {
            /* Sandybridge's gather4 message is broken for integer formats.
             * To work around this, we pretend the surface is UNORM for
             * 8 or 16-bit formats, and emit shader instructions to recover
             * the real INT/UINT value.  For 32-bit formats, we pretend
             * the surface is FLOAT, and simply reinterpret the resulting
             * bits.
             */
            switch (format) {
            case ISL_FORMAT_R8_SINT:
            case ISL_FORMAT_R8_UINT:
               format = ISL_FORMAT_R8_UNORM;
               break;

            case ISL_FORMAT_R16_SINT:
            case ISL_FORMAT_R16_UINT:
               format = ISL_FORMAT_R16_UNORM;
               break;

            case ISL_FORMAT_R32_SINT:
            case ISL_FORMAT_R32_UINT:
               format = ISL_FORMAT_R32_FLOAT;
               break;

            default:
               break;
            }
         }
      }

      if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
         if (brw->gen <= 7) {
            assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
            mt = mt->r8stencil_mt;
         } else {
            mt = mt->stencil_mt;
         }
         format = ISL_FORMAT_R8_UINT;
      } else if (brw->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
         assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
         mt = mt->r8stencil_mt;
         format = ISL_FORMAT_R8_UINT;
      }

      const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];

      struct isl_view view = {
         .format = format,
         .base_level = obj->MinLevel + obj->BaseLevel,
         .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
         .base_array_layer = obj->MinLayer,
         .array_len = view_num_layers,
         .swizzle = {
            .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
            .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
            .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
            .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
         },
         .usage = ISL_SURF_USAGE_TEXTURE_BIT,
      };

      if (obj->Target == GL_TEXTURE_CUBE_MAP ||
          obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
         view.usage |= ISL_SURF_USAGE_CUBE_BIT;

      assert(brw_texture_view_sane(brw, mt, &view));

      const int flags = brw_disable_aux_surface(brw, mt, &view) ?
                           INTEL_AUX_BUFFER_DISABLED : 0;
      brw_emit_surface_state(brw, mt, flags, mt->target, view,
                             tex_mocs[brw->gen],
                             surf_offset, surf_index,
                             I915_GEM_DOMAIN_SAMPLER, 0);
   }
}

void
brw_emit_buffer_surface_state(struct brw_context *brw,
                              uint32_t *out_offset,
                              struct brw_bo *bo,
                              unsigned buffer_offset,
                              unsigned surface_format,
                              unsigned buffer_size,
                              unsigned pitch,
                              bool rw)
{
   uint32_t *dw = brw_state_batch(brw,
                                  brw->isl_dev.ss.size,
                                  brw->isl_dev.ss.align,
                                  out_offset);

   isl_buffer_fill_state(&brw->isl_dev, dw,
                         .address = (bo ? bo->offset64 : 0) + buffer_offset,
                         .size = buffer_size,
                         .format = surface_format,
                         .stride = pitch,
                         .mocs = tex_mocs[brw->gen]);

   if (bo) {
      brw_emit_reloc(&brw->batch, *out_offset + brw->isl_dev.ss.addr_offset,
                     bo, buffer_offset,
                     I915_GEM_DOMAIN_SAMPLER,
                     (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
   }
}

void
brw_update_buffer_texture_surface(struct gl_context *ctx,
                                  unsigned unit,
                                  uint32_t *surf_offset)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_buffer_object *intel_obj =
      intel_buffer_object(tObj->BufferObject);
   uint32_t size = tObj->BufferSize;
   struct brw_bo *bo = NULL;
   mesa_format format = tObj->_BufferObjectFormat;
   uint32_t brw_format = brw_isl_format_for_mesa_format(format);
   int texel_size = _mesa_get_format_bytes(format);

   if (intel_obj) {
      size = MIN2(size, intel_obj->Base.Size);
      bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size);
   }

   /* The ARB_texture_buffer_specification says:
    *
    *    "The number of texels in the buffer texture's texel array is given by
    *
    *       floor(<buffer_size> / (<components> * sizeof(<base_type>)),
    *
    *     where <buffer_size> is the size of the buffer object, in basic
    *     machine units and <components> and <base_type> are the element count
    *     and base data type for elements, as specified in Table X.1.  The
    *     number of texels in the texel array is then clamped to the
    *     implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
    *
    * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
    * so that when ISL divides by stride to obtain the number of texels, that
    * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
    */
   size = MIN2(size, ctx->Const.MaxTextureBufferSize * (unsigned) texel_size);

   if (brw_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
      _mesa_problem(NULL, "bad format %s for texture buffer\n",
                    _mesa_get_format_name(format));
   }

   brw_emit_buffer_surface_state(brw, surf_offset, bo,
                                 tObj->BufferOffset,
                                 brw_format,
                                 size,
                                 texel_size,
                                 false /* rw */);
}
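
/* Worked example of the MAX_TEXTURE_BUFFER_SIZE clamp in
 * brw_update_buffer_texture_surface() above (illustrative numbers): for an
 * RGBA32F buffer texture, <components> is 4 and sizeof(<base_type>) is 4,
 * so texel_size is 16 bytes.  A 1000-byte buffer object then yields
 * floor(1000 / 16) == 62 texels once ISL divides by the stride, and
 * clamping size to MaxTextureBufferSize * 16 bytes beforehand caps the
 * resulting texel count at MAX_TEXTURE_BUFFER_SIZE, as the spec requires.
 */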

/**
 * Create the constant buffer surface.  Vertex/fragment shader constants will be
 * read from this buffer with Data Port Read instructions/messages.
 */
void
brw_create_constant_surface(struct brw_context *brw,
                            struct brw_bo *bo,
                            uint32_t offset,
                            uint32_t size,
                            uint32_t *out_offset)
{
   brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
                                 ISL_FORMAT_R32G32B32A32_FLOAT,
                                 size, 1, false);
}

/**
 * Create the buffer surface.  Shader buffer variables will be
 * read from / written to this buffer with Data Port Read/Write
 * instructions/messages.
 */
void
brw_create_buffer_surface(struct brw_context *brw,
                          struct brw_bo *bo,
                          uint32_t offset,
                          uint32_t size,
                          uint32_t *out_offset)
{
   /* Use a raw surface so we can reuse existing untyped read/write/atomic
    * messages.  We need these specifically for the fragment shader since they
    * include a pixel mask header that we need to ensure correct behavior
    * with helper invocations, which cannot write to the buffer.
    */
   brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
                                 ISL_FORMAT_RAW,
                                 size, 1, true);
}

/**
 * Set up a binding table entry for use by stream output logic (transform
 * feedback).
 *
 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
 */
void
brw_update_sol_surface(struct brw_context *brw,
                       struct gl_buffer_object *buffer_obj,
                       uint32_t *out_offset, unsigned num_vector_components,
                       unsigned stride_dwords, unsigned offset_dwords)
{
   struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
   uint32_t offset_bytes = 4 * offset_dwords;
   struct brw_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
                                              offset_bytes,
                                              buffer_obj->Size - offset_bytes);
   uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
   uint32_t pitch_minus_1 = 4*stride_dwords - 1;
   size_t size_dwords = buffer_obj->Size / 4;
   uint32_t buffer_size_minus_1, width, height, depth, surface_format;

   /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
    * too big to map using a single binding table entry?
    */
   assert((size_dwords - offset_dwords) / stride_dwords
          <= BRW_MAX_NUM_BUFFER_ENTRIES);

   if (size_dwords > offset_dwords + num_vector_components) {
      /* There is room for at least 1 transform feedback output in the buffer.
       * Compute the number of additional transform feedback outputs the
       * buffer has room for.
       */
      buffer_size_minus_1 =
         (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
   } else {
      /* There isn't even room for a single transform feedback output in the
       * buffer.  We can't configure the binding table entry to prevent output
       * entirely; we'll have to rely on the geometry shader to detect
       * overflow.  But to minimize the damage in case of a bug, set up the
       * binding table entry to just allow a single output.
       */
      buffer_size_minus_1 = 0;
   }
   width = buffer_size_minus_1 & 0x7f;
   height = (buffer_size_minus_1 & 0xfff80) >> 7;
   depth = (buffer_size_minus_1 & 0x7f00000) >> 20;

   switch (num_vector_components) {
   case 1:
      surface_format = ISL_FORMAT_R32_FLOAT;
      break;
   case 2:
      surface_format = ISL_FORMAT_R32G32_FLOAT;
      break;
   case 3:
      surface_format = ISL_FORMAT_R32G32B32_FLOAT;
      break;
   case 4:
      surface_format = ISL_FORMAT_R32G32B32A32_FLOAT;
      break;
   default:
      unreachable("Invalid vector size for transform feedback output");
   }

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
             BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
             surface_format << BRW_SURFACE_FORMAT_SHIFT |
             BRW_SURFACE_RC_READ_WRITE;
   surf[1] = bo->offset64 + offset_bytes; /* reloc */
   surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
              height << BRW_SURFACE_HEIGHT_SHIFT);
   surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = 0;
   surf[5] = 0;

   /* Emit relocation to surface contents. */
   brw_emit_reloc(&brw->batch, *out_offset + 4, bo, offset_bytes,
                  I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
}
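
/* Worked example of the buffer_size_minus_1 encoding in
 * brw_update_sol_surface() above (illustrative value):
 * buffer_size_minus_1 == 1000000 decomposes into
 *
 *    width  =  1000000             & 0x7f ==   64   (bits  0..6)
 *    height = (1000000 & 0xfff80)   >> 7  == 7812   (bits  7..19)
 *    depth  = (1000000 & 0x7f00000) >> 20 ==    0   (bits 20..26)
 *
 * and the hardware reassembles depth << 20 | height << 7 | width ==
 * 7812 * 128 + 64 == 1000000, i.e. the entry count minus one.
 */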

/* Creates a new WM constant buffer reflecting the current fragment program's
 * constants, if needed by the fragment program.
 *
 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
 * state atom.
 */
static void
brw_upload_wm_pull_constants(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->wm.base;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct brw_program *fp = (struct brw_program *) brw->fragment_program;
   /* BRW_NEW_FS_PROG_DATA */
   struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;

   _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
   /* _NEW_PROGRAM_CONSTANTS */
   brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
                             stage_state, prog_data);
}

const struct brw_tracked_state brw_wm_pull_constants = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_pull_constants,
};

/**
 * Creates a null renderbuffer surface.
 *
 * This is used when the shader doesn't write to any color output.  An FB
 * write to target 0 will still be emitted, because that's how the thread is
 * terminated (and computed depth is returned), so we need to have the
 * hardware discard the target 0 color output.
 */
static void
brw_emit_null_surface_state(struct brw_context *brw,
                            unsigned width,
                            unsigned height,
                            unsigned samples,
                            uint32_t *out_offset)
{
   /* From the Sandy bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
    * Notes):
    *
    *     A null surface will be used in instances where an actual surface is
    *     not bound. When a write message is generated to a null surface, no
    *     actual surface is written to. When a read message (including any
    *     sampling engine message) is generated to a null surface, the result
    *     is all zeros. Note that a null surface type is allowed to be used
    *     with all messages, even if it is not specifically indicated as
    *     supported. All of the remaining fields in surface state are ignored
    *     for null surfaces, with the following exceptions:
    *
    *     - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
    *       depth buffer's corresponding state for all render target surfaces,
    *       including null.
    *
    *     - Surface Format must be R8G8B8A8_UNORM.
    */
   unsigned surface_type = BRW_SURFACE_NULL;
   struct brw_bo *bo = NULL;
   unsigned pitch_minus_1 = 0;
   uint32_t multisampling_state = 0;
   uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);

   if (samples > 1) {
      /* On Gen6, null render targets seem to cause GPU hangs when
       * multisampling.  So work around this problem by rendering into a dummy
       * color buffer.
       *
       * To decrease the amount of memory needed by the workaround buffer, we
       * set its pitch to 128 bytes (the width of a Y tile).  This means that
       * the amount of memory needed for the workaround buffer is
       * (width_in_tiles + height_in_tiles - 1) tiles.
       *
       * Note that since the workaround buffer will be interpreted by the
       * hardware as an interleaved multisampled buffer, we need to compute
       * width_in_tiles and height_in_tiles by dividing the width and height
       * by 16 rather than the normal Y-tile size of 32.
       */
      unsigned width_in_tiles = ALIGN(width, 16) / 16;
      unsigned height_in_tiles = ALIGN(height, 16) / 16;
      unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
      brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
                         size_needed);
      bo = brw->wm.multisampled_null_render_target_bo;
      surface_type = BRW_SURFACE_2D;
      pitch_minus_1 = 127;
      multisampling_state = brw_get_surface_num_multisamples(samples);
   }

   surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
              ISL_FORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
   if (brw->gen < 6) {
      surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
                  1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
                  1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
                  1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
   }
   surf[1] = bo ? bo->offset64 : 0;
   surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   /* From Sandy bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
    * Notes):
    *
    *     If Surface Type is SURFTYPE_NULL, this field must be TRUE
    */
   surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = multisampling_state;
   surf[5] = 0;

   if (bo) {
      brw_emit_reloc(&brw->batch, *out_offset + 4, bo, 0,
                     I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
   }
}
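
/* Worked example of the workaround-buffer sizing in
 * brw_emit_null_surface_state() above (illustrative dimensions): for a
 * 1920x1080 multisampled framebuffer, width_in_tiles == ALIGN(1920, 16) /
 * 16 == 120 and height_in_tiles == ALIGN(1080, 16) / 16 == 68, so
 * size_needed == (120 + 68 - 1) * 4096 == 765952 bytes.  The 128-byte
 * pitch makes successive tile rows overlap in memory, which is why the
 * whole dummy surface fits in width_in_tiles + height_in_tiles - 1 tiles.
 */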

/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffer support.
 */
static uint32_t
gen4_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 uint32_t flags, unsigned unit,
                                 uint32_t surf_index)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;
   uint32_t *surf;
   uint32_t tile_x, tile_y;
   uint32_t format = 0;
   uint32_t offset;
   /* _NEW_BUFFERS */
   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   /* BRW_NEW_FS_PROG_DATA */

   assert(!(flags & INTEL_RENDERBUFFER_LAYERED));
   assert(!(flags & INTEL_AUX_BUFFER_DISABLED));

   if (rb->TexImage && !brw->has_surface_tile_offset) {
      intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);

      if (tile_x != 0 || tile_y != 0) {
         /* Original gen4 hardware couldn't draw to a non-tile-aligned
          * destination in a miptree unless you actually setup your renderbuffer
          * as a miptree and used the fragile lod/array_index/etc. controls to
          * select the image.  So, instead, we just make a new single-level
          * miptree and render into that.
          */
         intel_renderbuffer_move_to_temp(brw, irb, false);
         assert(irb->align_wa_mt);
         mt = irb->align_wa_mt;
      }
   }

   surf = brw_state_batch(brw, 6 * 4, 32, &offset);

   format = brw->render_target_format[rb_format];
   if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              format << BRW_SURFACE_FORMAT_SHIFT);

   /* reloc */
   assert(mt->offset % mt->cpp == 0);
   surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
              mt->bo->offset64 + mt->offset);

   surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
              (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);

   surf[4] = brw_get_surface_num_multisamples(mt->num_samples);

   assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
   /* Note that the low bits of these fields are missing, so
    * there's the possibility of getting in trouble.
    */
   assert(tile_x % 4 == 0);
   assert(tile_y % 2 == 0);
   surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
              (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
              (mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));

   if (brw->gen < 6) {
      /* _NEW_COLOR */
      if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
          (ctx->Color.BlendEnabled & (1 << unit)))
         surf[0] |= BRW_SURFACE_BLEND_ENABLED;

      if (!ctx->Color.ColorMask[unit][0])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
      if (!ctx->Color.ColorMask[unit][1])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
      if (!ctx->Color.ColorMask[unit][2])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;

      /* As mentioned above, disable writes to the alpha component when the
       * renderbuffer is XRGB.
       */
      if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
          !ctx->Color.ColorMask[unit][3]) {
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
      }
   }

   brw_emit_reloc(&brw->batch, offset + 4, mt->bo, surf[1] - mt->bo->offset64,
                  I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);

   return offset;
}

/**
 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
 */
void
brw_update_renderbuffer_surfaces(struct brw_context *brw,
                                 const struct gl_framebuffer *fb,
                                 uint32_t render_target_start,
                                 uint32_t *surf_offset)
{
   GLuint i;
   const unsigned int w = _mesa_geometric_width(fb);
   const unsigned int h = _mesa_geometric_height(fb);
   const unsigned int s = _mesa_geometric_samples(fb);

   /* Update surfaces for drawing buffers */
   if (fb->_NumColorDrawBuffers >= 1) {
      for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
         const uint32_t surf_index = render_target_start + i;
         const int flags = (_mesa_geometric_layers(fb) > 0 ?
                              INTEL_RENDERBUFFER_LAYERED : 0) |
                           (brw->draw_aux_buffer_disabled[i] ?
                              INTEL_AUX_BUFFER_DISABLED : 0);

         if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
            surf_offset[surf_index] =
               brw->vtbl.update_renderbuffer_surface(
                  brw, fb->_ColorDrawBuffers[i], flags, i, surf_index);
         } else {
            brw->vtbl.emit_null_surface_state(brw, w, h, s,
                                              &surf_offset[surf_index]);
         }
      }
   } else {
      const uint32_t surf_index = render_target_start;
      brw->vtbl.emit_null_surface_state(brw, w, h, s,
                                        &surf_offset[surf_index]);
   }
}

static void
update_renderbuffer_surfaces(struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;

   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);

   /* _NEW_BUFFERS | _NEW_COLOR */
   const struct gl_framebuffer *fb = ctx->DrawBuffer;
   brw_update_renderbuffer_surfaces(
      brw, fb,
      wm_prog_data->binding_table.render_target_start,
      brw->wm.base.surf_offset);
   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = update_renderbuffer_surfaces,
};

const struct brw_tracked_state gen6_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP,
   },
   .emit = update_renderbuffer_surfaces,
};

static void
update_renderbuffer_read_surfaces(struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;

   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);

   /* BRW_NEW_FRAGMENT_PROGRAM */
   if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
       brw->fragment_program && brw->fragment_program->info.outputs_read) {
      /* _NEW_BUFFERS */
      const struct gl_framebuffer *fb = ctx->DrawBuffer;

      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
         const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
         const unsigned surf_index =
            wm_prog_data->binding_table.render_target_read_start + i;
         uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];

         if (irb) {
            const unsigned format = brw->render_target_format[
               _mesa_get_render_format(ctx, intel_rb_format(irb))];
            assert(isl_format_supports_sampling(&brw->screen->devinfo,
                                                format));

            /* Override the target of the texture if the render buffer is a
             * single slice of a 3D texture (since the minimum array element
             * field of the surface state structure is ignored by the sampler
             * unit for 3D textures on some hardware), or if the render buffer
             * is a 1D array (since shaders always provide the array index
             * coordinate at the Z component to avoid state-dependent
             * recompiles when changing the texture target of the
             * framebuffer).
             */
            const GLenum target =
               (irb->mt->target == GL_TEXTURE_3D &&
                irb->layer_count == 1) ? GL_TEXTURE_2D :
               irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
               irb->mt->target;

            /* intel_renderbuffer::mt_layer is expressed in sample units for
             * the UMS and CMS multisample layouts, but
             * intel_renderbuffer::layer_count is expressed in units of whole
             * logical layers regardless of the multisample layout.
             */
            const unsigned mt_layer_unit =
               (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
                irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
               MAX2(irb->mt->num_samples, 1) : 1;

            const struct isl_view view = {
               .format = format,
               .base_level = irb->mt_level - irb->mt->first_level,
               .levels = 1,
               .base_array_layer = irb->mt_layer / mt_layer_unit,
               .array_len = irb->layer_count,
               .swizzle = ISL_SWIZZLE_IDENTITY,
               .usage = ISL_SURF_USAGE_TEXTURE_BIT,
            };

            const int flags = brw->draw_aux_buffer_disabled[i] ?
                                 INTEL_AUX_BUFFER_DISABLED : 0;
            brw_emit_surface_state(brw, irb->mt, flags, target, view,
                                   tex_mocs[brw->gen],
                                   surf_offset, surf_index,
                                   I915_GEM_DOMAIN_SAMPLER, 0);

         } else {
            brw->vtbl.emit_null_surface_state(
               brw, _mesa_geometric_width(fb), _mesa_geometric_height(fb),
               _mesa_geometric_samples(fb), surf_offset);
         }
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = update_renderbuffer_read_surfaces,
};

static void
update_stage_texture_surfaces(struct brw_context *brw,
                              const struct gl_program *prog,
                              struct brw_stage_state *stage_state,
                              bool for_gather, uint32_t plane)
{
   if (!prog)
      return;

   struct gl_context *ctx = &brw->ctx;

   uint32_t *surf_offset = stage_state->surf_offset;

   /* BRW_NEW_*_PROG_DATA */
   if (for_gather)
      surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
   else
      surf_offset += stage_state->prog_data->binding_table.plane_start[plane];

   unsigned num_samplers = util_last_bit(prog->SamplersUsed);
   for (unsigned s = 0; s < num_samplers; s++) {
      surf_offset[s] = 0;

      if (prog->SamplersUsed & (1 << s)) {
         const unsigned unit = prog->SamplerUnits[s];

         /* _NEW_TEXTURE */
         if (ctx->Texture.Unit[unit]._Current) {
            brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather, plane);
         }
      }
   }
}

/**
 * Construct SURFACE_STATE objects for enabled textures.
 */
static void
brw_update_texture_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_VERTEX_PROGRAM */
   struct gl_program *vs = (struct gl_program *) brw->vertex_program;

   /* BRW_NEW_TESS_PROGRAMS */
   struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
   struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;

   /* BRW_NEW_GEOMETRY_PROGRAM */
   struct gl_program *gs = (struct gl_program *) brw->geometry_program;

   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct gl_program *fs = (struct gl_program *) brw->fragment_program;

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
   update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
   update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
   update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
   update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);

   /* Emit an alternate set of surface state for gather.  This
    * allows the surface format to be overridden for only the
    * gather4 messages.
    */
   if (brw->gen < 8) {
      if (vs && vs->nir->info->uses_texture_gather)
         update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
      if (tcs && tcs->nir->info->uses_texture_gather)
         update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
      if (tes && tes->nir->info->uses_texture_gather)
         update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
      if (gs && gs->nir->info->uses_texture_gather)
         update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
      if (fs && fs->nir->info->uses_texture_gather)
         update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
   }

   if (fs) {
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_GEOMETRY_PROGRAM |
             BRW_NEW_GS_PROG_DATA |
             BRW_NEW_TESS_PROGRAMS |
             BRW_NEW_TCS_PROG_DATA |
             BRW_NEW_TES_PROG_DATA |
             BRW_NEW_TEXTURE_BUFFER |
             BRW_NEW_VERTEX_PROGRAM |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = brw_update_texture_surfaces,
};

static void
brw_update_cs_texture_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_COMPUTE_PROGRAM */
   struct gl_program *cs = (struct gl_program *) brw->compute_program;

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);

   /* Emit an alternate set of surface state for gather.  This
    * allows the surface format to be overridden for only the
    * gather4 messages.
    */
   if (brw->gen < 8) {
      if (cs && cs->nir->info->uses_texture_gather)
         update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_cs_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_COMPUTE_PROGRAM,
   },
   .emit = brw_update_cs_texture_surfaces,
};

static void
brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;

   if (!prog)
      return;

   uint32_t *ubo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ubo_start];

   for (int i = 0; i < prog->info.num_ubos; i++) {
      struct gl_uniform_buffer_binding *binding =
         &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];

      if (binding->BufferObject == ctx->Shared->NullBufferObj) {
         brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
      } else {
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
         if (!binding->AutomaticSize)
            size = MIN2(size, binding->Size);
         struct brw_bo *bo =
            intel_bufferobj_buffer(brw, intel_bo,
                                   binding->Offset,
                                   size);
         brw_create_constant_surface(brw, bo, binding->Offset,
                                     size,
                                     &ubo_surf_offsets[i]);
      }
   }

   uint32_t *ssbo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ssbo_start];

   for (int i = 0; i < prog->info.num_ssbos; i++) {
      struct gl_shader_storage_buffer_binding *binding =
         &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];

      if (binding->BufferObject == ctx->Shared->NullBufferObj) {
         brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
      } else {
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
         if (!binding->AutomaticSize)
            size = MIN2(size, binding->Size);
         struct brw_bo *bo =
            intel_bufferobj_buffer(brw, intel_bo,
                                   binding->Offset,
                                   size);
         brw_create_buffer_surface(brw, bo, binding->Offset,
                                   size,
                                   &ssbo_surf_offsets[i]);
      }
   }

   if (prog->info.num_ubos || prog->info.num_ssbos)
      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

static void
brw_upload_wm_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog = ctx->_Shader->_CurrentFragmentProgram;

   /* BRW_NEW_FS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
}

const struct brw_tracked_state brw_wm_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_wm_ubo_surfaces,
};

static void
brw_upload_cs_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   /* BRW_NEW_CS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog, &brw->cs.base, brw->cs.base.prog_data);
}

const struct brw_tracked_state brw_cs_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_cs_ubo_surfaces,
};

static void
brw_upload_abo_surfaces(struct brw_context *brw,
                        const struct gl_program *prog,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;
   uint32_t *surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.abo_start];

   if (prog->info.num_abos) {
      for (unsigned i = 0; i < prog->info.num_abos; i++) {
         struct gl_atomic_buffer_binding *binding =
            &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         struct brw_bo *bo = intel_bufferobj_buffer(
            brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);

         brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
                                       binding->Offset, ISL_FORMAT_RAW,
                                       bo->size - binding->Offset, 1, true);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

static void
brw_upload_wm_abo_surfaces(struct brw_context *brw)
{
   /* _NEW_PROGRAM */
   const struct gl_program *wm = brw->fragment_program;

   if (wm) {
      /* BRW_NEW_FS_PROG_DATA */
      brw_upload_abo_surfaces(brw, wm, &brw->wm.base, brw->wm.base.prog_data);
   }
}

const struct brw_tracked_state brw_wm_abo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_ATOMIC_BUFFER |
             BRW_NEW_BLORP |
             BRW_NEW_BATCH |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_abo_surfaces,
};

static void
brw_upload_cs_abo_surfaces(struct brw_context *brw)
{
   /* _NEW_PROGRAM */
   const struct gl_program *cp = brw->compute_program;

   if (cp) {
      /* BRW_NEW_CS_PROG_DATA */
      brw_upload_abo_surfaces(brw, cp, &brw->cs.base, brw->cs.base.prog_data);
   }
}

const struct brw_tracked_state brw_cs_abo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_ATOMIC_BUFFER |
             BRW_NEW_BLORP |
             BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA,
   },
   .emit = brw_upload_cs_abo_surfaces,
};

static void
brw_upload_cs_image_surfaces(struct brw_context *brw)
{
   /* _NEW_PROGRAM */
   const struct gl_program *cp = brw->compute_program;

   if (cp) {
      /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, cp, &brw->cs.base,
                                brw->cs.base.prog_data);
   }
}

const struct brw_tracked_state brw_cs_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS,
   },
   .emit = brw_upload_cs_image_surfaces,
};

static uint32_t
get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t hw_format = brw_isl_format_for_mesa_format(format);
   if (access == GL_WRITE_ONLY) {
      return hw_format;
   } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
      /* Typed surface reads support a very limited subset of the shader
       * image formats.  Translate it into the closest format the
       * hardware supports.
       */
      return isl_lower_storage_image_format(devinfo, hw_format);
   } else {
      /* The hardware doesn't actually support a typed format that we can use
       * so we have to fall back to untyped read/write messages.
       */
      return ISL_FORMAT_RAW;
   }
}

static void
update_default_image_param(struct brw_context *brw,
                           struct gl_image_unit *u,
                           unsigned surface_idx,
                           struct brw_image_param *param)
{
   memset(param, 0, sizeof(*param));
   param->surface_idx = surface_idx;
   /* Set the swizzling shifts to all-ones to effectively disable swizzling --
    * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
    * detailed explanation of these parameters.
    */
   param->swizzling[0] = 0xff;
   param->swizzling[1] = 0xff;
}

static void
update_buffer_image_param(struct brw_context *brw,
                          struct gl_image_unit *u,
                          unsigned surface_idx,
                          struct brw_image_param *param)
{
   struct gl_buffer_object *obj = u->TexObj->BufferObject;
   const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
   update_default_image_param(brw, u, surface_idx, param);

   param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
   param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
}

static void
update_texture_image_param(struct brw_context *brw,
                           struct gl_image_unit *u,
                           unsigned surface_idx,
                           struct brw_image_param *param)
{
   struct intel_mipmap_tree *mt = intel_texture_object(u->TexObj)->mt;

   update_default_image_param(brw, u, surface_idx, param);

   param->size[0] = minify(mt->logical_width0, u->Level);
   param->size[1] = minify(mt->logical_height0, u->Level);
   param->size[2] = (!u->Layered ? 1 :
                     u->TexObj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
                     u->TexObj->Target == GL_TEXTURE_3D ?
                     minify(mt->logical_depth0, u->Level) :
                     mt->logical_depth0);

   intel_miptree_get_image_offset(mt, u->Level, u->_Layer,
                                  &param->offset[0],
                                  &param->offset[1]);

   param->stride[0] = mt->cpp;
   param->stride[1] = mt->pitch / mt->cpp;
   param->stride[2] =
      brw_miptree_get_horizontal_slice_pitch(brw, mt, u->Level);
   param->stride[3] =
      brw_miptree_get_vertical_slice_pitch(brw, mt, u->Level);

   if (mt->tiling == I915_TILING_X) {
      /* An X tile is a rectangular block of 512x8 bytes. */
      param->tiling[0] = _mesa_logbase2(512 / mt->cpp);
      param->tiling[1] = _mesa_logbase2(8);

      if (brw->has_swizzling) {
         /* Right shifts required to swizzle bits 9 and 10 of the memory
          * address with bit 6.
          */
         param->swizzling[0] = 3;
         param->swizzling[1] = 4;
      }
   } else if (mt->tiling == I915_TILING_Y) {
      /* The layout of a Y-tiled surface in memory isn't really fundamentally
       * different from the layout of an X-tiled surface; we simply pretend that
       * the surface is broken up in a number of smaller 16Bx32 tiles, each
       * one arranged in X-major order just like is the case for X-tiling.
       */
      param->tiling[0] = _mesa_logbase2(16 / mt->cpp);
      param->tiling[1] = _mesa_logbase2(32);

      if (brw->has_swizzling) {
         /* Right shift required to swizzle bit 9 of the memory address with
          * bit 6.
          */
         param->swizzling[0] = 3;
      }
   }

   /* 3D textures are arranged in 2D in memory with 2^lod slices per row.  The
    * address calculation algorithm (emit_address_calculation() in
    * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
    * modulus equal to the LOD.
    */
   param->tiling[2] = (u->TexObj->Target == GL_TEXTURE_3D ? u->Level :
                       0);
}
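
/* Worked example of the tiling parameters in update_texture_image_param()
 * above (illustrative format): for a 4-byte-per-pixel surface (e.g.
 * RGBA8), X-tiling gives tiling[0] == _mesa_logbase2(512 / 4) == 7 and
 * tiling[1] == _mesa_logbase2(8) == 3, i.e. a 128x8-pixel tile, while
 * Y-tiling gives tiling[0] == _mesa_logbase2(16 / 4) == 2 and tiling[1] ==
 * _mesa_logbase2(32) == 5, i.e. a 4x32-pixel sub-tile.
 */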

static void
update_image_surface(struct brw_context *brw,
                     struct gl_image_unit *u,
                     GLenum access,
                     unsigned surface_idx,
                     uint32_t *surf_offset,
                     struct brw_image_param *param)
{
   if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
      struct gl_texture_object *obj = u->TexObj;
      const unsigned format = get_image_format(brw, u->_ActualFormat, access);

      if (obj->Target == GL_TEXTURE_BUFFER) {
         struct intel_buffer_object *intel_obj =
            intel_buffer_object(obj->BufferObject);
         const unsigned texel_size = (format == ISL_FORMAT_RAW ? 1 :
                                      _mesa_get_format_bytes(u->_ActualFormat));

         brw_emit_buffer_surface_state(
            brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
            format, intel_obj->Base.Size, texel_size,
            access != GL_READ_ONLY);

         update_buffer_image_param(brw, u, surface_idx, param);

      } else {
         struct intel_texture_object *intel_obj = intel_texture_object(obj);
         struct intel_mipmap_tree *mt = intel_obj->mt;

         if (format == ISL_FORMAT_RAW) {
            brw_emit_buffer_surface_state(
               brw, surf_offset, mt->bo, mt->offset,
               format, mt->bo->size - mt->offset, 1 /* pitch */,
               access != GL_READ_ONLY);

         } else {
            const unsigned num_layers = (!u->Layered ? 1 :
                                         obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
                                         mt->logical_depth0);

            struct isl_view view = {
               .format = format,
               .base_level = obj->MinLevel + u->Level,
               .levels = 1,
               .base_array_layer = obj->MinLayer + u->_Layer,
               .array_len = num_layers,
               .swizzle = ISL_SWIZZLE_IDENTITY,
               .usage = ISL_SURF_USAGE_STORAGE_BIT,
            };

            const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
            const bool unresolved = intel_miptree_has_color_unresolved(
                                       mt, view.base_level, view.levels,
                                       view.base_array_layer, view.array_len);
            const int flags = unresolved ? 0 : INTEL_AUX_BUFFER_DISABLED;
            brw_emit_surface_state(brw, mt, flags, mt->target, view,
                                   tex_mocs[brw->gen],
                                   surf_offset, surf_index,
                                   I915_GEM_DOMAIN_SAMPLER,
                                   access == GL_READ_ONLY ? 0 :
                                             I915_GEM_DOMAIN_SAMPLER);

            update_texture_image_param(brw, u, surface_idx, param);
         }
      }

   } else {
      brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
      update_default_image_param(brw, u, surface_idx, param);
   }
}

void
brw_upload_image_surfaces(struct brw_context *brw,
                          const struct gl_program *prog,
                          struct brw_stage_state *stage_state,
                          struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;

   if (prog->info.num_images) {
      for (unsigned i = 0; i < prog->info.num_images; i++) {
         struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
         const unsigned surf_idx = prog_data->binding_table.image_start + i;

         update_image_surface(brw, u, prog->sh.ImageAccess[i],
                              surf_idx,
                              &stage_state->surf_offset[surf_idx],
                              &prog_data->image_param[i]);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
      /* This may have changed the image metadata that depends on the context
       * image unit state and is passed to the program as uniforms; make sure
       * that push and pull constants are reuploaded.
       */
      brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
   }
}

static void
brw_upload_wm_image_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct gl_program *wm = brw->fragment_program;

   if (wm) {
      /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, wm, &brw->wm.base,
                                brw->wm.base.prog_data);
   }
}

const struct brw_tracked_state brw_wm_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS,
   },
   .emit = brw_upload_wm_image_surfaces,
};

void
gen4_init_vtable_surface_functions(struct brw_context *brw)
{
   brw->vtbl.update_renderbuffer_surface = gen4_update_renderbuffer_surface;
   brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
}

void
gen6_init_vtable_surface_functions(struct brw_context *brw)
{
   gen4_init_vtable_surface_functions(brw);
   brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
}

static void
brw_upload_cs_work_groups_surface(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
   /* BRW_NEW_CS_PROG_DATA */
   const struct brw_cs_prog_data *cs_prog_data =
      brw_cs_prog_data(brw->cs.base.prog_data);

   if (prog && cs_prog_data->uses_num_work_groups) {
      const unsigned surf_idx =
         cs_prog_data->binding_table.work_groups_start;
      uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
      struct brw_bo *bo;
      uint32_t bo_offset;

      if (brw->compute.num_work_groups_bo == NULL) {
         bo = NULL;
         intel_upload_data(brw,
                           (void *)brw->compute.num_work_groups,
                           3 * sizeof(GLuint),
                           sizeof(GLuint),
                           &bo,
                           &bo_offset);
      } else {
         bo = brw->compute.num_work_groups_bo;
         bo_offset = brw->compute.num_work_groups_offset;
      }

      brw_emit_buffer_surface_state(brw, surf_offset,
                                    bo, bo_offset,
                                    ISL_FORMAT_RAW,
                                    3 * sizeof(GLuint), 1, true);
      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_cs_work_groups_surface = {
   .dirty = {
      .brw = BRW_NEW_BLORP |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_CS_WORK_GROUPS
   },
   .emit = brw_upload_cs_work_groups_surface,
};