/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */
33 #include "compiler/nir/nir.h"
34 #include "main/context.h"
35 #include "main/blend.h"
36 #include "main/mtypes.h"
37 #include "main/samplerobj.h"
38 #include "main/shaderimage.h"
39 #include "main/teximage.h"
40 #include "program/prog_parameter.h"
41 #include "program/prog_instruction.h"
42 #include "main/framebuffer.h"
43 #include "main/shaderapi.h"
47 #include "intel_mipmap_tree.h"
48 #include "intel_batchbuffer.h"
49 #include "intel_tex.h"
50 #include "intel_fbo.h"
51 #include "intel_buffer_objects.h"
53 #include "brw_context.h"
54 #include "brw_state.h"
55 #include "brw_defines.h"
58 uint32_t tex_mocs
[] = {
65 uint32_t rb_mocs
[] = {
73 get_isl_surf(struct brw_context
*brw
, struct intel_mipmap_tree
*mt
,
74 GLenum target
, struct isl_view
*view
,
75 uint32_t *tile_x
, uint32_t *tile_y
,
76 uint32_t *offset
, struct isl_surf
*surf
)
80 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
81 const enum isl_dim_layout dim_layout
=
82 get_isl_dim_layout(devinfo
, mt
->surf
.tiling
, target
);
84 if (surf
->dim_layout
== dim_layout
)
87 /* The layout of the specified texture target is not compatible with the
88 * actual layout of the miptree structure in memory -- You're entering
89 * dangerous territory, this can only possibly work if you only intended
90 * to access a single level and slice of the texture, and the hardware
91 * supports the tile offset feature in order to allow non-tile-aligned
92 * base offsets, since we'll have to point the hardware to the first
93 * texel of the level instead of relying on the usual base level/layer
96 assert(devinfo
->has_surface_tile_offset
);
97 assert(view
->levels
== 1 && view
->array_len
== 1);
98 assert(*tile_x
== 0 && *tile_y
== 0);
100 *offset
+= intel_miptree_get_tile_offsets(mt
, view
->base_level
,
101 view
->base_array_layer
,
104 /* Minify the logical dimensions of the texture. */
105 const unsigned l
= view
->base_level
- mt
->first_level
;
106 surf
->logical_level0_px
.width
= minify(surf
->logical_level0_px
.width
, l
);
107 surf
->logical_level0_px
.height
= surf
->dim
<= ISL_SURF_DIM_1D
? 1 :
108 minify(surf
->logical_level0_px
.height
, l
);
109 surf
->logical_level0_px
.depth
= surf
->dim
<= ISL_SURF_DIM_2D
? 1 :
110 minify(surf
->logical_level0_px
.depth
, l
);
112 /* Only the base level and layer can be addressed with the overridden
115 surf
->logical_level0_px
.array_len
= 1;
117 surf
->dim_layout
= dim_layout
;
119 /* The requested slice of the texture is now at the base level and
122 view
->base_level
= 0;
123 view
->base_array_layer
= 0;
127 brw_emit_surface_state(struct brw_context
*brw
,
128 struct intel_mipmap_tree
*mt
,
129 GLenum target
, struct isl_view view
,
130 enum isl_aux_usage aux_usage
,
131 uint32_t mocs
, uint32_t *surf_offset
, int surf_index
,
132 unsigned reloc_flags
)
134 uint32_t tile_x
= mt
->level
[0].level_x
;
135 uint32_t tile_y
= mt
->level
[0].level_y
;
136 uint32_t offset
= mt
->offset
;
138 struct isl_surf surf
;
140 get_isl_surf(brw
, mt
, target
, &view
, &tile_x
, &tile_y
, &offset
, &surf
);
142 union isl_color_value clear_color
= { .u32
= { 0, 0, 0, 0 } };
144 struct brw_bo
*aux_bo
;
145 struct isl_surf
*aux_surf
= NULL
;
146 uint64_t aux_offset
= 0;
148 case ISL_AUX_USAGE_MCS
:
149 case ISL_AUX_USAGE_CCS_D
:
150 case ISL_AUX_USAGE_CCS_E
:
151 aux_surf
= &mt
->mcs_buf
->surf
;
152 aux_bo
= mt
->mcs_buf
->bo
;
153 aux_offset
= mt
->mcs_buf
->offset
;
156 case ISL_AUX_USAGE_HIZ
:
157 aux_surf
= &mt
->hiz_buf
->surf
;
158 aux_bo
= mt
->hiz_buf
->bo
;
162 case ISL_AUX_USAGE_NONE
:
166 if (aux_usage
!= ISL_AUX_USAGE_NONE
) {
167 /* We only really need a clear color if we also have an auxiliary
168 * surface. Without one, it does nothing.
170 clear_color
= mt
->fast_clear_color
;
173 void *state
= brw_state_batch(brw
,
174 brw
->isl_dev
.ss
.size
,
175 brw
->isl_dev
.ss
.align
,
178 isl_surf_fill_state(&brw
->isl_dev
, state
, .surf
= &mt
->surf
, .view
= &view
,
179 .address
= brw_emit_reloc(&brw
->batch
,
180 *surf_offset
+ brw
->isl_dev
.ss
.addr_offset
,
181 mt
->bo
, offset
, reloc_flags
),
182 .aux_surf
= aux_surf
, .aux_usage
= aux_usage
,
183 .aux_address
= aux_offset
,
184 .mocs
= mocs
, .clear_color
= clear_color
,
185 .x_offset_sa
= tile_x
, .y_offset_sa
= tile_y
);
187 /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
188 * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
189 * contain other control information. Since buffer addresses are always
190 * on 4k boundaries (and thus have their lower 12 bits zero), we can use
191 * an ordinary reloc to do the necessary address translation.
193 * FIXME: move to the point of assignment.
195 assert((aux_offset
& 0xfff) == 0);
196 uint32_t *aux_addr
= state
+ brw
->isl_dev
.ss
.aux_addr_offset
;
197 *aux_addr
= brw_emit_reloc(&brw
->batch
,
199 brw
->isl_dev
.ss
.aux_addr_offset
,
206 gen6_update_renderbuffer_surface(struct brw_context
*brw
,
207 struct gl_renderbuffer
*rb
,
211 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
212 struct gl_context
*ctx
= &brw
->ctx
;
213 struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
214 struct intel_mipmap_tree
*mt
= irb
->mt
;
216 enum isl_aux_usage aux_usage
=
217 brw
->draw_aux_buffer_disabled
[unit
] ? ISL_AUX_USAGE_NONE
:
218 intel_miptree_render_aux_usage(brw
, mt
, ctx
->Color
.sRGBEnabled
,
219 ctx
->Color
.BlendEnabled
& (1 << unit
));
221 assert(brw_render_target_supported(brw
, rb
));
223 mesa_format rb_format
= _mesa_get_render_format(ctx
, intel_rb_format(irb
));
224 if (unlikely(!brw
->mesa_format_supports_render
[rb_format
])) {
225 _mesa_problem(ctx
, "%s: renderbuffer format %s unsupported\n",
226 __func__
, _mesa_get_format_name(rb_format
));
229 struct isl_view view
= {
230 .format
= brw
->mesa_to_isl_render_format
[rb_format
],
231 .base_level
= irb
->mt_level
- irb
->mt
->first_level
,
233 .base_array_layer
= irb
->mt_layer
,
234 .array_len
= MAX2(irb
->layer_count
, 1),
235 .swizzle
= ISL_SWIZZLE_IDENTITY
,
236 .usage
= ISL_SURF_USAGE_RENDER_TARGET_BIT
,
240 brw_emit_surface_state(brw
, mt
, mt
->target
, view
, aux_usage
,
241 rb_mocs
[devinfo
->gen
],
248 translate_tex_target(GLenum target
)
252 case GL_TEXTURE_1D_ARRAY_EXT
:
253 return BRW_SURFACE_1D
;
255 case GL_TEXTURE_RECTANGLE_NV
:
256 return BRW_SURFACE_2D
;
259 case GL_TEXTURE_2D_ARRAY_EXT
:
260 case GL_TEXTURE_EXTERNAL_OES
:
261 case GL_TEXTURE_2D_MULTISAMPLE
:
262 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY
:
263 return BRW_SURFACE_2D
;
266 return BRW_SURFACE_3D
;
268 case GL_TEXTURE_CUBE_MAP
:
269 case GL_TEXTURE_CUBE_MAP_ARRAY
:
270 return BRW_SURFACE_CUBE
;
273 unreachable("not reached");
278 brw_get_surface_tiling_bits(enum isl_tiling tiling
)
282 return BRW_SURFACE_TILED
;
284 return BRW_SURFACE_TILED
| BRW_SURFACE_TILED_Y
;
292 brw_get_surface_num_multisamples(unsigned num_samples
)
295 return BRW_SURFACE_MULTISAMPLECOUNT_4
;
297 return BRW_SURFACE_MULTISAMPLECOUNT_1
;
301 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
305 brw_get_texture_swizzle(const struct gl_context
*ctx
,
306 const struct gl_texture_object
*t
)
308 const struct gl_texture_image
*img
= t
->Image
[0][t
->BaseLevel
];
310 int swizzles
[SWIZZLE_NIL
+ 1] = {
320 if (img
->_BaseFormat
== GL_DEPTH_COMPONENT
||
321 img
->_BaseFormat
== GL_DEPTH_STENCIL
) {
322 GLenum depth_mode
= t
->DepthMode
;
324 /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
325 * with depth component data specified with a sized internal format.
326 * Otherwise, it's left at the old default, GL_LUMINANCE.
328 if (_mesa_is_gles3(ctx
) &&
329 img
->InternalFormat
!= GL_DEPTH_COMPONENT
&&
330 img
->InternalFormat
!= GL_DEPTH_STENCIL
) {
334 switch (depth_mode
) {
336 swizzles
[0] = SWIZZLE_ZERO
;
337 swizzles
[1] = SWIZZLE_ZERO
;
338 swizzles
[2] = SWIZZLE_ZERO
;
339 swizzles
[3] = SWIZZLE_X
;
342 swizzles
[0] = SWIZZLE_X
;
343 swizzles
[1] = SWIZZLE_X
;
344 swizzles
[2] = SWIZZLE_X
;
345 swizzles
[3] = SWIZZLE_ONE
;
348 swizzles
[0] = SWIZZLE_X
;
349 swizzles
[1] = SWIZZLE_X
;
350 swizzles
[2] = SWIZZLE_X
;
351 swizzles
[3] = SWIZZLE_X
;
354 swizzles
[0] = SWIZZLE_X
;
355 swizzles
[1] = SWIZZLE_ZERO
;
356 swizzles
[2] = SWIZZLE_ZERO
;
357 swizzles
[3] = SWIZZLE_ONE
;
362 GLenum datatype
= _mesa_get_format_datatype(img
->TexFormat
);
364 /* If the texture's format is alpha-only, force R, G, and B to
365 * 0.0. Similarly, if the texture's format has no alpha channel,
366 * force the alpha value read to 1.0. This allows for the
367 * implementation to use an RGBA texture for any of these formats
368 * without leaking any unexpected values.
370 switch (img
->_BaseFormat
) {
372 swizzles
[0] = SWIZZLE_ZERO
;
373 swizzles
[1] = SWIZZLE_ZERO
;
374 swizzles
[2] = SWIZZLE_ZERO
;
377 if (t
->_IsIntegerFormat
|| datatype
== GL_SIGNED_NORMALIZED
) {
378 swizzles
[0] = SWIZZLE_X
;
379 swizzles
[1] = SWIZZLE_X
;
380 swizzles
[2] = SWIZZLE_X
;
381 swizzles
[3] = SWIZZLE_ONE
;
384 case GL_LUMINANCE_ALPHA
:
385 if (datatype
== GL_SIGNED_NORMALIZED
) {
386 swizzles
[0] = SWIZZLE_X
;
387 swizzles
[1] = SWIZZLE_X
;
388 swizzles
[2] = SWIZZLE_X
;
389 swizzles
[3] = SWIZZLE_W
;
393 if (datatype
== GL_SIGNED_NORMALIZED
) {
394 swizzles
[0] = SWIZZLE_X
;
395 swizzles
[1] = SWIZZLE_X
;
396 swizzles
[2] = SWIZZLE_X
;
397 swizzles
[3] = SWIZZLE_X
;
403 if (_mesa_get_format_bits(img
->TexFormat
, GL_ALPHA_BITS
) > 0 ||
404 img
->TexFormat
== MESA_FORMAT_RGB_DXT1
||
405 img
->TexFormat
== MESA_FORMAT_SRGB_DXT1
)
406 swizzles
[3] = SWIZZLE_ONE
;
410 return MAKE_SWIZZLE4(swizzles
[GET_SWZ(t
->_Swizzle
, 0)],
411 swizzles
[GET_SWZ(t
->_Swizzle
, 1)],
412 swizzles
[GET_SWZ(t
->_Swizzle
, 2)],
413 swizzles
[GET_SWZ(t
->_Swizzle
, 3)]);
417 * Convert an swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
418 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED). The mappings are
420 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
423 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
425 * which is simply adding 4 then modding by 8 (or anding with 7).
427 * We then may need to apply workarounds for textureGather hardware bugs.
430 swizzle_to_scs(GLenum swizzle
, bool need_green_to_blue
)
432 unsigned scs
= (swizzle
+ 4) & 7;
434 return (need_green_to_blue
&& scs
== HSW_SCS_GREEN
) ? HSW_SCS_BLUE
: scs
;
438 brw_aux_surface_disabled(const struct brw_context
*brw
,
439 const struct intel_mipmap_tree
*mt
)
441 const struct gl_framebuffer
*fb
= brw
->ctx
.DrawBuffer
;
443 for (unsigned i
= 0; i
< fb
->_NumColorDrawBuffers
; i
++) {
444 const struct intel_renderbuffer
*irb
=
445 intel_renderbuffer(fb
->_ColorDrawBuffers
[i
]);
447 if (irb
&& irb
->mt
== mt
)
448 return brw
->draw_aux_buffer_disabled
[i
];
455 brw_update_texture_surface(struct gl_context
*ctx
,
457 uint32_t *surf_offset
,
461 struct brw_context
*brw
= brw_context(ctx
);
462 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
463 struct gl_texture_object
*obj
= ctx
->Texture
.Unit
[unit
]._Current
;
465 if (obj
->Target
== GL_TEXTURE_BUFFER
) {
466 brw_update_buffer_texture_surface(ctx
, unit
, surf_offset
);
469 struct intel_texture_object
*intel_obj
= intel_texture_object(obj
);
470 struct intel_mipmap_tree
*mt
= intel_obj
->mt
;
473 if (mt
->plane
[plane
- 1] == NULL
)
475 mt
= mt
->plane
[plane
- 1];
478 struct gl_sampler_object
*sampler
= _mesa_get_samplerobj(ctx
, unit
);
479 /* If this is a view with restricted NumLayers, then our effective depth
480 * is not just the miptree depth.
482 unsigned view_num_layers
;
483 if (obj
->Immutable
&& obj
->Target
!= GL_TEXTURE_3D
) {
484 view_num_layers
= obj
->NumLayers
;
486 view_num_layers
= mt
->surf
.dim
== ISL_SURF_DIM_3D
?
487 mt
->surf
.logical_level0_px
.depth
:
488 mt
->surf
.logical_level0_px
.array_len
;
491 /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
492 * texturing functions that return a float, as our code generation always
493 * selects the .x channel (which would always be 0).
495 struct gl_texture_image
*firstImage
= obj
->Image
[0][obj
->BaseLevel
];
496 const bool alpha_depth
= obj
->DepthMode
== GL_ALPHA
&&
497 (firstImage
->_BaseFormat
== GL_DEPTH_COMPONENT
||
498 firstImage
->_BaseFormat
== GL_DEPTH_STENCIL
);
499 const unsigned swizzle
= (unlikely(alpha_depth
) ? SWIZZLE_XYZW
:
500 brw_get_texture_swizzle(&brw
->ctx
, obj
));
502 mesa_format mesa_fmt
= plane
== 0 ? intel_obj
->_Format
: mt
->format
;
503 enum isl_format format
= translate_tex_format(brw
, mesa_fmt
,
504 sampler
->sRGBDecode
);
506 /* Implement gen6 and gen7 gather work-around */
507 bool need_green_to_blue
= false;
509 if (devinfo
->gen
== 7 && (format
== ISL_FORMAT_R32G32_FLOAT
||
510 format
== ISL_FORMAT_R32G32_SINT
||
511 format
== ISL_FORMAT_R32G32_UINT
)) {
512 format
= ISL_FORMAT_R32G32_FLOAT_LD
;
513 need_green_to_blue
= devinfo
->is_haswell
;
514 } else if (devinfo
->gen
== 6) {
515 /* Sandybridge's gather4 message is broken for integer formats.
516 * To work around this, we pretend the surface is UNORM for
517 * 8 or 16-bit formats, and emit shader instructions to recover
518 * the real INT/UINT value. For 32-bit formats, we pretend
519 * the surface is FLOAT, and simply reinterpret the resulting
523 case ISL_FORMAT_R8_SINT
:
524 case ISL_FORMAT_R8_UINT
:
525 format
= ISL_FORMAT_R8_UNORM
;
528 case ISL_FORMAT_R16_SINT
:
529 case ISL_FORMAT_R16_UINT
:
530 format
= ISL_FORMAT_R16_UNORM
;
533 case ISL_FORMAT_R32_SINT
:
534 case ISL_FORMAT_R32_UINT
:
535 format
= ISL_FORMAT_R32_FLOAT
;
544 if (obj
->StencilSampling
&& firstImage
->_BaseFormat
== GL_DEPTH_STENCIL
) {
545 if (devinfo
->gen
<= 7) {
546 assert(mt
->r8stencil_mt
&& !mt
->stencil_mt
->r8stencil_needs_update
);
547 mt
= mt
->r8stencil_mt
;
551 format
= ISL_FORMAT_R8_UINT
;
552 } else if (devinfo
->gen
<= 7 && mt
->format
== MESA_FORMAT_S_UINT8
) {
553 assert(mt
->r8stencil_mt
&& !mt
->r8stencil_needs_update
);
554 mt
= mt
->r8stencil_mt
;
555 format
= ISL_FORMAT_R8_UINT
;
558 const int surf_index
= surf_offset
- &brw
->wm
.base
.surf_offset
[0];
560 struct isl_view view
= {
562 .base_level
= obj
->MinLevel
+ obj
->BaseLevel
,
563 .levels
= intel_obj
->_MaxLevel
- obj
->BaseLevel
+ 1,
564 .base_array_layer
= obj
->MinLayer
,
565 .array_len
= view_num_layers
,
567 .r
= swizzle_to_scs(GET_SWZ(swizzle
, 0), need_green_to_blue
),
568 .g
= swizzle_to_scs(GET_SWZ(swizzle
, 1), need_green_to_blue
),
569 .b
= swizzle_to_scs(GET_SWZ(swizzle
, 2), need_green_to_blue
),
570 .a
= swizzle_to_scs(GET_SWZ(swizzle
, 3), need_green_to_blue
),
572 .usage
= ISL_SURF_USAGE_TEXTURE_BIT
,
575 if (obj
->Target
== GL_TEXTURE_CUBE_MAP
||
576 obj
->Target
== GL_TEXTURE_CUBE_MAP_ARRAY
)
577 view
.usage
|= ISL_SURF_USAGE_CUBE_BIT
;
579 enum isl_aux_usage aux_usage
=
580 intel_miptree_texture_aux_usage(brw
, mt
, format
);
582 if (brw_aux_surface_disabled(brw
, mt
))
583 aux_usage
= ISL_AUX_USAGE_NONE
;
585 brw_emit_surface_state(brw
, mt
, mt
->target
, view
, aux_usage
,
586 tex_mocs
[devinfo
->gen
],
587 surf_offset
, surf_index
,
593 brw_emit_buffer_surface_state(struct brw_context
*brw
,
594 uint32_t *out_offset
,
596 unsigned buffer_offset
,
597 unsigned surface_format
,
598 unsigned buffer_size
,
600 unsigned reloc_flags
)
602 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
603 uint32_t *dw
= brw_state_batch(brw
,
604 brw
->isl_dev
.ss
.size
,
605 brw
->isl_dev
.ss
.align
,
608 isl_buffer_fill_state(&brw
->isl_dev
, dw
,
609 .address
= !bo
? buffer_offset
:
610 brw_emit_reloc(&brw
->batch
,
611 *out_offset
+ brw
->isl_dev
.ss
.addr_offset
,
615 .format
= surface_format
,
617 .mocs
= tex_mocs
[devinfo
->gen
]);
621 brw_update_buffer_texture_surface(struct gl_context
*ctx
,
623 uint32_t *surf_offset
)
625 struct brw_context
*brw
= brw_context(ctx
);
626 struct gl_texture_object
*tObj
= ctx
->Texture
.Unit
[unit
]._Current
;
627 struct intel_buffer_object
*intel_obj
=
628 intel_buffer_object(tObj
->BufferObject
);
629 uint32_t size
= tObj
->BufferSize
;
630 struct brw_bo
*bo
= NULL
;
631 mesa_format format
= tObj
->_BufferObjectFormat
;
632 const enum isl_format isl_format
= brw_isl_format_for_mesa_format(format
);
633 int texel_size
= _mesa_get_format_bytes(format
);
636 size
= MIN2(size
, intel_obj
->Base
.Size
);
637 bo
= intel_bufferobj_buffer(brw
, intel_obj
, tObj
->BufferOffset
, size
,
641 /* The ARB_texture_buffer_specification says:
643 * "The number of texels in the buffer texture's texel array is given by
645 * floor(<buffer_size> / (<components> * sizeof(<base_type>)),
647 * where <buffer_size> is the size of the buffer object, in basic
648 * machine units and <components> and <base_type> are the element count
649 * and base data type for elements, as specified in Table X.1. The
650 * number of texels in the texel array is then clamped to the
651 * implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
653 * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
654 * so that when ISL divides by stride to obtain the number of texels, that
655 * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
657 size
= MIN2(size
, ctx
->Const
.MaxTextureBufferSize
* (unsigned) texel_size
);
659 if (isl_format
== ISL_FORMAT_UNSUPPORTED
) {
660 _mesa_problem(NULL
, "bad format %s for texture buffer\n",
661 _mesa_get_format_name(format
));
664 brw_emit_buffer_surface_state(brw
, surf_offset
, bo
,
673 * Create the constant buffer surface. Vertex/fragment shader constants will be
674 * read from this buffer with Data Port Read instructions/messages.
677 brw_create_constant_surface(struct brw_context
*brw
,
681 uint32_t *out_offset
)
683 brw_emit_buffer_surface_state(brw
, out_offset
, bo
, offset
,
684 ISL_FORMAT_R32G32B32A32_FLOAT
,
689 * Create the buffer surface. Shader buffer variables will be
690 * read from / write to this buffer with Data Port Read/Write
691 * instructions/messages.
694 brw_create_buffer_surface(struct brw_context
*brw
,
698 uint32_t *out_offset
)
700 /* Use a raw surface so we can reuse existing untyped read/write/atomic
701 * messages. We need these specifically for the fragment shader since they
702 * include a pixel mask header that we need to ensure correct behavior
703 * with helper invocations, which cannot write to the buffer.
705 brw_emit_buffer_surface_state(brw
, out_offset
, bo
, offset
,
707 size
, 1, RELOC_WRITE
);
711 * Set up a binding table entry for use by stream output logic (transform
714 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
717 brw_update_sol_surface(struct brw_context
*brw
,
718 struct gl_buffer_object
*buffer_obj
,
719 uint32_t *out_offset
, unsigned num_vector_components
,
720 unsigned stride_dwords
, unsigned offset_dwords
)
722 struct intel_buffer_object
*intel_bo
= intel_buffer_object(buffer_obj
);
723 uint32_t offset_bytes
= 4 * offset_dwords
;
724 struct brw_bo
*bo
= intel_bufferobj_buffer(brw
, intel_bo
,
726 buffer_obj
->Size
- offset_bytes
,
728 uint32_t *surf
= brw_state_batch(brw
, 6 * 4, 32, out_offset
);
729 uint32_t pitch_minus_1
= 4*stride_dwords
- 1;
730 size_t size_dwords
= buffer_obj
->Size
/ 4;
731 uint32_t buffer_size_minus_1
, width
, height
, depth
, surface_format
;
733 /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
734 * too big to map using a single binding table entry?
736 assert((size_dwords
- offset_dwords
) / stride_dwords
737 <= BRW_MAX_NUM_BUFFER_ENTRIES
);
739 if (size_dwords
> offset_dwords
+ num_vector_components
) {
740 /* There is room for at least 1 transform feedback output in the buffer.
741 * Compute the number of additional transform feedback outputs the
742 * buffer has room for.
744 buffer_size_minus_1
=
745 (size_dwords
- offset_dwords
- num_vector_components
) / stride_dwords
;
747 /* There isn't even room for a single transform feedback output in the
748 * buffer. We can't configure the binding table entry to prevent output
749 * entirely; we'll have to rely on the geometry shader to detect
750 * overflow. But to minimize the damage in case of a bug, set up the
751 * binding table entry to just allow a single output.
753 buffer_size_minus_1
= 0;
755 width
= buffer_size_minus_1
& 0x7f;
756 height
= (buffer_size_minus_1
& 0xfff80) >> 7;
757 depth
= (buffer_size_minus_1
& 0x7f00000) >> 20;
759 switch (num_vector_components
) {
761 surface_format
= ISL_FORMAT_R32_FLOAT
;
764 surface_format
= ISL_FORMAT_R32G32_FLOAT
;
767 surface_format
= ISL_FORMAT_R32G32B32_FLOAT
;
770 surface_format
= ISL_FORMAT_R32G32B32A32_FLOAT
;
773 unreachable("Invalid vector size for transform feedback output");
776 surf
[0] = BRW_SURFACE_BUFFER
<< BRW_SURFACE_TYPE_SHIFT
|
777 BRW_SURFACE_MIPMAPLAYOUT_BELOW
<< BRW_SURFACE_MIPLAYOUT_SHIFT
|
778 surface_format
<< BRW_SURFACE_FORMAT_SHIFT
|
779 BRW_SURFACE_RC_READ_WRITE
;
780 surf
[1] = brw_emit_reloc(&brw
->batch
,
781 *out_offset
+ 4, bo
, offset_bytes
, RELOC_WRITE
);
782 surf
[2] = (width
<< BRW_SURFACE_WIDTH_SHIFT
|
783 height
<< BRW_SURFACE_HEIGHT_SHIFT
);
784 surf
[3] = (depth
<< BRW_SURFACE_DEPTH_SHIFT
|
785 pitch_minus_1
<< BRW_SURFACE_PITCH_SHIFT
);
790 /* Creates a new WM constant buffer reflecting the current fragment program's
791 * constants, if needed by the fragment program.
793 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
797 brw_upload_wm_pull_constants(struct brw_context
*brw
)
799 struct brw_stage_state
*stage_state
= &brw
->wm
.base
;
800 /* BRW_NEW_FRAGMENT_PROGRAM */
801 struct brw_program
*fp
= (struct brw_program
*) brw
->fragment_program
;
802 /* BRW_NEW_FS_PROG_DATA */
803 struct brw_stage_prog_data
*prog_data
= brw
->wm
.base
.prog_data
;
805 _mesa_shader_write_subroutine_indices(&brw
->ctx
, MESA_SHADER_FRAGMENT
);
806 /* _NEW_PROGRAM_CONSTANTS */
807 brw_upload_pull_constants(brw
, BRW_NEW_SURFACES
, &fp
->program
,
808 stage_state
, prog_data
);
811 const struct brw_tracked_state brw_wm_pull_constants
= {
813 .mesa
= _NEW_PROGRAM_CONSTANTS
,
814 .brw
= BRW_NEW_BATCH
|
815 BRW_NEW_FRAGMENT_PROGRAM
|
816 BRW_NEW_FS_PROG_DATA
,
818 .emit
= brw_upload_wm_pull_constants
,
822 * Creates a null renderbuffer surface.
824 * This is used when the shader doesn't write to any color output. An FB
825 * write to target 0 will still be emitted, because that's how the thread is
826 * terminated (and computed depth is returned), so we need to have the
827 * hardware discard the target 0 color output..
830 emit_null_surface_state(struct brw_context
*brw
,
831 const struct gl_framebuffer
*fb
,
832 uint32_t *out_offset
)
834 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
835 uint32_t *surf
= brw_state_batch(brw
,
836 brw
->isl_dev
.ss
.size
,
837 brw
->isl_dev
.ss
.align
,
840 /* Use the fb dimensions or 1x1x1 */
841 const unsigned width
= fb
? _mesa_geometric_width(fb
) : 1;
842 const unsigned height
= fb
? _mesa_geometric_height(fb
) : 1;
843 const unsigned samples
= fb
? _mesa_geometric_samples(fb
) : 1;
845 if (devinfo
->gen
!= 6 || samples
<= 1) {
846 isl_null_fill_state(&brw
->isl_dev
, surf
,
847 isl_extent3d(width
, height
, 1));
851 /* On Gen6, null render targets seem to cause GPU hangs when multisampling.
852 * So work around this problem by rendering into dummy color buffer.
854 * To decrease the amount of memory needed by the workaround buffer, we
855 * set its pitch to 128 bytes (the width of a Y tile). This means that
856 * the amount of memory needed for the workaround buffer is
857 * (width_in_tiles + height_in_tiles - 1) tiles.
859 * Note that since the workaround buffer will be interpreted by the
860 * hardware as an interleaved multisampled buffer, we need to compute
861 * width_in_tiles and height_in_tiles by dividing the width and height
862 * by 16 rather than the normal Y-tile size of 32.
864 unsigned width_in_tiles
= ALIGN(width
, 16) / 16;
865 unsigned height_in_tiles
= ALIGN(height
, 16) / 16;
866 unsigned pitch_minus_1
= 127;
867 unsigned size_needed
= (width_in_tiles
+ height_in_tiles
- 1) * 4096;
868 brw_get_scratch_bo(brw
, &brw
->wm
.multisampled_null_render_target_bo
,
871 surf
[0] = (BRW_SURFACE_2D
<< BRW_SURFACE_TYPE_SHIFT
|
872 ISL_FORMAT_B8G8R8A8_UNORM
<< BRW_SURFACE_FORMAT_SHIFT
);
873 surf
[1] = brw_emit_reloc(&brw
->batch
, *out_offset
+ 4,
874 brw
->wm
.multisampled_null_render_target_bo
,
877 surf
[2] = ((width
- 1) << BRW_SURFACE_WIDTH_SHIFT
|
878 (height
- 1) << BRW_SURFACE_HEIGHT_SHIFT
);
880 /* From Sandy bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
883 * If Surface Type is SURFTYPE_NULL, this field must be TRUE
885 surf
[3] = (BRW_SURFACE_TILED
| BRW_SURFACE_TILED_Y
|
886 pitch_minus_1
<< BRW_SURFACE_PITCH_SHIFT
);
887 surf
[4] = BRW_SURFACE_MULTISAMPLECOUNT_4
;
892 * Sets up a surface state structure to point at the given region.
893 * While it is only used for the front/back buffer currently, it should be
894 * usable for further buffers when doing ARB_draw_buffer support.
897 gen4_update_renderbuffer_surface(struct brw_context
*brw
,
898 struct gl_renderbuffer
*rb
,
902 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
903 struct gl_context
*ctx
= &brw
->ctx
;
904 struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
905 struct intel_mipmap_tree
*mt
= irb
->mt
;
907 uint32_t tile_x
, tile_y
;
908 enum isl_format format
;
911 mesa_format rb_format
= _mesa_get_render_format(ctx
, intel_rb_format(irb
));
912 /* BRW_NEW_FS_PROG_DATA */
914 if (rb
->TexImage
&& !devinfo
->has_surface_tile_offset
) {
915 intel_renderbuffer_get_tile_offsets(irb
, &tile_x
, &tile_y
);
917 if (tile_x
!= 0 || tile_y
!= 0) {
918 /* Original gen4 hardware couldn't draw to a non-tile-aligned
919 * destination in a miptree unless you actually setup your renderbuffer
920 * as a miptree and used the fragile lod/array_index/etc. controls to
921 * select the image. So, instead, we just make a new single-level
922 * miptree and render into that.
924 intel_renderbuffer_move_to_temp(brw
, irb
, false);
925 assert(irb
->align_wa_mt
);
926 mt
= irb
->align_wa_mt
;
930 surf
= brw_state_batch(brw
, 6 * 4, 32, &offset
);
932 format
= brw
->mesa_to_isl_render_format
[rb_format
];
933 if (unlikely(!brw
->mesa_format_supports_render
[rb_format
])) {
934 _mesa_problem(ctx
, "%s: renderbuffer format %s unsupported\n",
935 __func__
, _mesa_get_format_name(rb_format
));
938 surf
[0] = (BRW_SURFACE_2D
<< BRW_SURFACE_TYPE_SHIFT
|
939 format
<< BRW_SURFACE_FORMAT_SHIFT
);
942 assert(mt
->offset
% mt
->cpp
== 0);
943 surf
[1] = brw_emit_reloc(&brw
->batch
, offset
+ 4, mt
->bo
,
945 intel_renderbuffer_get_tile_offsets(irb
,
950 surf
[2] = ((rb
->Width
- 1) << BRW_SURFACE_WIDTH_SHIFT
|
951 (rb
->Height
- 1) << BRW_SURFACE_HEIGHT_SHIFT
);
953 surf
[3] = (brw_get_surface_tiling_bits(mt
->surf
.tiling
) |
954 (mt
->surf
.row_pitch
- 1) << BRW_SURFACE_PITCH_SHIFT
);
956 surf
[4] = brw_get_surface_num_multisamples(mt
->surf
.samples
);
958 assert(devinfo
->has_surface_tile_offset
|| (tile_x
== 0 && tile_y
== 0));
959 /* Note that the low bits of these fields are missing, so
960 * there's the possibility of getting in trouble.
962 assert(tile_x
% 4 == 0);
963 assert(tile_y
% 2 == 0);
964 surf
[5] = ((tile_x
/ 4) << BRW_SURFACE_X_OFFSET_SHIFT
|
965 (tile_y
/ 2) << BRW_SURFACE_Y_OFFSET_SHIFT
|
966 (mt
->surf
.image_alignment_el
.height
== 4 ?
967 BRW_SURFACE_VERTICAL_ALIGN_ENABLE
: 0));
969 if (devinfo
->gen
< 6) {
971 if (!ctx
->Color
.ColorLogicOpEnabled
&& !ctx
->Color
._AdvancedBlendMode
&&
972 (ctx
->Color
.BlendEnabled
& (1 << unit
)))
973 surf
[0] |= BRW_SURFACE_BLEND_ENABLED
;
975 if (!ctx
->Color
.ColorMask
[unit
][0])
976 surf
[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT
;
977 if (!ctx
->Color
.ColorMask
[unit
][1])
978 surf
[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT
;
979 if (!ctx
->Color
.ColorMask
[unit
][2])
980 surf
[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT
;
982 /* As mentioned above, disable writes to the alpha component when the
983 * renderbuffer is XRGB.
985 if (ctx
->DrawBuffer
->Visual
.alphaBits
== 0 ||
986 !ctx
->Color
.ColorMask
[unit
][3]) {
987 surf
[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT
;
995 update_renderbuffer_surfaces(struct brw_context
*brw
)
997 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
998 const struct gl_context
*ctx
= &brw
->ctx
;
1000 /* _NEW_BUFFERS | _NEW_COLOR */
1001 const struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
1003 /* Render targets always start at binding table index 0. */
1004 const unsigned rt_start
= 0;
1006 uint32_t *surf_offsets
= brw
->wm
.base
.surf_offset
;
1008 /* Update surfaces for drawing buffers */
1009 if (fb
->_NumColorDrawBuffers
>= 1) {
1010 for (unsigned i
= 0; i
< fb
->_NumColorDrawBuffers
; i
++) {
1011 struct gl_renderbuffer
*rb
= fb
->_ColorDrawBuffers
[i
];
1013 if (intel_renderbuffer(rb
)) {
1014 surf_offsets
[rt_start
+ i
] = devinfo
->gen
>= 6 ?
1015 gen6_update_renderbuffer_surface(brw
, rb
, i
, rt_start
+ i
) :
1016 gen4_update_renderbuffer_surface(brw
, rb
, i
, rt_start
+ i
);
1018 emit_null_surface_state(brw
, fb
, &surf_offsets
[rt_start
+ i
]);
1022 emit_null_surface_state(brw
, fb
, &surf_offsets
[rt_start
]);
1025 brw
->ctx
.NewDriverState
|= BRW_NEW_SURFACES
;
1028 const struct brw_tracked_state brw_renderbuffer_surfaces
= {
1030 .mesa
= _NEW_BUFFERS
|
1032 .brw
= BRW_NEW_BATCH
,
1034 .emit
= update_renderbuffer_surfaces
,
1037 const struct brw_tracked_state gen6_renderbuffer_surfaces
= {
1039 .mesa
= _NEW_BUFFERS
,
1040 .brw
= BRW_NEW_BATCH
|
1041 BRW_NEW_FAST_CLEAR_COLOR
,
1043 .emit
= update_renderbuffer_surfaces
,
1047 update_renderbuffer_read_surfaces(struct brw_context
*brw
)
1049 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
1050 const struct gl_context
*ctx
= &brw
->ctx
;
1052 /* BRW_NEW_FS_PROG_DATA */
1053 const struct brw_wm_prog_data
*wm_prog_data
=
1054 brw_wm_prog_data(brw
->wm
.base
.prog_data
);
1056 if (wm_prog_data
->has_render_target_reads
&&
1057 !ctx
->Extensions
.MESA_shader_framebuffer_fetch
) {
1059 const struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
1061 for (unsigned i
= 0; i
< fb
->_NumColorDrawBuffers
; i
++) {
1062 struct gl_renderbuffer
*rb
= fb
->_ColorDrawBuffers
[i
];
1063 const struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
1064 const unsigned surf_index
=
1065 wm_prog_data
->binding_table
.render_target_read_start
+ i
;
1066 uint32_t *surf_offset
= &brw
->wm
.base
.surf_offset
[surf_index
];
1069 const enum isl_format format
= brw
->mesa_to_isl_render_format
[
1070 _mesa_get_render_format(ctx
, intel_rb_format(irb
))];
1071 assert(isl_format_supports_sampling(&brw
->screen
->devinfo
,
1074 /* Override the target of the texture if the render buffer is a
1075 * single slice of a 3D texture (since the minimum array element
1076 * field of the surface state structure is ignored by the sampler
1077 * unit for 3D textures on some hardware), or if the render buffer
1078 * is a 1D array (since shaders always provide the array index
1079 * coordinate at the Z component to avoid state-dependent
1080 * recompiles when changing the texture target of the
1083 const GLenum target
=
1084 (irb
->mt
->target
== GL_TEXTURE_3D
&&
1085 irb
->layer_count
== 1) ? GL_TEXTURE_2D
:
1086 irb
->mt
->target
== GL_TEXTURE_1D_ARRAY
? GL_TEXTURE_2D_ARRAY
:
1089 const struct isl_view view
= {
1091 .base_level
= irb
->mt_level
- irb
->mt
->first_level
,
1093 .base_array_layer
= irb
->mt_layer
,
1094 .array_len
= irb
->layer_count
,
1095 .swizzle
= ISL_SWIZZLE_IDENTITY
,
1096 .usage
= ISL_SURF_USAGE_TEXTURE_BIT
,
1099 enum isl_aux_usage aux_usage
=
1100 intel_miptree_texture_aux_usage(brw
, irb
->mt
, format
);
1101 if (brw
->draw_aux_buffer_disabled
[i
])
1102 aux_usage
= ISL_AUX_USAGE_NONE
;
1104 brw_emit_surface_state(brw
, irb
->mt
, target
, view
, aux_usage
,
1105 tex_mocs
[devinfo
->gen
],
1106 surf_offset
, surf_index
,
1110 emit_null_surface_state(brw
, fb
, surf_offset
);
1114 brw
->ctx
.NewDriverState
|= BRW_NEW_SURFACES
;
1118 const struct brw_tracked_state brw_renderbuffer_read_surfaces
= {
1120 .mesa
= _NEW_BUFFERS
,
1121 .brw
= BRW_NEW_BATCH
|
1122 BRW_NEW_FAST_CLEAR_COLOR
|
1123 BRW_NEW_FS_PROG_DATA
,
1125 .emit
= update_renderbuffer_read_surfaces
,
1129 update_stage_texture_surfaces(struct brw_context
*brw
,
1130 const struct gl_program
*prog
,
1131 struct brw_stage_state
*stage_state
,
1132 bool for_gather
, uint32_t plane
)
1137 struct gl_context
*ctx
= &brw
->ctx
;
1139 uint32_t *surf_offset
= stage_state
->surf_offset
;
1141 /* BRW_NEW_*_PROG_DATA */
1143 surf_offset
+= stage_state
->prog_data
->binding_table
.gather_texture_start
;
1145 surf_offset
+= stage_state
->prog_data
->binding_table
.plane_start
[plane
];
1147 unsigned num_samplers
= util_last_bit(prog
->SamplersUsed
);
1148 for (unsigned s
= 0; s
< num_samplers
; s
++) {
1151 if (prog
->SamplersUsed
& (1 << s
)) {
1152 const unsigned unit
= prog
->SamplerUnits
[s
];
1155 if (ctx
->Texture
.Unit
[unit
]._Current
) {
1156 brw_update_texture_surface(ctx
, unit
, surf_offset
+ s
, for_gather
, plane
);
1164 * Construct SURFACE_STATE objects for enabled textures.
1167 brw_update_texture_surfaces(struct brw_context
*brw
)
1169 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
1171 /* BRW_NEW_VERTEX_PROGRAM */
1172 struct gl_program
*vs
= (struct gl_program
*) brw
->vertex_program
;
1174 /* BRW_NEW_TESS_PROGRAMS */
1175 struct gl_program
*tcs
= (struct gl_program
*) brw
->tess_ctrl_program
;
1176 struct gl_program
*tes
= (struct gl_program
*) brw
->tess_eval_program
;
1178 /* BRW_NEW_GEOMETRY_PROGRAM */
1179 struct gl_program
*gs
= (struct gl_program
*) brw
->geometry_program
;
1181 /* BRW_NEW_FRAGMENT_PROGRAM */
1182 struct gl_program
*fs
= (struct gl_program
*) brw
->fragment_program
;
1185 update_stage_texture_surfaces(brw
, vs
, &brw
->vs
.base
, false, 0);
1186 update_stage_texture_surfaces(brw
, tcs
, &brw
->tcs
.base
, false, 0);
1187 update_stage_texture_surfaces(brw
, tes
, &brw
->tes
.base
, false, 0);
1188 update_stage_texture_surfaces(brw
, gs
, &brw
->gs
.base
, false, 0);
1189 update_stage_texture_surfaces(brw
, fs
, &brw
->wm
.base
, false, 0);
1191 /* emit alternate set of surface state for gather. this
1192 * allows the surface format to be overriden for only the
1193 * gather4 messages. */
1194 if (devinfo
->gen
< 8) {
1195 if (vs
&& vs
->nir
->info
.uses_texture_gather
)
1196 update_stage_texture_surfaces(brw
, vs
, &brw
->vs
.base
, true, 0);
1197 if (tcs
&& tcs
->nir
->info
.uses_texture_gather
)
1198 update_stage_texture_surfaces(brw
, tcs
, &brw
->tcs
.base
, true, 0);
1199 if (tes
&& tes
->nir
->info
.uses_texture_gather
)
1200 update_stage_texture_surfaces(brw
, tes
, &brw
->tes
.base
, true, 0);
1201 if (gs
&& gs
->nir
->info
.uses_texture_gather
)
1202 update_stage_texture_surfaces(brw
, gs
, &brw
->gs
.base
, true, 0);
1203 if (fs
&& fs
->nir
->info
.uses_texture_gather
)
1204 update_stage_texture_surfaces(brw
, fs
, &brw
->wm
.base
, true, 0);
1208 update_stage_texture_surfaces(brw
, fs
, &brw
->wm
.base
, false, 1);
1209 update_stage_texture_surfaces(brw
, fs
, &brw
->wm
.base
, false, 2);
1212 brw
->ctx
.NewDriverState
|= BRW_NEW_SURFACES
;
1215 const struct brw_tracked_state brw_texture_surfaces
= {
1217 .mesa
= _NEW_TEXTURE
,
1218 .brw
= BRW_NEW_BATCH
|
1219 BRW_NEW_FAST_CLEAR_COLOR
|
1220 BRW_NEW_FRAGMENT_PROGRAM
|
1221 BRW_NEW_FS_PROG_DATA
|
1222 BRW_NEW_GEOMETRY_PROGRAM
|
1223 BRW_NEW_GS_PROG_DATA
|
1224 BRW_NEW_TESS_PROGRAMS
|
1225 BRW_NEW_TCS_PROG_DATA
|
1226 BRW_NEW_TES_PROG_DATA
|
1227 BRW_NEW_TEXTURE_BUFFER
|
1228 BRW_NEW_VERTEX_PROGRAM
|
1229 BRW_NEW_VS_PROG_DATA
,
1231 .emit
= brw_update_texture_surfaces
,
1235 brw_update_cs_texture_surfaces(struct brw_context
*brw
)
1237 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
1239 /* BRW_NEW_COMPUTE_PROGRAM */
1240 struct gl_program
*cs
= (struct gl_program
*) brw
->compute_program
;
1243 update_stage_texture_surfaces(brw
, cs
, &brw
->cs
.base
, false, 0);
1245 /* emit alternate set of surface state for gather. this
1246 * allows the surface format to be overriden for only the
1249 if (devinfo
->gen
< 8) {
1250 if (cs
&& cs
->nir
->info
.uses_texture_gather
)
1251 update_stage_texture_surfaces(brw
, cs
, &brw
->cs
.base
, true, 0);
1254 brw
->ctx
.NewDriverState
|= BRW_NEW_SURFACES
;
1257 const struct brw_tracked_state brw_cs_texture_surfaces
= {
1259 .mesa
= _NEW_TEXTURE
,
1260 .brw
= BRW_NEW_BATCH
|
1261 BRW_NEW_COMPUTE_PROGRAM
|
1262 BRW_NEW_FAST_CLEAR_COLOR
,
1264 .emit
= brw_update_cs_texture_surfaces
,
1269 brw_upload_ubo_surfaces(struct brw_context
*brw
, struct gl_program
*prog
,
1270 struct brw_stage_state
*stage_state
,
1271 struct brw_stage_prog_data
*prog_data
)
1273 struct gl_context
*ctx
= &brw
->ctx
;
1278 uint32_t *ubo_surf_offsets
=
1279 &stage_state
->surf_offset
[prog_data
->binding_table
.ubo_start
];
1281 for (int i
= 0; i
< prog
->info
.num_ubos
; i
++) {
1282 struct gl_uniform_buffer_binding
*binding
=
1283 &ctx
->UniformBufferBindings
[prog
->sh
.UniformBlocks
[i
]->Binding
];
1285 if (binding
->BufferObject
== ctx
->Shared
->NullBufferObj
) {
1286 emit_null_surface_state(brw
, NULL
, &ubo_surf_offsets
[i
]);
1288 struct intel_buffer_object
*intel_bo
=
1289 intel_buffer_object(binding
->BufferObject
);
1290 GLsizeiptr size
= binding
->BufferObject
->Size
- binding
->Offset
;
1291 if (!binding
->AutomaticSize
)
1292 size
= MIN2(size
, binding
->Size
);
1294 intel_bufferobj_buffer(brw
, intel_bo
,
1297 brw_create_constant_surface(brw
, bo
, binding
->Offset
,
1299 &ubo_surf_offsets
[i
]);
1303 uint32_t *ssbo_surf_offsets
=
1304 &stage_state
->surf_offset
[prog_data
->binding_table
.ssbo_start
];
1306 for (int i
= 0; i
< prog
->info
.num_ssbos
; i
++) {
1307 struct gl_shader_storage_buffer_binding
*binding
=
1308 &ctx
->ShaderStorageBufferBindings
[prog
->sh
.ShaderStorageBlocks
[i
]->Binding
];
1310 if (binding
->BufferObject
== ctx
->Shared
->NullBufferObj
) {
1311 emit_null_surface_state(brw
, NULL
, &ssbo_surf_offsets
[i
]);
1313 struct intel_buffer_object
*intel_bo
=
1314 intel_buffer_object(binding
->BufferObject
);
1315 GLsizeiptr size
= binding
->BufferObject
->Size
- binding
->Offset
;
1316 if (!binding
->AutomaticSize
)
1317 size
= MIN2(size
, binding
->Size
);
1319 intel_bufferobj_buffer(brw
, intel_bo
,
1322 brw_create_buffer_surface(brw
, bo
, binding
->Offset
,
1324 &ssbo_surf_offsets
[i
]);
1328 stage_state
->push_constants_dirty
= true;
1330 if (prog
->info
.num_ubos
|| prog
->info
.num_ssbos
)
1331 brw
->ctx
.NewDriverState
|= BRW_NEW_SURFACES
;
1335 brw_upload_wm_ubo_surfaces(struct brw_context
*brw
)
1337 struct gl_context
*ctx
= &brw
->ctx
;
1339 struct gl_program
*prog
= ctx
->FragmentProgram
._Current
;
1341 /* BRW_NEW_FS_PROG_DATA */
1342 brw_upload_ubo_surfaces(brw
, prog
, &brw
->wm
.base
, brw
->wm
.base
.prog_data
);
1345 const struct brw_tracked_state brw_wm_ubo_surfaces
= {
1347 .mesa
= _NEW_PROGRAM
,
1348 .brw
= BRW_NEW_BATCH
|
1349 BRW_NEW_FS_PROG_DATA
|
1350 BRW_NEW_UNIFORM_BUFFER
,
1352 .emit
= brw_upload_wm_ubo_surfaces
,
1356 brw_upload_cs_ubo_surfaces(struct brw_context
*brw
)
1358 struct gl_context
*ctx
= &brw
->ctx
;
1360 struct gl_program
*prog
=
1361 ctx
->_Shader
->CurrentProgram
[MESA_SHADER_COMPUTE
];
1363 /* BRW_NEW_CS_PROG_DATA */
1364 brw_upload_ubo_surfaces(brw
, prog
, &brw
->cs
.base
, brw
->cs
.base
.prog_data
);
1367 const struct brw_tracked_state brw_cs_ubo_surfaces
= {
1369 .mesa
= _NEW_PROGRAM
,
1370 .brw
= BRW_NEW_BATCH
|
1371 BRW_NEW_CS_PROG_DATA
|
1372 BRW_NEW_UNIFORM_BUFFER
,
1374 .emit
= brw_upload_cs_ubo_surfaces
,
1378 brw_upload_abo_surfaces(struct brw_context
*brw
,
1379 const struct gl_program
*prog
,
1380 struct brw_stage_state
*stage_state
,
1381 struct brw_stage_prog_data
*prog_data
)
1383 struct gl_context
*ctx
= &brw
->ctx
;
1384 uint32_t *surf_offsets
=
1385 &stage_state
->surf_offset
[prog_data
->binding_table
.abo_start
];
1387 if (prog
->info
.num_abos
) {
1388 for (unsigned i
= 0; i
< prog
->info
.num_abos
; i
++) {
1389 struct gl_atomic_buffer_binding
*binding
=
1390 &ctx
->AtomicBufferBindings
[prog
->sh
.AtomicBuffers
[i
]->Binding
];
1391 struct intel_buffer_object
*intel_bo
=
1392 intel_buffer_object(binding
->BufferObject
);
1394 intel_bufferobj_buffer(brw
, intel_bo
, binding
->Offset
,
1395 intel_bo
->Base
.Size
- binding
->Offset
,
1398 brw_emit_buffer_surface_state(brw
, &surf_offsets
[i
], bo
,
1399 binding
->Offset
, ISL_FORMAT_RAW
,
1400 bo
->size
- binding
->Offset
, 1,
1404 brw
->ctx
.NewDriverState
|= BRW_NEW_SURFACES
;
1409 brw_upload_wm_abo_surfaces(struct brw_context
*brw
)
1412 const struct gl_program
*wm
= brw
->fragment_program
;
1415 /* BRW_NEW_FS_PROG_DATA */
1416 brw_upload_abo_surfaces(brw
, wm
, &brw
->wm
.base
, brw
->wm
.base
.prog_data
);
1420 const struct brw_tracked_state brw_wm_abo_surfaces
= {
1422 .mesa
= _NEW_PROGRAM
,
1423 .brw
= BRW_NEW_ATOMIC_BUFFER
|
1425 BRW_NEW_FS_PROG_DATA
,
1427 .emit
= brw_upload_wm_abo_surfaces
,
1431 brw_upload_cs_abo_surfaces(struct brw_context
*brw
)
1434 const struct gl_program
*cp
= brw
->compute_program
;
1437 /* BRW_NEW_CS_PROG_DATA */
1438 brw_upload_abo_surfaces(brw
, cp
, &brw
->cs
.base
, brw
->cs
.base
.prog_data
);
1442 const struct brw_tracked_state brw_cs_abo_surfaces
= {
1444 .mesa
= _NEW_PROGRAM
,
1445 .brw
= BRW_NEW_ATOMIC_BUFFER
|
1447 BRW_NEW_CS_PROG_DATA
,
1449 .emit
= brw_upload_cs_abo_surfaces
,
1453 brw_upload_cs_image_surfaces(struct brw_context
*brw
)
1456 const struct gl_program
*cp
= brw
->compute_program
;
1459 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1460 brw_upload_image_surfaces(brw
, cp
, &brw
->cs
.base
,
1461 brw
->cs
.base
.prog_data
);
1465 const struct brw_tracked_state brw_cs_image_surfaces
= {
1467 .mesa
= _NEW_TEXTURE
| _NEW_PROGRAM
,
1468 .brw
= BRW_NEW_BATCH
|
1469 BRW_NEW_CS_PROG_DATA
|
1470 BRW_NEW_FAST_CLEAR_COLOR
|
1473 .emit
= brw_upload_cs_image_surfaces
,
1477 get_image_format(struct brw_context
*brw
, mesa_format format
, GLenum access
)
1479 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
1480 enum isl_format hw_format
= brw_isl_format_for_mesa_format(format
);
1481 if (access
== GL_WRITE_ONLY
) {
1483 } else if (isl_has_matching_typed_storage_image_format(devinfo
, hw_format
)) {
1484 /* Typed surface reads support a very limited subset of the shader
1485 * image formats. Translate it into the closest format the
1486 * hardware supports.
1488 return isl_lower_storage_image_format(devinfo
, hw_format
);
1490 /* The hardware doesn't actually support a typed format that we can use
1491 * so we have to fall back to untyped read/write messages.
1493 return ISL_FORMAT_RAW
;
1498 update_default_image_param(struct brw_context
*brw
,
1499 struct gl_image_unit
*u
,
1500 unsigned surface_idx
,
1501 struct brw_image_param
*param
)
1503 memset(param
, 0, sizeof(*param
));
1504 param
->surface_idx
= surface_idx
;
1505 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
1506 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
1507 * detailed explanation of these parameters.
1509 param
->swizzling
[0] = 0xff;
1510 param
->swizzling
[1] = 0xff;
1514 update_buffer_image_param(struct brw_context
*brw
,
1515 struct gl_image_unit
*u
,
1516 unsigned surface_idx
,
1517 struct brw_image_param
*param
)
1519 struct gl_buffer_object
*obj
= u
->TexObj
->BufferObject
;
1520 const uint32_t size
= MIN2((uint32_t)u
->TexObj
->BufferSize
, obj
->Size
);
1521 update_default_image_param(brw
, u
, surface_idx
, param
);
1523 param
->size
[0] = size
/ _mesa_get_format_bytes(u
->_ActualFormat
);
1524 param
->stride
[0] = _mesa_get_format_bytes(u
->_ActualFormat
);
1528 get_image_num_layers(const struct intel_mipmap_tree
*mt
, GLenum target
,
1531 if (target
== GL_TEXTURE_CUBE_MAP
)
1534 return target
== GL_TEXTURE_3D
?
1535 minify(mt
->surf
.logical_level0_px
.depth
, level
) :
1536 mt
->surf
.logical_level0_px
.array_len
;
1540 update_image_surface(struct brw_context
*brw
,
1541 struct gl_image_unit
*u
,
1543 unsigned surface_idx
,
1544 uint32_t *surf_offset
,
1545 struct brw_image_param
*param
)
1547 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
1549 if (_mesa_is_image_unit_valid(&brw
->ctx
, u
)) {
1550 struct gl_texture_object
*obj
= u
->TexObj
;
1551 const unsigned format
= get_image_format(brw
, u
->_ActualFormat
, access
);
1553 if (obj
->Target
== GL_TEXTURE_BUFFER
) {
1554 struct intel_buffer_object
*intel_obj
=
1555 intel_buffer_object(obj
->BufferObject
);
1556 const unsigned texel_size
= (format
== ISL_FORMAT_RAW
? 1 :
1557 _mesa_get_format_bytes(u
->_ActualFormat
));
1559 brw_emit_buffer_surface_state(
1560 brw
, surf_offset
, intel_obj
->buffer
, obj
->BufferOffset
,
1561 format
, intel_obj
->Base
.Size
, texel_size
,
1562 access
!= GL_READ_ONLY
? RELOC_WRITE
: 0);
1564 update_buffer_image_param(brw
, u
, surface_idx
, param
);
1567 struct intel_texture_object
*intel_obj
= intel_texture_object(obj
);
1568 struct intel_mipmap_tree
*mt
= intel_obj
->mt
;
1569 const unsigned num_layers
= u
->Layered
?
1570 get_image_num_layers(mt
, obj
->Target
, u
->Level
) : 1;
1572 struct isl_view view
= {
1574 .base_level
= obj
->MinLevel
+ u
->Level
,
1576 .base_array_layer
= obj
->MinLayer
+ u
->_Layer
,
1577 .array_len
= num_layers
,
1578 .swizzle
= ISL_SWIZZLE_IDENTITY
,
1579 .usage
= ISL_SURF_USAGE_STORAGE_BIT
,
1582 if (format
== ISL_FORMAT_RAW
) {
1583 brw_emit_buffer_surface_state(
1584 brw
, surf_offset
, mt
->bo
, mt
->offset
,
1585 format
, mt
->bo
->size
- mt
->offset
, 1 /* pitch */,
1586 access
!= GL_READ_ONLY
? RELOC_WRITE
: 0);
1589 const int surf_index
= surf_offset
- &brw
->wm
.base
.surf_offset
[0];
1590 assert(!intel_miptree_has_color_unresolved(mt
,
1592 view
.base_array_layer
,
1594 brw_emit_surface_state(brw
, mt
, mt
->target
, view
,
1595 ISL_AUX_USAGE_NONE
, tex_mocs
[devinfo
->gen
],
1596 surf_offset
, surf_index
,
1597 access
== GL_READ_ONLY
? 0 : RELOC_WRITE
);
1600 isl_surf_fill_image_param(&brw
->isl_dev
, param
, &mt
->surf
, &view
);
1601 param
->surface_idx
= surface_idx
;
1605 emit_null_surface_state(brw
, NULL
, surf_offset
);
1606 update_default_image_param(brw
, u
, surface_idx
, param
);
1611 brw_upload_image_surfaces(struct brw_context
*brw
,
1612 const struct gl_program
*prog
,
1613 struct brw_stage_state
*stage_state
,
1614 struct brw_stage_prog_data
*prog_data
)
1617 struct gl_context
*ctx
= &brw
->ctx
;
1619 if (prog
->info
.num_images
) {
1620 for (unsigned i
= 0; i
< prog
->info
.num_images
; i
++) {
1621 struct gl_image_unit
*u
= &ctx
->ImageUnits
[prog
->sh
.ImageUnits
[i
]];
1622 const unsigned surf_idx
= prog_data
->binding_table
.image_start
+ i
;
1624 update_image_surface(brw
, u
, prog
->sh
.ImageAccess
[i
],
1626 &stage_state
->surf_offset
[surf_idx
],
1627 &prog_data
->image_param
[i
]);
1630 brw
->ctx
.NewDriverState
|= BRW_NEW_SURFACES
;
1631 /* This may have changed the image metadata dependent on the context
1632 * image unit state and passed to the program as uniforms, make sure
1633 * that push and pull constants are reuploaded.
1635 brw
->NewGLState
|= _NEW_PROGRAM_CONSTANTS
;
1640 brw_upload_wm_image_surfaces(struct brw_context
*brw
)
1642 /* BRW_NEW_FRAGMENT_PROGRAM */
1643 const struct gl_program
*wm
= brw
->fragment_program
;
1646 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1647 brw_upload_image_surfaces(brw
, wm
, &brw
->wm
.base
,
1648 brw
->wm
.base
.prog_data
);
1652 const struct brw_tracked_state brw_wm_image_surfaces
= {
1654 .mesa
= _NEW_TEXTURE
,
1655 .brw
= BRW_NEW_BATCH
|
1656 BRW_NEW_FAST_CLEAR_COLOR
|
1657 BRW_NEW_FRAGMENT_PROGRAM
|
1658 BRW_NEW_FS_PROG_DATA
|
1661 .emit
= brw_upload_wm_image_surfaces
,
1665 brw_upload_cs_work_groups_surface(struct brw_context
*brw
)
1667 struct gl_context
*ctx
= &brw
->ctx
;
1669 struct gl_program
*prog
=
1670 ctx
->_Shader
->CurrentProgram
[MESA_SHADER_COMPUTE
];
1671 /* BRW_NEW_CS_PROG_DATA */
1672 const struct brw_cs_prog_data
*cs_prog_data
=
1673 brw_cs_prog_data(brw
->cs
.base
.prog_data
);
1675 if (prog
&& cs_prog_data
->uses_num_work_groups
) {
1676 const unsigned surf_idx
=
1677 cs_prog_data
->binding_table
.work_groups_start
;
1678 uint32_t *surf_offset
= &brw
->cs
.base
.surf_offset
[surf_idx
];
1682 if (brw
->compute
.num_work_groups_bo
== NULL
) {
1684 intel_upload_data(brw
,
1685 (void *)brw
->compute
.num_work_groups
,
1691 bo
= brw
->compute
.num_work_groups_bo
;
1692 bo_offset
= brw
->compute
.num_work_groups_offset
;
1695 brw_emit_buffer_surface_state(brw
, surf_offset
,
1698 3 * sizeof(GLuint
), 1,
1700 brw
->ctx
.NewDriverState
|= BRW_NEW_SURFACES
;
1704 const struct brw_tracked_state brw_cs_work_groups_surface
= {
1706 .brw
= BRW_NEW_CS_PROG_DATA
|
1707 BRW_NEW_CS_WORK_GROUPS
1709 .emit
= brw_upload_cs_work_groups_surface
,