2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **********************************************************************/
29 * Keith Whitwell <keithw@vmware.com>
33 #include "main/context.h"
34 #include "main/blend.h"
35 #include "main/mtypes.h"
36 #include "main/samplerobj.h"
37 #include "main/shaderimage.h"
38 #include "main/teximage.h"
39 #include "program/prog_parameter.h"
40 #include "program/prog_instruction.h"
41 #include "main/framebuffer.h"
45 #include "intel_mipmap_tree.h"
46 #include "intel_batchbuffer.h"
47 #include "intel_tex.h"
48 #include "intel_fbo.h"
49 #include "intel_buffer_objects.h"
51 #include "brw_context.h"
52 #include "brw_state.h"
53 #include "brw_defines.h"
56 struct surface_state_info
{
58 unsigned ss_align
; /* Required alignment of RENDER_SURFACE_STATE in bytes */
60 unsigned aux_reloc_dw
;
65 static const struct surface_state_info surface_state_infos
[] = {
69 [7] = {8, 32, 1, 6, GEN7_MOCS_L3
, GEN7_MOCS_L3
},
70 [8] = {13, 64, 8, 10, BDW_MOCS_WB
, BDW_MOCS_PTE
},
71 [9] = {16, 64, 8, 10, SKL_MOCS_WB
, SKL_MOCS_PTE
},
75 brw_emit_surface_state(struct brw_context
*brw
,
76 struct intel_mipmap_tree
*mt
,
77 const struct isl_view
*view
,
78 uint32_t mocs
, bool for_gather
,
79 uint32_t *surf_offset
, int surf_index
,
80 unsigned read_domains
, unsigned write_domains
)
82 const struct surface_state_info ss_info
= surface_state_infos
[brw
->gen
];
85 intel_miptree_get_isl_surf(brw
, mt
, &surf
);
87 union isl_color_value clear_color
= { .u32
= { 0, 0, 0, 0 } };
89 struct isl_surf
*aux_surf
= NULL
, aux_surf_s
;
90 uint64_t aux_offset
= 0;
91 enum isl_aux_usage aux_usage
= ISL_AUX_USAGE_NONE
;
93 ((view
->usage
& ISL_SURF_USAGE_RENDER_TARGET_BIT
) ||
94 mt
->fast_clear_state
!= INTEL_FAST_CLEAR_STATE_RESOLVED
)) {
95 intel_miptree_get_aux_isl_surf(brw
, mt
, &aux_surf_s
, &aux_usage
);
96 aux_surf
= &aux_surf_s
;
97 assert(mt
->mcs_mt
->offset
== 0);
98 aux_offset
= mt
->mcs_mt
->bo
->offset64
;
100 /* We only really need a clear color if we also have an auxiliary
101 * surface. Without one, it does nothing.
103 clear_color
= intel_miptree_get_isl_clear_color(brw
, mt
);
106 uint32_t *dw
= __brw_state_batch(brw
, AUB_TRACE_SURFACE_STATE
,
107 ss_info
.num_dwords
* 4, ss_info
.ss_align
,
108 surf_index
, surf_offset
);
110 isl_surf_fill_state(&brw
->isl_dev
, dw
, .surf
= &surf
, .view
= view
,
111 .address
= mt
->bo
->offset64
+ mt
->offset
,
112 .aux_surf
= aux_surf
, .aux_usage
= aux_usage
,
113 .aux_address
= aux_offset
,
114 .mocs
= mocs
, .clear_color
= clear_color
);
116 drm_intel_bo_emit_reloc(brw
->batch
.bo
,
117 *surf_offset
+ 4 * ss_info
.reloc_dw
,
119 read_domains
, write_domains
);
122 /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
123 * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
124 * contain other control information. Since buffer addresses are always
125 * on 4k boundaries (and thus have their lower 12 bits zero), we can use
126 * an ordinary reloc to do the necessary address translation.
128 assert((aux_offset
& 0xfff) == 0);
129 drm_intel_bo_emit_reloc(brw
->batch
.bo
,
130 *surf_offset
+ 4 * ss_info
.aux_reloc_dw
,
131 mt
->mcs_mt
->bo
, dw
[ss_info
.aux_reloc_dw
] & 0xfff,
132 read_domains
, write_domains
);
137 brw_update_renderbuffer_surface(struct brw_context
*brw
,
138 struct gl_renderbuffer
*rb
,
139 bool layered
, unsigned unit
/* unused */,
142 struct gl_context
*ctx
= &brw
->ctx
;
143 struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
144 struct intel_mipmap_tree
*mt
= irb
->mt
;
146 assert(brw_render_target_supported(brw
, rb
));
147 intel_miptree_used_for_rendering(mt
);
149 mesa_format rb_format
= _mesa_get_render_format(ctx
, intel_rb_format(irb
));
150 if (unlikely(!brw
->format_supported_as_render_target
[rb_format
])) {
151 _mesa_problem(ctx
, "%s: renderbuffer format %s unsupported\n",
152 __func__
, _mesa_get_format_name(rb_format
));
155 const unsigned layer_multiplier
=
156 (irb
->mt
->msaa_layout
== INTEL_MSAA_LAYOUT_UMS
||
157 irb
->mt
->msaa_layout
== INTEL_MSAA_LAYOUT_CMS
) ?
158 MAX2(irb
->mt
->num_samples
, 1) : 1;
160 struct isl_view view
= {
161 .format
= brw
->render_target_format
[rb_format
],
162 .base_level
= irb
->mt_level
- irb
->mt
->first_level
,
164 .base_array_layer
= irb
->mt_layer
/ layer_multiplier
,
165 .array_len
= MAX2(irb
->layer_count
, 1),
167 ISL_CHANNEL_SELECT_RED
,
168 ISL_CHANNEL_SELECT_GREEN
,
169 ISL_CHANNEL_SELECT_BLUE
,
170 ISL_CHANNEL_SELECT_ALPHA
,
172 .usage
= ISL_SURF_USAGE_RENDER_TARGET_BIT
,
176 brw_emit_surface_state(brw
, mt
, &view
,
177 surface_state_infos
[brw
->gen
].rb_mocs
, false,
179 I915_GEM_DOMAIN_RENDER
,
180 I915_GEM_DOMAIN_RENDER
);
185 translate_tex_target(GLenum target
)
189 case GL_TEXTURE_1D_ARRAY_EXT
:
190 return BRW_SURFACE_1D
;
192 case GL_TEXTURE_RECTANGLE_NV
:
193 return BRW_SURFACE_2D
;
196 case GL_TEXTURE_2D_ARRAY_EXT
:
197 case GL_TEXTURE_EXTERNAL_OES
:
198 case GL_TEXTURE_2D_MULTISAMPLE
:
199 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY
:
200 return BRW_SURFACE_2D
;
203 return BRW_SURFACE_3D
;
205 case GL_TEXTURE_CUBE_MAP
:
206 case GL_TEXTURE_CUBE_MAP_ARRAY
:
207 return BRW_SURFACE_CUBE
;
210 unreachable("not reached");
215 brw_get_surface_tiling_bits(uint32_t tiling
)
219 return BRW_SURFACE_TILED
;
221 return BRW_SURFACE_TILED
| BRW_SURFACE_TILED_Y
;
229 brw_get_surface_num_multisamples(unsigned num_samples
)
232 return BRW_SURFACE_MULTISAMPLECOUNT_4
;
234 return BRW_SURFACE_MULTISAMPLECOUNT_1
;
238 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
242 brw_get_texture_swizzle(const struct gl_context
*ctx
,
243 const struct gl_texture_object
*t
)
245 const struct gl_texture_image
*img
= t
->Image
[0][t
->BaseLevel
];
247 int swizzles
[SWIZZLE_NIL
+ 1] = {
257 if (img
->_BaseFormat
== GL_DEPTH_COMPONENT
||
258 img
->_BaseFormat
== GL_DEPTH_STENCIL
) {
259 GLenum depth_mode
= t
->DepthMode
;
261 /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
262 * with depth component data specified with a sized internal format.
263 * Otherwise, it's left at the old default, GL_LUMINANCE.
265 if (_mesa_is_gles3(ctx
) &&
266 img
->InternalFormat
!= GL_DEPTH_COMPONENT
&&
267 img
->InternalFormat
!= GL_DEPTH_STENCIL
) {
271 switch (depth_mode
) {
273 swizzles
[0] = SWIZZLE_ZERO
;
274 swizzles
[1] = SWIZZLE_ZERO
;
275 swizzles
[2] = SWIZZLE_ZERO
;
276 swizzles
[3] = SWIZZLE_X
;
279 swizzles
[0] = SWIZZLE_X
;
280 swizzles
[1] = SWIZZLE_X
;
281 swizzles
[2] = SWIZZLE_X
;
282 swizzles
[3] = SWIZZLE_ONE
;
285 swizzles
[0] = SWIZZLE_X
;
286 swizzles
[1] = SWIZZLE_X
;
287 swizzles
[2] = SWIZZLE_X
;
288 swizzles
[3] = SWIZZLE_X
;
291 swizzles
[0] = SWIZZLE_X
;
292 swizzles
[1] = SWIZZLE_ZERO
;
293 swizzles
[2] = SWIZZLE_ZERO
;
294 swizzles
[3] = SWIZZLE_ONE
;
299 GLenum datatype
= _mesa_get_format_datatype(img
->TexFormat
);
301 /* If the texture's format is alpha-only, force R, G, and B to
302 * 0.0. Similarly, if the texture's format has no alpha channel,
303 * force the alpha value read to 1.0. This allows for the
304 * implementation to use an RGBA texture for any of these formats
305 * without leaking any unexpected values.
307 switch (img
->_BaseFormat
) {
309 swizzles
[0] = SWIZZLE_ZERO
;
310 swizzles
[1] = SWIZZLE_ZERO
;
311 swizzles
[2] = SWIZZLE_ZERO
;
314 if (t
->_IsIntegerFormat
|| datatype
== GL_SIGNED_NORMALIZED
) {
315 swizzles
[0] = SWIZZLE_X
;
316 swizzles
[1] = SWIZZLE_X
;
317 swizzles
[2] = SWIZZLE_X
;
318 swizzles
[3] = SWIZZLE_ONE
;
321 case GL_LUMINANCE_ALPHA
:
322 if (datatype
== GL_SIGNED_NORMALIZED
) {
323 swizzles
[0] = SWIZZLE_X
;
324 swizzles
[1] = SWIZZLE_X
;
325 swizzles
[2] = SWIZZLE_X
;
326 swizzles
[3] = SWIZZLE_W
;
330 if (datatype
== GL_SIGNED_NORMALIZED
) {
331 swizzles
[0] = SWIZZLE_X
;
332 swizzles
[1] = SWIZZLE_X
;
333 swizzles
[2] = SWIZZLE_X
;
334 swizzles
[3] = SWIZZLE_X
;
340 if (_mesa_get_format_bits(img
->TexFormat
, GL_ALPHA_BITS
) > 0)
341 swizzles
[3] = SWIZZLE_ONE
;
345 return MAKE_SWIZZLE4(swizzles
[GET_SWZ(t
->_Swizzle
, 0)],
346 swizzles
[GET_SWZ(t
->_Swizzle
, 1)],
347 swizzles
[GET_SWZ(t
->_Swizzle
, 2)],
348 swizzles
[GET_SWZ(t
->_Swizzle
, 3)]);
352 * Convert an swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
353 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED). The mappings are
355 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
358 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
360 * which is simply adding 4 then modding by 8 (or anding with 7).
362 * We then may need to apply workarounds for textureGather hardware bugs.
365 swizzle_to_scs(GLenum swizzle
, bool need_green_to_blue
)
367 unsigned scs
= (swizzle
+ 4) & 7;
369 return (need_green_to_blue
&& scs
== HSW_SCS_GREEN
) ? HSW_SCS_BLUE
: scs
;
373 brw_update_texture_surface(struct gl_context
*ctx
,
375 uint32_t *surf_offset
,
379 struct brw_context
*brw
= brw_context(ctx
);
380 struct gl_texture_object
*obj
= ctx
->Texture
.Unit
[unit
]._Current
;
382 if (obj
->Target
== GL_TEXTURE_BUFFER
) {
383 brw_update_buffer_texture_surface(ctx
, unit
, surf_offset
);
386 struct intel_texture_object
*intel_obj
= intel_texture_object(obj
);
387 struct intel_mipmap_tree
*mt
= intel_obj
->mt
;
388 struct gl_sampler_object
*sampler
= _mesa_get_samplerobj(ctx
, unit
);
389 /* If this is a view with restricted NumLayers, then our effective depth
390 * is not just the miptree depth.
392 const unsigned mt_num_layers
=
393 mt
->logical_depth0
* (_mesa_is_cube_map_texture(mt
->target
) ? 6 : 1);
394 const unsigned view_num_layers
=
395 (obj
->Immutable
&& obj
->Target
!= GL_TEXTURE_3D
) ? obj
->NumLayers
:
398 /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
399 * texturing functions that return a float, as our code generation always
400 * selects the .x channel (which would always be 0).
402 struct gl_texture_image
*firstImage
= obj
->Image
[0][obj
->BaseLevel
];
403 const bool alpha_depth
= obj
->DepthMode
== GL_ALPHA
&&
404 (firstImage
->_BaseFormat
== GL_DEPTH_COMPONENT
||
405 firstImage
->_BaseFormat
== GL_DEPTH_STENCIL
);
406 const unsigned swizzle
= (unlikely(alpha_depth
) ? SWIZZLE_XYZW
:
407 brw_get_texture_swizzle(&brw
->ctx
, obj
));
409 unsigned format
= translate_tex_format(
410 brw
, intel_obj
->_Format
, sampler
->sRGBDecode
);
412 /* Implement gen6 and gen7 gather work-around */
413 bool need_green_to_blue
= false;
415 if (brw
->gen
== 7 && format
== BRW_SURFACEFORMAT_R32G32_FLOAT
) {
416 format
= BRW_SURFACEFORMAT_R32G32_FLOAT_LD
;
417 need_green_to_blue
= brw
->is_haswell
;
418 } else if (brw
->gen
== 6) {
419 /* Sandybridge's gather4 message is broken for integer formats.
420 * To work around this, we pretend the surface is UNORM for
421 * 8 or 16-bit formats, and emit shader instructions to recover
422 * the real INT/UINT value. For 32-bit formats, we pretend
423 * the surface is FLOAT, and simply reinterpret the resulting
427 case BRW_SURFACEFORMAT_R8_SINT
:
428 case BRW_SURFACEFORMAT_R8_UINT
:
429 format
= BRW_SURFACEFORMAT_R8_UNORM
;
432 case BRW_SURFACEFORMAT_R16_SINT
:
433 case BRW_SURFACEFORMAT_R16_UINT
:
434 format
= BRW_SURFACEFORMAT_R16_UNORM
;
437 case BRW_SURFACEFORMAT_R32_SINT
:
438 case BRW_SURFACEFORMAT_R32_UINT
:
439 format
= BRW_SURFACEFORMAT_R32_FLOAT
;
448 if (obj
->StencilSampling
&& firstImage
->_BaseFormat
== GL_DEPTH_STENCIL
) {
449 assert(brw
->gen
>= 8);
451 format
= BRW_SURFACEFORMAT_R8_UINT
;
452 } else if (obj
->Target
== GL_TEXTURE_EXTERNAL_OES
) {
454 mt
= mt
->plane
[plane
- 1];
457 format
= translate_tex_format(brw
, mt
->format
, sampler
->sRGBDecode
);
460 const int surf_index
= surf_offset
- &brw
->wm
.base
.surf_offset
[0];
462 struct isl_view view
= {
464 .base_level
= obj
->MinLevel
+ obj
->BaseLevel
,
465 .levels
= intel_obj
->_MaxLevel
- obj
->BaseLevel
+ 1,
466 .base_array_layer
= obj
->MinLayer
,
467 .array_len
= view_num_layers
,
469 swizzle_to_scs(GET_SWZ(swizzle
, 0), need_green_to_blue
),
470 swizzle_to_scs(GET_SWZ(swizzle
, 1), need_green_to_blue
),
471 swizzle_to_scs(GET_SWZ(swizzle
, 2), need_green_to_blue
),
472 swizzle_to_scs(GET_SWZ(swizzle
, 3), need_green_to_blue
),
474 .usage
= ISL_SURF_USAGE_TEXTURE_BIT
,
477 if (obj
->Target
== GL_TEXTURE_CUBE_MAP
||
478 obj
->Target
== GL_TEXTURE_CUBE_MAP_ARRAY
)
479 view
.usage
|= ISL_SURF_USAGE_CUBE_BIT
;
481 brw_emit_surface_state(brw
, mt
, &view
,
482 surface_state_infos
[brw
->gen
].tex_mocs
, for_gather
,
483 surf_offset
, surf_index
,
484 I915_GEM_DOMAIN_SAMPLER
, 0);
489 brw_emit_buffer_surface_state(struct brw_context
*brw
,
490 uint32_t *out_offset
,
492 unsigned buffer_offset
,
493 unsigned surface_format
,
494 unsigned buffer_size
,
498 const struct surface_state_info ss_info
= surface_state_infos
[brw
->gen
];
500 uint32_t *dw
= brw_state_batch(brw
, AUB_TRACE_SURFACE_STATE
,
501 ss_info
.num_dwords
* 4, ss_info
.ss_align
,
504 isl_buffer_fill_state(&brw
->isl_dev
, dw
,
505 .address
= (bo
? bo
->offset64
: 0) + buffer_offset
,
507 .format
= surface_format
,
509 .mocs
= ss_info
.tex_mocs
);
512 drm_intel_bo_emit_reloc(brw
->batch
.bo
,
513 *out_offset
+ 4 * ss_info
.reloc_dw
,
515 I915_GEM_DOMAIN_SAMPLER
,
516 (rw
? I915_GEM_DOMAIN_SAMPLER
: 0));
521 brw_update_buffer_texture_surface(struct gl_context
*ctx
,
523 uint32_t *surf_offset
)
525 struct brw_context
*brw
= brw_context(ctx
);
526 struct gl_texture_object
*tObj
= ctx
->Texture
.Unit
[unit
]._Current
;
527 struct intel_buffer_object
*intel_obj
=
528 intel_buffer_object(tObj
->BufferObject
);
529 uint32_t size
= tObj
->BufferSize
;
530 drm_intel_bo
*bo
= NULL
;
531 mesa_format format
= tObj
->_BufferObjectFormat
;
532 uint32_t brw_format
= brw_format_for_mesa_format(format
);
533 int texel_size
= _mesa_get_format_bytes(format
);
536 size
= MIN2(size
, intel_obj
->Base
.Size
);
537 bo
= intel_bufferobj_buffer(brw
, intel_obj
, tObj
->BufferOffset
, size
);
540 if (brw_format
== 0 && format
!= MESA_FORMAT_RGBA_FLOAT32
) {
541 _mesa_problem(NULL
, "bad format %s for texture buffer\n",
542 _mesa_get_format_name(format
));
545 brw_emit_buffer_surface_state(brw
, surf_offset
, bo
,
554 * Create the constant buffer surface. Vertex/fragment shader constants will be
555 * read from this buffer with Data Port Read instructions/messages.
558 brw_create_constant_surface(struct brw_context
*brw
,
562 uint32_t *out_offset
)
564 brw_emit_buffer_surface_state(brw
, out_offset
, bo
, offset
,
565 BRW_SURFACEFORMAT_R32G32B32A32_FLOAT
,
570 * Create the buffer surface. Shader buffer variables will be
571 * read from / write to this buffer with Data Port Read/Write
572 * instructions/messages.
575 brw_create_buffer_surface(struct brw_context
*brw
,
579 uint32_t *out_offset
)
581 /* Use a raw surface so we can reuse existing untyped read/write/atomic
582 * messages. We need these specifically for the fragment shader since they
583 * include a pixel mask header that we need to ensure correct behavior
584 * with helper invocations, which cannot write to the buffer.
586 brw_emit_buffer_surface_state(brw
, out_offset
, bo
, offset
,
587 BRW_SURFACEFORMAT_RAW
,
592 * Set up a binding table entry for use by stream output logic (transform
595 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
598 brw_update_sol_surface(struct brw_context
*brw
,
599 struct gl_buffer_object
*buffer_obj
,
600 uint32_t *out_offset
, unsigned num_vector_components
,
601 unsigned stride_dwords
, unsigned offset_dwords
)
603 struct intel_buffer_object
*intel_bo
= intel_buffer_object(buffer_obj
);
604 uint32_t offset_bytes
= 4 * offset_dwords
;
605 drm_intel_bo
*bo
= intel_bufferobj_buffer(brw
, intel_bo
,
607 buffer_obj
->Size
- offset_bytes
);
608 uint32_t *surf
= brw_state_batch(brw
, AUB_TRACE_SURFACE_STATE
, 6 * 4, 32,
610 uint32_t pitch_minus_1
= 4*stride_dwords
- 1;
611 size_t size_dwords
= buffer_obj
->Size
/ 4;
612 uint32_t buffer_size_minus_1
, width
, height
, depth
, surface_format
;
614 /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
615 * too big to map using a single binding table entry?
617 assert((size_dwords
- offset_dwords
) / stride_dwords
618 <= BRW_MAX_NUM_BUFFER_ENTRIES
);
620 if (size_dwords
> offset_dwords
+ num_vector_components
) {
621 /* There is room for at least 1 transform feedback output in the buffer.
622 * Compute the number of additional transform feedback outputs the
623 * buffer has room for.
625 buffer_size_minus_1
=
626 (size_dwords
- offset_dwords
- num_vector_components
) / stride_dwords
;
628 /* There isn't even room for a single transform feedback output in the
629 * buffer. We can't configure the binding table entry to prevent output
630 * entirely; we'll have to rely on the geometry shader to detect
631 * overflow. But to minimize the damage in case of a bug, set up the
632 * binding table entry to just allow a single output.
634 buffer_size_minus_1
= 0;
636 width
= buffer_size_minus_1
& 0x7f;
637 height
= (buffer_size_minus_1
& 0xfff80) >> 7;
638 depth
= (buffer_size_minus_1
& 0x7f00000) >> 20;
640 switch (num_vector_components
) {
642 surface_format
= BRW_SURFACEFORMAT_R32_FLOAT
;
645 surface_format
= BRW_SURFACEFORMAT_R32G32_FLOAT
;
648 surface_format
= BRW_SURFACEFORMAT_R32G32B32_FLOAT
;
651 surface_format
= BRW_SURFACEFORMAT_R32G32B32A32_FLOAT
;
654 unreachable("Invalid vector size for transform feedback output");
657 surf
[0] = BRW_SURFACE_BUFFER
<< BRW_SURFACE_TYPE_SHIFT
|
658 BRW_SURFACE_MIPMAPLAYOUT_BELOW
<< BRW_SURFACE_MIPLAYOUT_SHIFT
|
659 surface_format
<< BRW_SURFACE_FORMAT_SHIFT
|
660 BRW_SURFACE_RC_READ_WRITE
;
661 surf
[1] = bo
->offset64
+ offset_bytes
; /* reloc */
662 surf
[2] = (width
<< BRW_SURFACE_WIDTH_SHIFT
|
663 height
<< BRW_SURFACE_HEIGHT_SHIFT
);
664 surf
[3] = (depth
<< BRW_SURFACE_DEPTH_SHIFT
|
665 pitch_minus_1
<< BRW_SURFACE_PITCH_SHIFT
);
669 /* Emit relocation to surface contents. */
670 drm_intel_bo_emit_reloc(brw
->batch
.bo
,
673 I915_GEM_DOMAIN_RENDER
, I915_GEM_DOMAIN_RENDER
);
676 /* Creates a new WM constant buffer reflecting the current fragment program's
677 * constants, if needed by the fragment program.
679 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
683 brw_upload_wm_pull_constants(struct brw_context
*brw
)
685 struct brw_stage_state
*stage_state
= &brw
->wm
.base
;
686 /* BRW_NEW_FRAGMENT_PROGRAM */
687 struct brw_fragment_program
*fp
=
688 (struct brw_fragment_program
*) brw
->fragment_program
;
689 /* BRW_NEW_FS_PROG_DATA */
690 struct brw_stage_prog_data
*prog_data
= &brw
->wm
.prog_data
->base
;
692 /* _NEW_PROGRAM_CONSTANTS */
693 brw_upload_pull_constants(brw
, BRW_NEW_SURFACES
, &fp
->program
.Base
,
694 stage_state
, prog_data
);
697 const struct brw_tracked_state brw_wm_pull_constants
= {
699 .mesa
= _NEW_PROGRAM_CONSTANTS
,
700 .brw
= BRW_NEW_BATCH
|
702 BRW_NEW_FRAGMENT_PROGRAM
|
703 BRW_NEW_FS_PROG_DATA
,
705 .emit
= brw_upload_wm_pull_constants
,
709 * Creates a null renderbuffer surface.
711 * This is used when the shader doesn't write to any color output. An FB
712 * write to target 0 will still be emitted, because that's how the thread is
713 * terminated (and computed depth is returned), so we need to have the
714 * hardware discard the target 0 color output..
717 brw_emit_null_surface_state(struct brw_context
*brw
,
721 uint32_t *out_offset
)
723 /* From the Sandy bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
726 * A null surface will be used in instances where an actual surface is
727 * not bound. When a write message is generated to a null surface, no
728 * actual surface is written to. When a read message (including any
729 * sampling engine message) is generated to a null surface, the result
730 * is all zeros. Note that a null surface type is allowed to be used
731 * with all messages, even if it is not specifically indicated as
732 * supported. All of the remaining fields in surface state are ignored
733 * for null surfaces, with the following exceptions:
735 * - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
736 * depth buffer’s corresponding state for all render target surfaces,
739 * - Surface Format must be R8G8B8A8_UNORM.
741 unsigned surface_type
= BRW_SURFACE_NULL
;
742 drm_intel_bo
*bo
= NULL
;
743 unsigned pitch_minus_1
= 0;
744 uint32_t multisampling_state
= 0;
745 uint32_t *surf
= brw_state_batch(brw
, AUB_TRACE_SURFACE_STATE
, 6 * 4, 32,
749 /* On Gen6, null render targets seem to cause GPU hangs when
750 * multisampling. So work around this problem by rendering into dummy
753 * To decrease the amount of memory needed by the workaround buffer, we
754 * set its pitch to 128 bytes (the width of a Y tile). This means that
755 * the amount of memory needed for the workaround buffer is
756 * (width_in_tiles + height_in_tiles - 1) tiles.
758 * Note that since the workaround buffer will be interpreted by the
759 * hardware as an interleaved multisampled buffer, we need to compute
760 * width_in_tiles and height_in_tiles by dividing the width and height
761 * by 16 rather than the normal Y-tile size of 32.
763 unsigned width_in_tiles
= ALIGN(width
, 16) / 16;
764 unsigned height_in_tiles
= ALIGN(height
, 16) / 16;
765 unsigned size_needed
= (width_in_tiles
+ height_in_tiles
- 1) * 4096;
766 brw_get_scratch_bo(brw
, &brw
->wm
.multisampled_null_render_target_bo
,
768 bo
= brw
->wm
.multisampled_null_render_target_bo
;
769 surface_type
= BRW_SURFACE_2D
;
771 multisampling_state
= brw_get_surface_num_multisamples(samples
);
774 surf
[0] = (surface_type
<< BRW_SURFACE_TYPE_SHIFT
|
775 BRW_SURFACEFORMAT_B8G8R8A8_UNORM
<< BRW_SURFACE_FORMAT_SHIFT
);
777 surf
[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT
|
778 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT
|
779 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT
|
780 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT
);
782 surf
[1] = bo
? bo
->offset64
: 0;
783 surf
[2] = ((width
- 1) << BRW_SURFACE_WIDTH_SHIFT
|
784 (height
- 1) << BRW_SURFACE_HEIGHT_SHIFT
);
786 /* From Sandy bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
789 * If Surface Type is SURFTYPE_NULL, this field must be TRUE
791 surf
[3] = (BRW_SURFACE_TILED
| BRW_SURFACE_TILED_Y
|
792 pitch_minus_1
<< BRW_SURFACE_PITCH_SHIFT
);
793 surf
[4] = multisampling_state
;
797 drm_intel_bo_emit_reloc(brw
->batch
.bo
,
800 I915_GEM_DOMAIN_RENDER
, I915_GEM_DOMAIN_RENDER
);
805 * Sets up a surface state structure to point at the given region.
806 * While it is only used for the front/back buffer currently, it should be
807 * usable for further buffers when doing ARB_draw_buffer support.
810 gen4_update_renderbuffer_surface(struct brw_context
*brw
,
811 struct gl_renderbuffer
*rb
,
812 bool layered
, unsigned unit
,
815 struct gl_context
*ctx
= &brw
->ctx
;
816 struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
817 struct intel_mipmap_tree
*mt
= irb
->mt
;
819 uint32_t tile_x
, tile_y
;
823 mesa_format rb_format
= _mesa_get_render_format(ctx
, intel_rb_format(irb
));
824 /* BRW_NEW_FS_PROG_DATA */
828 if (rb
->TexImage
&& !brw
->has_surface_tile_offset
) {
829 intel_renderbuffer_get_tile_offsets(irb
, &tile_x
, &tile_y
);
831 if (tile_x
!= 0 || tile_y
!= 0) {
832 /* Original gen4 hardware couldn't draw to a non-tile-aligned
833 * destination in a miptree unless you actually setup your renderbuffer
834 * as a miptree and used the fragile lod/array_index/etc. controls to
835 * select the image. So, instead, we just make a new single-level
836 * miptree and render into that.
838 intel_renderbuffer_move_to_temp(brw
, irb
, false);
843 intel_miptree_used_for_rendering(irb
->mt
);
845 surf
= brw_state_batch(brw
, AUB_TRACE_SURFACE_STATE
, 6 * 4, 32, &offset
);
847 format
= brw
->render_target_format
[rb_format
];
848 if (unlikely(!brw
->format_supported_as_render_target
[rb_format
])) {
849 _mesa_problem(ctx
, "%s: renderbuffer format %s unsupported\n",
850 __func__
, _mesa_get_format_name(rb_format
));
853 surf
[0] = (BRW_SURFACE_2D
<< BRW_SURFACE_TYPE_SHIFT
|
854 format
<< BRW_SURFACE_FORMAT_SHIFT
);
857 assert(mt
->offset
% mt
->cpp
== 0);
858 surf
[1] = (intel_renderbuffer_get_tile_offsets(irb
, &tile_x
, &tile_y
) +
859 mt
->bo
->offset64
+ mt
->offset
);
861 surf
[2] = ((rb
->Width
- 1) << BRW_SURFACE_WIDTH_SHIFT
|
862 (rb
->Height
- 1) << BRW_SURFACE_HEIGHT_SHIFT
);
864 surf
[3] = (brw_get_surface_tiling_bits(mt
->tiling
) |
865 (mt
->pitch
- 1) << BRW_SURFACE_PITCH_SHIFT
);
867 surf
[4] = brw_get_surface_num_multisamples(mt
->num_samples
);
869 assert(brw
->has_surface_tile_offset
|| (tile_x
== 0 && tile_y
== 0));
870 /* Note that the low bits of these fields are missing, so
871 * there's the possibility of getting in trouble.
873 assert(tile_x
% 4 == 0);
874 assert(tile_y
% 2 == 0);
875 surf
[5] = ((tile_x
/ 4) << BRW_SURFACE_X_OFFSET_SHIFT
|
876 (tile_y
/ 2) << BRW_SURFACE_Y_OFFSET_SHIFT
|
877 (mt
->valign
== 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE
: 0));
881 if (!ctx
->Color
.ColorLogicOpEnabled
&&
882 (ctx
->Color
.BlendEnabled
& (1 << unit
)))
883 surf
[0] |= BRW_SURFACE_BLEND_ENABLED
;
885 if (!ctx
->Color
.ColorMask
[unit
][0])
886 surf
[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT
;
887 if (!ctx
->Color
.ColorMask
[unit
][1])
888 surf
[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT
;
889 if (!ctx
->Color
.ColorMask
[unit
][2])
890 surf
[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT
;
892 /* As mentioned above, disable writes to the alpha component when the
893 * renderbuffer is XRGB.
895 if (ctx
->DrawBuffer
->Visual
.alphaBits
== 0 ||
896 !ctx
->Color
.ColorMask
[unit
][3]) {
897 surf
[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT
;
901 drm_intel_bo_emit_reloc(brw
->batch
.bo
,
904 surf
[1] - mt
->bo
->offset64
,
905 I915_GEM_DOMAIN_RENDER
,
906 I915_GEM_DOMAIN_RENDER
);
912 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
915 brw_update_renderbuffer_surfaces(struct brw_context
*brw
,
916 const struct gl_framebuffer
*fb
,
917 uint32_t render_target_start
,
918 uint32_t *surf_offset
)
921 const unsigned int w
= _mesa_geometric_width(fb
);
922 const unsigned int h
= _mesa_geometric_height(fb
);
923 const unsigned int s
= _mesa_geometric_samples(fb
);
925 /* Update surfaces for drawing buffers */
926 if (fb
->_NumColorDrawBuffers
>= 1) {
927 for (i
= 0; i
< fb
->_NumColorDrawBuffers
; i
++) {
928 const uint32_t surf_index
= render_target_start
+ i
;
930 if (intel_renderbuffer(fb
->_ColorDrawBuffers
[i
])) {
931 surf_offset
[surf_index
] =
932 brw
->vtbl
.update_renderbuffer_surface(
933 brw
, fb
->_ColorDrawBuffers
[i
],
934 _mesa_geometric_layers(fb
) > 0, i
, surf_index
);
936 brw
->vtbl
.emit_null_surface_state(brw
, w
, h
, s
,
937 &surf_offset
[surf_index
]);
941 const uint32_t surf_index
= render_target_start
;
942 brw
->vtbl
.emit_null_surface_state(brw
, w
, h
, s
,
943 &surf_offset
[surf_index
]);
948 update_renderbuffer_surfaces(struct brw_context
*brw
)
950 const struct gl_context
*ctx
= &brw
->ctx
;
952 /* _NEW_BUFFERS | _NEW_COLOR */
953 const struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
954 brw_update_renderbuffer_surfaces(
956 brw
->wm
.prog_data
->binding_table
.render_target_start
,
957 brw
->wm
.base
.surf_offset
);
958 brw
->ctx
.NewDriverState
|= BRW_NEW_SURFACES
;
961 const struct brw_tracked_state brw_renderbuffer_surfaces
= {
963 .mesa
= _NEW_BUFFERS
|
965 .brw
= BRW_NEW_BATCH
|
967 BRW_NEW_FS_PROG_DATA
,
969 .emit
= update_renderbuffer_surfaces
,
972 const struct brw_tracked_state gen6_renderbuffer_surfaces
= {
974 .mesa
= _NEW_BUFFERS
,
975 .brw
= BRW_NEW_BATCH
|
978 .emit
= update_renderbuffer_surfaces
,
983 update_stage_texture_surfaces(struct brw_context
*brw
,
984 const struct gl_program
*prog
,
985 struct brw_stage_state
*stage_state
,
986 bool for_gather
, uint32_t plane
)
991 struct gl_context
*ctx
= &brw
->ctx
;
993 uint32_t *surf_offset
= stage_state
->surf_offset
;
995 /* BRW_NEW_*_PROG_DATA */
997 surf_offset
+= stage_state
->prog_data
->binding_table
.gather_texture_start
;
999 surf_offset
+= stage_state
->prog_data
->binding_table
.plane_start
[plane
];
1001 unsigned num_samplers
= _mesa_fls(prog
->SamplersUsed
);
1002 for (unsigned s
= 0; s
< num_samplers
; s
++) {
1005 if (prog
->SamplersUsed
& (1 << s
)) {
1006 const unsigned unit
= prog
->SamplerUnits
[s
];
1009 if (ctx
->Texture
.Unit
[unit
]._Current
) {
1010 brw
->vtbl
.update_texture_surface(ctx
, unit
, surf_offset
+ s
, for_gather
, plane
);
1018 * Construct SURFACE_STATE objects for enabled textures.
1021 brw_update_texture_surfaces(struct brw_context
*brw
)
1023 /* BRW_NEW_VERTEX_PROGRAM */
1024 struct gl_program
*vs
= (struct gl_program
*) brw
->vertex_program
;
1026 /* BRW_NEW_TESS_PROGRAMS */
1027 struct gl_program
*tcs
= (struct gl_program
*) brw
->tess_ctrl_program
;
1028 struct gl_program
*tes
= (struct gl_program
*) brw
->tess_eval_program
;
1030 /* BRW_NEW_GEOMETRY_PROGRAM */
1031 struct gl_program
*gs
= (struct gl_program
*) brw
->geometry_program
;
1033 /* BRW_NEW_FRAGMENT_PROGRAM */
1034 struct gl_program
*fs
= (struct gl_program
*) brw
->fragment_program
;
1037 update_stage_texture_surfaces(brw
, vs
, &brw
->vs
.base
, false, 0);
1038 update_stage_texture_surfaces(brw
, tcs
, &brw
->tcs
.base
, false, 0);
1039 update_stage_texture_surfaces(brw
, tes
, &brw
->tes
.base
, false, 0);
1040 update_stage_texture_surfaces(brw
, gs
, &brw
->gs
.base
, false, 0);
1041 update_stage_texture_surfaces(brw
, fs
, &brw
->wm
.base
, false, 0);
1043 /* emit alternate set of surface state for gather. this
1044 * allows the surface format to be overridden for only the
1045 * gather4 messages. */
1047 if (vs
&& vs
->UsesGather
)
1048 update_stage_texture_surfaces(brw
, vs
, &brw
->vs
.base
, true, 0);
1049 if (tcs
&& tcs
->UsesGather
)
1050 update_stage_texture_surfaces(brw
, tcs
, &brw
->tcs
.base
, true, 0);
1051 if (tes
&& tes
->UsesGather
)
1052 update_stage_texture_surfaces(brw
, tes
, &brw
->tes
.base
, true, 0);
1053 if (gs
&& gs
->UsesGather
)
1054 update_stage_texture_surfaces(brw
, gs
, &brw
->gs
.base
, true, 0);
1055 if (fs
&& fs
->UsesGather
)
1056 update_stage_texture_surfaces(brw
, fs
, &brw
->wm
.base
, true, 0);
1060 update_stage_texture_surfaces(brw
, fs
, &brw
->wm
.base
, false, 1);
1061 update_stage_texture_surfaces(brw
, fs
, &brw
->wm
.base
, false, 2);
1064 brw
->ctx
.NewDriverState
|= BRW_NEW_SURFACES
;
1067 const struct brw_tracked_state brw_texture_surfaces
= {
1069 .mesa
= _NEW_TEXTURE
,
1070 .brw
= BRW_NEW_BATCH
|
1072 BRW_NEW_FRAGMENT_PROGRAM
|
1073 BRW_NEW_FS_PROG_DATA
|
1074 BRW_NEW_GEOMETRY_PROGRAM
|
1075 BRW_NEW_GS_PROG_DATA
|
1076 BRW_NEW_TESS_PROGRAMS
|
1077 BRW_NEW_TCS_PROG_DATA
|
1078 BRW_NEW_TES_PROG_DATA
|
1079 BRW_NEW_TEXTURE_BUFFER
|
1080 BRW_NEW_VERTEX_PROGRAM
|
1081 BRW_NEW_VS_PROG_DATA
,
1083 .emit
= brw_update_texture_surfaces
,
1087 brw_update_cs_texture_surfaces(struct brw_context
*brw
)
1089 /* BRW_NEW_COMPUTE_PROGRAM */
1090 struct gl_program
*cs
= (struct gl_program
*) brw
->compute_program
;
1093 update_stage_texture_surfaces(brw
, cs
, &brw
->cs
.base
, false, 0);
1095 /* emit alternate set of surface state for gather. this
1096 * allows the surface format to be overriden for only the
1100 if (cs
&& cs
->UsesGather
)
1101 update_stage_texture_surfaces(brw
, cs
, &brw
->cs
.base
, true, 0);
1104 brw
->ctx
.NewDriverState
|= BRW_NEW_SURFACES
;
1107 const struct brw_tracked_state brw_cs_texture_surfaces
= {
1109 .mesa
= _NEW_TEXTURE
,
1110 .brw
= BRW_NEW_BATCH
|
1112 BRW_NEW_COMPUTE_PROGRAM
,
1114 .emit
= brw_update_cs_texture_surfaces
,
1119 brw_upload_ubo_surfaces(struct brw_context
*brw
,
1120 struct gl_linked_shader
*shader
,
1121 struct brw_stage_state
*stage_state
,
1122 struct brw_stage_prog_data
*prog_data
)
1124 struct gl_context
*ctx
= &brw
->ctx
;
1129 uint32_t *ubo_surf_offsets
=
1130 &stage_state
->surf_offset
[prog_data
->binding_table
.ubo_start
];
1132 for (int i
= 0; i
< shader
->NumUniformBlocks
; i
++) {
1133 struct gl_uniform_buffer_binding
*binding
=
1134 &ctx
->UniformBufferBindings
[shader
->UniformBlocks
[i
]->Binding
];
1136 if (binding
->BufferObject
== ctx
->Shared
->NullBufferObj
) {
1137 brw
->vtbl
.emit_null_surface_state(brw
, 1, 1, 1, &ubo_surf_offsets
[i
]);
1139 struct intel_buffer_object
*intel_bo
=
1140 intel_buffer_object(binding
->BufferObject
);
1141 GLsizeiptr size
= binding
->BufferObject
->Size
- binding
->Offset
;
1142 if (!binding
->AutomaticSize
)
1143 size
= MIN2(size
, binding
->Size
);
1145 intel_bufferobj_buffer(brw
, intel_bo
,
1148 brw_create_constant_surface(brw
, bo
, binding
->Offset
,
1150 &ubo_surf_offsets
[i
]);
1154 uint32_t *ssbo_surf_offsets
=
1155 &stage_state
->surf_offset
[prog_data
->binding_table
.ssbo_start
];
1157 for (int i
= 0; i
< shader
->NumShaderStorageBlocks
; i
++) {
1158 struct gl_shader_storage_buffer_binding
*binding
=
1159 &ctx
->ShaderStorageBufferBindings
[shader
->ShaderStorageBlocks
[i
]->Binding
];
1161 if (binding
->BufferObject
== ctx
->Shared
->NullBufferObj
) {
1162 brw
->vtbl
.emit_null_surface_state(brw
, 1, 1, 1, &ssbo_surf_offsets
[i
]);
1164 struct intel_buffer_object
*intel_bo
=
1165 intel_buffer_object(binding
->BufferObject
);
1166 GLsizeiptr size
= binding
->BufferObject
->Size
- binding
->Offset
;
1167 if (!binding
->AutomaticSize
)
1168 size
= MIN2(size
, binding
->Size
);
1170 intel_bufferobj_buffer(brw
, intel_bo
,
1173 brw_create_buffer_surface(brw
, bo
, binding
->Offset
,
1175 &ssbo_surf_offsets
[i
]);
1179 if (shader
->NumUniformBlocks
|| shader
->NumShaderStorageBlocks
)
1180 brw
->ctx
.NewDriverState
|= BRW_NEW_SURFACES
;
1184 brw_upload_wm_ubo_surfaces(struct brw_context
*brw
)
1186 struct gl_context
*ctx
= &brw
->ctx
;
1188 struct gl_shader_program
*prog
= ctx
->_Shader
->_CurrentFragmentProgram
;
1193 /* BRW_NEW_FS_PROG_DATA */
1194 brw_upload_ubo_surfaces(brw
, prog
->_LinkedShaders
[MESA_SHADER_FRAGMENT
],
1195 &brw
->wm
.base
, &brw
->wm
.prog_data
->base
);
1198 const struct brw_tracked_state brw_wm_ubo_surfaces
= {
1200 .mesa
= _NEW_PROGRAM
,
1201 .brw
= BRW_NEW_BATCH
|
1203 BRW_NEW_FS_PROG_DATA
|
1204 BRW_NEW_UNIFORM_BUFFER
,
1206 .emit
= brw_upload_wm_ubo_surfaces
,
1210 brw_upload_cs_ubo_surfaces(struct brw_context
*brw
)
1212 struct gl_context
*ctx
= &brw
->ctx
;
1214 struct gl_shader_program
*prog
=
1215 ctx
->_Shader
->CurrentProgram
[MESA_SHADER_COMPUTE
];
1220 /* BRW_NEW_CS_PROG_DATA */
1221 brw_upload_ubo_surfaces(brw
, prog
->_LinkedShaders
[MESA_SHADER_COMPUTE
],
1222 &brw
->cs
.base
, &brw
->cs
.prog_data
->base
);
1225 const struct brw_tracked_state brw_cs_ubo_surfaces
= {
1227 .mesa
= _NEW_PROGRAM
,
1228 .brw
= BRW_NEW_BATCH
|
1230 BRW_NEW_CS_PROG_DATA
|
1231 BRW_NEW_UNIFORM_BUFFER
,
1233 .emit
= brw_upload_cs_ubo_surfaces
,
1237 brw_upload_abo_surfaces(struct brw_context
*brw
,
1238 struct gl_linked_shader
*shader
,
1239 struct brw_stage_state
*stage_state
,
1240 struct brw_stage_prog_data
*prog_data
)
1242 struct gl_context
*ctx
= &brw
->ctx
;
1243 uint32_t *surf_offsets
=
1244 &stage_state
->surf_offset
[prog_data
->binding_table
.abo_start
];
1246 if (shader
&& shader
->NumAtomicBuffers
) {
1247 for (unsigned i
= 0; i
< shader
->NumAtomicBuffers
; i
++) {
1248 struct gl_atomic_buffer_binding
*binding
=
1249 &ctx
->AtomicBufferBindings
[shader
->AtomicBuffers
[i
]->Binding
];
1250 struct intel_buffer_object
*intel_bo
=
1251 intel_buffer_object(binding
->BufferObject
);
1252 drm_intel_bo
*bo
= intel_bufferobj_buffer(
1253 brw
, intel_bo
, binding
->Offset
, intel_bo
->Base
.Size
- binding
->Offset
);
1255 brw_emit_buffer_surface_state(brw
, &surf_offsets
[i
], bo
,
1256 binding
->Offset
, BRW_SURFACEFORMAT_RAW
,
1257 bo
->size
- binding
->Offset
, 1, true);
1260 brw
->ctx
.NewDriverState
|= BRW_NEW_SURFACES
;
1265 brw_upload_wm_abo_surfaces(struct brw_context
*brw
)
1267 struct gl_context
*ctx
= &brw
->ctx
;
1269 struct gl_shader_program
*prog
= ctx
->_Shader
->_CurrentFragmentProgram
;
1272 /* BRW_NEW_FS_PROG_DATA */
1273 brw_upload_abo_surfaces(brw
, prog
->_LinkedShaders
[MESA_SHADER_FRAGMENT
],
1274 &brw
->wm
.base
, &brw
->wm
.prog_data
->base
);
1278 const struct brw_tracked_state brw_wm_abo_surfaces
= {
1280 .mesa
= _NEW_PROGRAM
,
1281 .brw
= BRW_NEW_ATOMIC_BUFFER
|
1284 BRW_NEW_FS_PROG_DATA
,
1286 .emit
= brw_upload_wm_abo_surfaces
,
1290 brw_upload_cs_abo_surfaces(struct brw_context
*brw
)
1292 struct gl_context
*ctx
= &brw
->ctx
;
1294 struct gl_shader_program
*prog
=
1295 ctx
->_Shader
->CurrentProgram
[MESA_SHADER_COMPUTE
];
1298 /* BRW_NEW_CS_PROG_DATA */
1299 brw_upload_abo_surfaces(brw
, prog
->_LinkedShaders
[MESA_SHADER_COMPUTE
],
1300 &brw
->cs
.base
, &brw
->cs
.prog_data
->base
);
1304 const struct brw_tracked_state brw_cs_abo_surfaces
= {
1306 .mesa
= _NEW_PROGRAM
,
1307 .brw
= BRW_NEW_ATOMIC_BUFFER
|
1310 BRW_NEW_CS_PROG_DATA
,
1312 .emit
= brw_upload_cs_abo_surfaces
,
1316 brw_upload_cs_image_surfaces(struct brw_context
*brw
)
1318 struct gl_context
*ctx
= &brw
->ctx
;
1320 struct gl_shader_program
*prog
=
1321 ctx
->_Shader
->CurrentProgram
[MESA_SHADER_COMPUTE
];
1324 /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1325 brw_upload_image_surfaces(brw
, prog
->_LinkedShaders
[MESA_SHADER_COMPUTE
],
1326 &brw
->cs
.base
, &brw
->cs
.prog_data
->base
);
1330 const struct brw_tracked_state brw_cs_image_surfaces
= {
1332 .mesa
= _NEW_TEXTURE
| _NEW_PROGRAM
,
1333 .brw
= BRW_NEW_BATCH
|
1335 BRW_NEW_CS_PROG_DATA
|
1338 .emit
= brw_upload_cs_image_surfaces
,
1342 get_image_format(struct brw_context
*brw
, mesa_format format
, GLenum access
)
1344 const struct brw_device_info
*devinfo
= brw
->intelScreen
->devinfo
;
1345 uint32_t hw_format
= brw_format_for_mesa_format(format
);
1346 if (access
== GL_WRITE_ONLY
) {
1348 } else if (isl_has_matching_typed_storage_image_format(devinfo
, hw_format
)) {
1349 /* Typed surface reads support a very limited subset of the shader
1350 * image formats. Translate it into the closest format the
1351 * hardware supports.
1353 return isl_lower_storage_image_format(devinfo
, hw_format
);
1355 /* The hardware doesn't actually support a typed format that we can use
1356 * so we have to fall back to untyped read/write messages.
1358 return BRW_SURFACEFORMAT_RAW
;
1363 update_default_image_param(struct brw_context
*brw
,
1364 struct gl_image_unit
*u
,
1365 unsigned surface_idx
,
1366 struct brw_image_param
*param
)
1368 memset(param
, 0, sizeof(*param
));
1369 param
->surface_idx
= surface_idx
;
1370 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
1371 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
1372 * detailed explanation of these parameters.
1374 param
->swizzling
[0] = 0xff;
1375 param
->swizzling
[1] = 0xff;
1379 update_buffer_image_param(struct brw_context
*brw
,
1380 struct gl_image_unit
*u
,
1381 unsigned surface_idx
,
1382 struct brw_image_param
*param
)
1384 struct gl_buffer_object
*obj
= u
->TexObj
->BufferObject
;
1386 update_default_image_param(brw
, u
, surface_idx
, param
);
1388 param
->size
[0] = obj
->Size
/ _mesa_get_format_bytes(u
->_ActualFormat
);
1389 param
->stride
[0] = _mesa_get_format_bytes(u
->_ActualFormat
);
1393 update_texture_image_param(struct brw_context
*brw
,
1394 struct gl_image_unit
*u
,
1395 unsigned surface_idx
,
1396 struct brw_image_param
*param
)
1398 struct intel_mipmap_tree
*mt
= intel_texture_object(u
->TexObj
)->mt
;
1400 update_default_image_param(brw
, u
, surface_idx
, param
);
1402 param
->size
[0] = minify(mt
->logical_width0
, u
->Level
);
1403 param
->size
[1] = minify(mt
->logical_height0
, u
->Level
);
1404 param
->size
[2] = (!u
->Layered
? 1 :
1405 u
->TexObj
->Target
== GL_TEXTURE_CUBE_MAP
? 6 :
1406 u
->TexObj
->Target
== GL_TEXTURE_3D
?
1407 minify(mt
->logical_depth0
, u
->Level
) :
1408 mt
->logical_depth0
);
1410 intel_miptree_get_image_offset(mt
, u
->Level
, u
->_Layer
,
1414 param
->stride
[0] = mt
->cpp
;
1415 param
->stride
[1] = mt
->pitch
/ mt
->cpp
;
1417 brw_miptree_get_horizontal_slice_pitch(brw
, mt
, u
->Level
);
1419 brw_miptree_get_vertical_slice_pitch(brw
, mt
, u
->Level
);
1421 if (mt
->tiling
== I915_TILING_X
) {
1422 /* An X tile is a rectangular block of 512x8 bytes. */
1423 param
->tiling
[0] = _mesa_logbase2(512 / mt
->cpp
);
1424 param
->tiling
[1] = _mesa_logbase2(8);
1426 if (brw
->has_swizzling
) {
1427 /* Right shifts required to swizzle bits 9 and 10 of the memory
1428 * address with bit 6.
1430 param
->swizzling
[0] = 3;
1431 param
->swizzling
[1] = 4;
1433 } else if (mt
->tiling
== I915_TILING_Y
) {
1434 /* The layout of a Y-tiled surface in memory isn't really fundamentally
1435 * different to the layout of an X-tiled surface, we simply pretend that
1436 * the surface is broken up in a number of smaller 16Bx32 tiles, each
1437 * one arranged in X-major order just like is the case for X-tiling.
1439 param
->tiling
[0] = _mesa_logbase2(16 / mt
->cpp
);
1440 param
->tiling
[1] = _mesa_logbase2(32);
1442 if (brw
->has_swizzling
) {
1443 /* Right shift required to swizzle bit 9 of the memory address with
1446 param
->swizzling
[0] = 3;
1450 /* 3D textures are arranged in 2D in memory with 2^lod slices per row. The
1451 * address calculation algorithm (emit_address_calculation() in
1452 * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
1453 * modulus equal to the LOD.
1455 param
->tiling
[2] = (u
->TexObj
->Target
== GL_TEXTURE_3D
? u
->Level
:
1460 update_image_surface(struct brw_context
*brw
,
1461 struct gl_image_unit
*u
,
1463 unsigned surface_idx
,
1464 uint32_t *surf_offset
,
1465 struct brw_image_param
*param
)
1467 if (_mesa_is_image_unit_valid(&brw
->ctx
, u
)) {
1468 struct gl_texture_object
*obj
= u
->TexObj
;
1469 const unsigned format
= get_image_format(brw
, u
->_ActualFormat
, access
);
1471 if (obj
->Target
== GL_TEXTURE_BUFFER
) {
1472 struct intel_buffer_object
*intel_obj
=
1473 intel_buffer_object(obj
->BufferObject
);
1474 const unsigned texel_size
= (format
== BRW_SURFACEFORMAT_RAW
? 1 :
1475 _mesa_get_format_bytes(u
->_ActualFormat
));
1477 brw_emit_buffer_surface_state(
1478 brw
, surf_offset
, intel_obj
->buffer
, obj
->BufferOffset
,
1479 format
, intel_obj
->Base
.Size
, texel_size
,
1480 access
!= GL_READ_ONLY
);
1482 update_buffer_image_param(brw
, u
, surface_idx
, param
);
1485 struct intel_texture_object
*intel_obj
= intel_texture_object(obj
);
1486 struct intel_mipmap_tree
*mt
= intel_obj
->mt
;
1488 if (format
== BRW_SURFACEFORMAT_RAW
) {
1489 brw_emit_buffer_surface_state(
1490 brw
, surf_offset
, mt
->bo
, mt
->offset
,
1491 format
, mt
->bo
->size
- mt
->offset
, 1 /* pitch */,
1492 access
!= GL_READ_ONLY
);
1495 const unsigned num_layers
= (!u
->Layered
? 1 :
1496 obj
->Target
== GL_TEXTURE_CUBE_MAP
? 6 :
1497 mt
->logical_depth0
);
1499 struct isl_view view
= {
1501 .base_level
= obj
->MinLevel
+ u
->Level
,
1503 .base_array_layer
= obj
->MinLayer
+ u
->_Layer
,
1504 .array_len
= num_layers
,
1506 ISL_CHANNEL_SELECT_RED
,
1507 ISL_CHANNEL_SELECT_GREEN
,
1508 ISL_CHANNEL_SELECT_BLUE
,
1509 ISL_CHANNEL_SELECT_ALPHA
,
1511 .usage
= ISL_SURF_USAGE_STORAGE_BIT
,
1514 const int surf_index
= surf_offset
- &brw
->wm
.base
.surf_offset
[0];
1516 brw_emit_surface_state(brw
, mt
, &view
,
1517 surface_state_infos
[brw
->gen
].rb_mocs
, false,
1518 surf_offset
, surf_index
,
1519 I915_GEM_DOMAIN_SAMPLER
,
1520 access
== GL_READ_ONLY
? 0 :
1521 I915_GEM_DOMAIN_SAMPLER
);
1524 update_texture_image_param(brw
, u
, surface_idx
, param
);
1528 brw
->vtbl
.emit_null_surface_state(brw
, 1, 1, 1, surf_offset
);
1529 update_default_image_param(brw
, u
, surface_idx
, param
);
1534 brw_upload_image_surfaces(struct brw_context
*brw
,
1535 struct gl_linked_shader
*shader
,
1536 struct brw_stage_state
*stage_state
,
1537 struct brw_stage_prog_data
*prog_data
)
1539 struct gl_context
*ctx
= &brw
->ctx
;
1541 if (shader
&& shader
->NumImages
) {
1542 for (unsigned i
= 0; i
< shader
->NumImages
; i
++) {
1543 struct gl_image_unit
*u
= &ctx
->ImageUnits
[shader
->ImageUnits
[i
]];
1544 const unsigned surf_idx
= prog_data
->binding_table
.image_start
+ i
;
1546 update_image_surface(brw
, u
, shader
->ImageAccess
[i
],
1548 &stage_state
->surf_offset
[surf_idx
],
1549 &prog_data
->image_param
[i
]);
1552 brw
->ctx
.NewDriverState
|= BRW_NEW_SURFACES
;
1553 /* This may have changed the image metadata dependent on the context
1554 * image unit state and passed to the program as uniforms, make sure
1555 * that push and pull constants are reuploaded.
1557 brw
->NewGLState
|= _NEW_PROGRAM_CONSTANTS
;
1562 brw_upload_wm_image_surfaces(struct brw_context
*brw
)
1564 struct gl_context
*ctx
= &brw
->ctx
;
1565 /* BRW_NEW_FRAGMENT_PROGRAM */
1566 struct gl_shader_program
*prog
= ctx
->_Shader
->_CurrentFragmentProgram
;
1569 /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
1570 brw_upload_image_surfaces(brw
, prog
->_LinkedShaders
[MESA_SHADER_FRAGMENT
],
1571 &brw
->wm
.base
, &brw
->wm
.prog_data
->base
);
1575 const struct brw_tracked_state brw_wm_image_surfaces
= {
1577 .mesa
= _NEW_TEXTURE
,
1578 .brw
= BRW_NEW_BATCH
|
1580 BRW_NEW_FRAGMENT_PROGRAM
|
1581 BRW_NEW_FS_PROG_DATA
|
1584 .emit
= brw_upload_wm_image_surfaces
,
1588 gen4_init_vtable_surface_functions(struct brw_context
*brw
)
1590 brw
->vtbl
.update_texture_surface
= brw_update_texture_surface
;
1591 brw
->vtbl
.update_renderbuffer_surface
= gen4_update_renderbuffer_surface
;
1592 brw
->vtbl
.emit_null_surface_state
= brw_emit_null_surface_state
;
1596 gen6_init_vtable_surface_functions(struct brw_context
*brw
)
1598 gen4_init_vtable_surface_functions(brw
);
1599 brw
->vtbl
.update_renderbuffer_surface
= brw_update_renderbuffer_surface
;
1603 brw_upload_cs_work_groups_surface(struct brw_context
*brw
)
1605 struct gl_context
*ctx
= &brw
->ctx
;
1607 struct gl_shader_program
*prog
=
1608 ctx
->_Shader
->CurrentProgram
[MESA_SHADER_COMPUTE
];
1610 if (prog
&& brw
->cs
.prog_data
->uses_num_work_groups
) {
1611 const unsigned surf_idx
=
1612 brw
->cs
.prog_data
->binding_table
.work_groups_start
;
1613 uint32_t *surf_offset
= &brw
->cs
.base
.surf_offset
[surf_idx
];
1617 if (brw
->compute
.num_work_groups_bo
== NULL
) {
1619 intel_upload_data(brw
,
1620 (void *)brw
->compute
.num_work_groups
,
1626 bo
= brw
->compute
.num_work_groups_bo
;
1627 bo_offset
= brw
->compute
.num_work_groups_offset
;
1630 brw_emit_buffer_surface_state(brw
, surf_offset
,
1632 BRW_SURFACEFORMAT_RAW
,
1633 3 * sizeof(GLuint
), 1, true);
1634 brw
->ctx
.NewDriverState
|= BRW_NEW_SURFACES
;
1638 const struct brw_tracked_state brw_cs_work_groups_surface
= {
1640 .brw
= BRW_NEW_BLORP
|
1641 BRW_NEW_CS_WORK_GROUPS
1643 .emit
= brw_upload_cs_work_groups_surface
,