/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */
33 #include "main/context.h"
34 #include "main/blend.h"
35 #include "main/mtypes.h"
36 #include "main/samplerobj.h"
37 #include "main/shaderimage.h"
38 #include "program/prog_parameter.h"
39 #include "program/prog_instruction.h"
40 #include "main/framebuffer.h"
44 #include "intel_mipmap_tree.h"
45 #include "intel_batchbuffer.h"
46 #include "intel_tex.h"
47 #include "intel_fbo.h"
48 #include "intel_buffer_objects.h"
50 #include "brw_context.h"
51 #include "brw_state.h"
52 #include "brw_defines.h"
GLuint
translate_tex_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_1D:
   case GL_TEXTURE_1D_ARRAY_EXT:
      return BRW_SURFACE_1D;

   case GL_TEXTURE_RECTANGLE_NV:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_2D:
   case GL_TEXTURE_2D_ARRAY_EXT:
   case GL_TEXTURE_EXTERNAL_OES:
   case GL_TEXTURE_2D_MULTISAMPLE:
   case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_3D:
      return BRW_SURFACE_3D;

   case GL_TEXTURE_CUBE_MAP:
   case GL_TEXTURE_CUBE_MAP_ARRAY:
      return BRW_SURFACE_CUBE;

   default:
      unreachable("not reached");
   }
}
uint32_t
brw_get_surface_tiling_bits(uint32_t tiling)
{
   switch (tiling) {
   case I915_TILING_X:
      return BRW_SURFACE_TILED;
   case I915_TILING_Y:
      return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
   default:
      return 0;
   }
}
uint32_t
brw_get_surface_num_multisamples(unsigned num_samples)
{
   if (num_samples > 1)
      return BRW_SURFACE_MULTISAMPLECOUNT_4;
   else
      return BRW_SURFACE_MULTISAMPLECOUNT_1;
}
void
brw_configure_w_tiled(const struct intel_mipmap_tree *mt,
                      bool is_render_target,
                      unsigned *width, unsigned *height,
                      unsigned *pitch, uint32_t *tiling, unsigned *format)
{
   static const unsigned halign_stencil = 8;

   /* In Y-tiling a row is twice as wide as in W-tiling, and subsequently
    * there are half as many rows.
    * In addition, mip-levels are accessed manually by the program and
    * therefore the surface is set up to cover all the mip-levels for one
    * slice.  (Hardware is still used to access individual slices.)
    */
   *tiling = I915_TILING_Y;
   *pitch = mt->pitch * 2;
   *width = ALIGN(mt->total_width, halign_stencil) * 2;
   *height = (mt->total_height / mt->physical_depth0) / 2;

   if (is_render_target) {
      *format = BRW_SURFACEFORMAT_R8_UINT;
   }
}
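/* A worked example of the W- to Y-tiled remapping above (illustrative
 * values, not from the original source): a stencil miptree with
 * mt->pitch = 128, mt->total_width = 128, mt->total_height = 256 and
 * mt->physical_depth0 = 2 is advertised to the hardware as a Y-tiled
 * surface with pitch = 256, width = ALIGN(128, 8) * 2 = 256 and
 * height = (256 / 2) / 2 = 64: twice as wide and half as tall as the
 * W-tiled layout.
 */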
/**
 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
 * swizzling.
 */
int
brw_get_texture_swizzle(const struct gl_context *ctx,
                        const struct gl_texture_object *t)
{
   const struct gl_texture_image *img = t->Image[0][t->BaseLevel];

   int swizzles[SWIZZLE_NIL + 1] = {
      SWIZZLE_X,
      SWIZZLE_Y,
      SWIZZLE_Z,
      SWIZZLE_W,
      SWIZZLE_ZERO,
      SWIZZLE_ONE,
      SWIZZLE_NIL
   };
   if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
       img->_BaseFormat == GL_DEPTH_STENCIL) {
      GLenum depth_mode = t->DepthMode;

      /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
       * with depth component data specified with a sized internal format.
       * Otherwise, it's left at the old default, GL_LUMINANCE.
       */
      if (_mesa_is_gles3(ctx) &&
          img->InternalFormat != GL_DEPTH_COMPONENT &&
          img->InternalFormat != GL_DEPTH_STENCIL) {
         depth_mode = GL_RED;
      }

      switch (depth_mode) {
      case GL_ALPHA:
         swizzles[0] = SWIZZLE_ZERO;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_LUMINANCE:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
         break;
      case GL_INTENSITY:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_RED:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_ONE;
         break;
      }
   }
   GLenum datatype = _mesa_get_format_datatype(img->TexFormat);

   /* If the texture's format is alpha-only, force R, G, and B to
    * 0.0.  Similarly, if the texture's format has no alpha channel,
    * force the alpha value read to 1.0.  This allows for the
    * implementation to use an RGBA texture for any of these formats
    * without leaking any unexpected values.
    */
   switch (img->_BaseFormat) {
   case GL_ALPHA:
      swizzles[0] = SWIZZLE_ZERO;
      swizzles[1] = SWIZZLE_ZERO;
      swizzles[2] = SWIZZLE_ZERO;
      break;
   case GL_LUMINANCE:
      if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
      }
      break;
   case GL_LUMINANCE_ALPHA:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_W;
      }
      break;
   case GL_INTENSITY:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
      }
      break;
   case GL_RED:
   case GL_RG:
   case GL_RGB:
      if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0)
         swizzles[3] = SWIZZLE_ONE;
      break;
   }
   return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
                        swizzles[GET_SWZ(t->_Swizzle, 1)],
                        swizzles[GET_SWZ(t->_Swizzle, 2)],
                        swizzles[GET_SWZ(t->_Swizzle, 3)]);
}
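/* Illustrative composition (hypothetical values, not from the original
 * source): for a signed-normalized GL_LUMINANCE texture, swizzles[] ends
 * up as (X, X, X, ONE); if the application then sets a texture swizzle of
 * (A, R, G, B), the return statement above yields
 * MAKE_SWIZZLE4(ONE, X, X, X), i.e. the application-level swizzle indexes
 * into the format-derived swizzle.
 */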
static void
gen4_emit_buffer_surface_state(struct brw_context *brw,
                               uint32_t *out_offset,
                               drm_intel_bo *bo,
                               unsigned buffer_offset,
                               unsigned surface_format,
                               unsigned buffer_size,
                               unsigned pitch,
                               bool rw)
{
   uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                                    6 * 4, 32, out_offset);
   memset(surf, 0, 6 * 4);

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
             surface_format << BRW_SURFACE_FORMAT_SHIFT |
             (brw->gen >= 6 ? BRW_SURFACE_RC_READ_WRITE : 0);
   surf[1] = (bo ? bo->offset64 : 0) + buffer_offset; /* reloc */
   surf[2] = (buffer_size & 0x7f) << BRW_SURFACE_WIDTH_SHIFT |
             ((buffer_size >> 7) & 0x1fff) << BRW_SURFACE_HEIGHT_SHIFT;
   surf[3] = ((buffer_size >> 20) & 0x7f) << BRW_SURFACE_DEPTH_SHIFT |
             (pitch - 1) << BRW_SURFACE_PITCH_SHIFT;

   /* Emit relocation to surface contents.  The 965 PRM, Volume 4, section
    * 5.1.2 "Data Cache" says: "the data cache does not exist as a separate
    * physical cache.  It is mapped in hardware to the sampler cache."
    */
   if (bo) {
      drm_intel_bo_emit_reloc(brw->batch.bo, *out_offset + 4,
                              bo, buffer_offset,
                              I915_GEM_DOMAIN_SAMPLER,
                              (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
   }
}
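/* Illustrative field packing (not from the original source): the entry
 * count above is split into 7 + 13 + 7 bits across the width, height and
 * depth fields, so buffer_size = 0x123456 is programmed as width = 0x56,
 * height = 0x468 and depth = 0x1, giving a 2^27 entry addressing range.
 */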
static void
brw_update_buffer_texture_surface(struct gl_context *ctx,
                                  unsigned unit,
                                  uint32_t *surf_offset)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_buffer_object *intel_obj =
      intel_buffer_object(tObj->BufferObject);
   uint32_t size = tObj->BufferSize;
   drm_intel_bo *bo = NULL;
   mesa_format format = tObj->_BufferObjectFormat;
   uint32_t brw_format = brw_format_for_mesa_format(format);
   int texel_size = _mesa_get_format_bytes(format);

   if (intel_obj) {
      size = MIN2(size, intel_obj->Base.Size);
      bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size);
   }

   if (brw_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
      _mesa_problem(NULL, "bad format %s for texture buffer\n",
                    _mesa_get_format_name(format));
   }

   brw->vtbl.emit_buffer_surface_state(brw, surf_offset, bo,
                                       tObj->BufferOffset,
                                       brw_format,
                                       size / texel_size,
                                       texel_size,
                                       false /* rw */);
}
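/* Illustrative sizing (not from the original source): a 4096-byte buffer
 * texture in MESA_FORMAT_RGBA_FLOAT32 has texel_size = 16, so the call
 * above programs a 4096 / 16 = 256 entry buffer surface with a 16-byte
 * pitch.
 */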
static void
brw_update_texture_surface(struct gl_context *ctx,
                           unsigned unit,
                           uint32_t *surf_offset,
                           bool for_gather)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_texture_object *intelObj = intel_texture_object(tObj);
   struct intel_mipmap_tree *mt = intelObj->mt;
   struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
   uint32_t *surf;

   /* BRW_NEW_TEXTURE_BUFFER */
   if (tObj->Target == GL_TEXTURE_BUFFER) {
      brw_update_buffer_texture_surface(ctx, unit, surf_offset);
      return;
   }

   surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                          6 * 4, 32, surf_offset);

   uint32_t tex_format = translate_tex_format(brw, mt->format,
                                              sampler->sRGBDecode);

   if (for_gather) {
      /* Sandybridge's gather4 message is broken for integer formats.
       * To work around this, we pretend the surface is UNORM for
       * 8 or 16-bit formats, and emit shader instructions to recover
       * the real INT/UINT value.  For 32-bit formats, we pretend
       * the surface is FLOAT, and simply reinterpret the resulting
       * bits.
       */
      switch (tex_format) {
      case BRW_SURFACEFORMAT_R8_SINT:
      case BRW_SURFACEFORMAT_R8_UINT:
         tex_format = BRW_SURFACEFORMAT_R8_UNORM;
         break;

      case BRW_SURFACEFORMAT_R16_SINT:
      case BRW_SURFACEFORMAT_R16_UINT:
         tex_format = BRW_SURFACEFORMAT_R16_UNORM;
         break;

      case BRW_SURFACEFORMAT_R32_SINT:
      case BRW_SURFACEFORMAT_R32_UINT:
         tex_format = BRW_SURFACEFORMAT_R32_FLOAT;
         break;
      }
   }

   surf[0] = (translate_tex_target(tObj->Target) << BRW_SURFACE_TYPE_SHIFT |
              BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
              BRW_SURFACE_CUBEFACE_ENABLES |
              tex_format << BRW_SURFACE_FORMAT_SHIFT);

   surf[1] = mt->bo->offset64 + mt->offset; /* reloc */

   surf[2] = ((intelObj->_MaxLevel - tObj->BaseLevel) << BRW_SURFACE_LOD_SHIFT |
              (mt->logical_width0 - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (mt->logical_height0 - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
              (mt->logical_depth0 - 1) << BRW_SURFACE_DEPTH_SHIFT |
              (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);

   surf[4] = (brw_get_surface_num_multisamples(mt->num_samples) |
              SET_FIELD(tObj->BaseLevel - mt->first_level, BRW_SURFACE_MIN_LOD));

   surf[5] = mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0;

   /* Emit relocation to surface contents */
   drm_intel_bo_emit_reloc(brw->batch.bo,
                           *surf_offset + 4,
                           mt->bo,
                           surf[1] - mt->bo->offset64,
                           I915_GEM_DOMAIN_SAMPLER, 0);
}
/**
 * Create the constant buffer surface.  Vertex/fragment shader constants will
 * be read from this buffer with Data Port Read instructions/messages.
 */
void
brw_create_constant_surface(struct brw_context *brw,
                            drm_intel_bo *bo,
                            uint32_t offset,
                            uint32_t size,
                            uint32_t *out_offset)
{
   brw->vtbl.emit_buffer_surface_state(brw, out_offset, bo, offset,
                                       BRW_SURFACEFORMAT_R32G32B32A32_FLOAT,
                                       size, 1, false);
}
/**
 * Create the buffer surface.  Shader buffer variables will be
 * read from / written to this buffer with Data Port Read/Write
 * instructions/messages.
 */
void
brw_create_buffer_surface(struct brw_context *brw,
                          drm_intel_bo *bo,
                          uint32_t offset,
                          uint32_t size,
                          uint32_t *out_offset)
{
   /* Use a raw surface so we can reuse existing untyped read/write/atomic
    * messages.  We need these specifically for the fragment shader since they
    * include a pixel mask header that we need to ensure correct behavior
    * with helper invocations, which cannot write to the buffer.
    */
   brw->vtbl.emit_buffer_surface_state(brw, out_offset, bo, offset,
                                       BRW_SURFACEFORMAT_RAW,
                                       size, 1, true);
}
/**
 * Set up a binding table entry for use by stream output logic (transform
 * feedback).
 *
 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
 */
void
brw_update_sol_surface(struct brw_context *brw,
                       struct gl_buffer_object *buffer_obj,
                       uint32_t *out_offset, unsigned num_vector_components,
                       unsigned stride_dwords, unsigned offset_dwords)
{
   struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
   uint32_t offset_bytes = 4 * offset_dwords;
   drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
                                             offset_bytes,
                                             buffer_obj->Size - offset_bytes);
   uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
                                    out_offset);
   uint32_t pitch_minus_1 = 4 * stride_dwords - 1;
   size_t size_dwords = buffer_obj->Size / 4;
   uint32_t buffer_size_minus_1, width, height, depth, surface_format;

   /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
    * too big to map using a single binding table entry?
    */
   assert((size_dwords - offset_dwords) / stride_dwords
          <= BRW_MAX_NUM_BUFFER_ENTRIES);

   if (size_dwords > offset_dwords + num_vector_components) {
      /* There is room for at least 1 transform feedback output in the buffer.
       * Compute the number of additional transform feedback outputs the
       * buffer has room for.
       */
      buffer_size_minus_1 =
         (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
   } else {
      /* There isn't even room for a single transform feedback output in the
       * buffer.  We can't configure the binding table entry to prevent output
       * entirely; we'll have to rely on the geometry shader to detect
       * overflow.  But to minimize the damage in case of a bug, set up the
       * binding table entry to just allow a single output.
       */
      buffer_size_minus_1 = 0;
   }
   width = buffer_size_minus_1 & 0x7f;
   height = (buffer_size_minus_1 & 0xfff80) >> 7;
   depth = (buffer_size_minus_1 & 0x7f00000) >> 20;

   switch (num_vector_components) {
   case 1:
      surface_format = BRW_SURFACEFORMAT_R32_FLOAT;
      break;
   case 2:
      surface_format = BRW_SURFACEFORMAT_R32G32_FLOAT;
      break;
   case 3:
      surface_format = BRW_SURFACEFORMAT_R32G32B32_FLOAT;
      break;
   case 4:
      surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;
      break;
   default:
      unreachable("Invalid vector size for transform feedback output");
   }

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
             BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
             surface_format << BRW_SURFACE_FORMAT_SHIFT |
             BRW_SURFACE_RC_READ_WRITE;
   surf[1] = bo->offset64 + offset_bytes; /* reloc */
   surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
              height << BRW_SURFACE_HEIGHT_SHIFT);
   surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = 0;
   surf[5] = 0;

   /* Emit relocation to surface contents. */
   drm_intel_bo_emit_reloc(brw->batch.bo,
                           *out_offset + 4,
                           bo, offset_bytes,
                           I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
}
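/* Illustrative sizing (not from the original source): with
 * stride_dwords = 4, offset_dwords = 0 and a 4096-byte buffer
 * (size_dwords = 1024), there is room for one vec4 output plus
 * (1024 - 0 - 4) / 4 = 255 more, so buffer_size_minus_1 = 255 and the
 * surface advertises 256 vertex slots to the streamed-output unit.
 */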
/* Creates a new WM constant buffer reflecting the current fragment program's
 * constants, if needed by the fragment program.
 *
 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
 * state atom.
 */
static void
brw_upload_wm_pull_constants(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->wm.base;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct brw_fragment_program *fp =
      (struct brw_fragment_program *) brw->fragment_program;
   /* BRW_NEW_FS_PROG_DATA */
   struct brw_stage_prog_data *prog_data = &brw->wm.prog_data->base;

   /* _NEW_PROGRAM_CONSTANTS */
   brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program.Base,
                             stage_state, prog_data);
}

const struct brw_tracked_state brw_wm_pull_constants = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_pull_constants,
};
/**
 * Creates a null renderbuffer surface.
 *
 * This is used when the shader doesn't write to any color output.  An FB
 * write to target 0 will still be emitted, because that's how the thread is
 * terminated (and computed depth is returned), so we need to have the
 * hardware discard the target 0 color output.
 */
static void
brw_emit_null_surface_state(struct brw_context *brw,
                            unsigned width,
                            unsigned height,
                            unsigned samples,
                            uint32_t *out_offset)
{
   /* From the Sandy bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
    * Notes):
    *
    *     A null surface will be used in instances where an actual surface is
    *     not bound. When a write message is generated to a null surface, no
    *     actual surface is written to. When a read message (including any
    *     sampling engine message) is generated to a null surface, the result
    *     is all zeros. Note that a null surface type is allowed to be used
    *     with all messages, even if it is not specificially indicated as
    *     supported. All of the remaining fields in surface state are ignored
    *     for null surfaces, with the following exceptions:
    *
    *     - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
    *       depth buffer’s corresponding state for all render target surfaces,
    *       including null.
    *
    *     - Surface Format must be R8G8B8A8_UNORM.
    */
   unsigned surface_type = BRW_SURFACE_NULL;
   drm_intel_bo *bo = NULL;
   unsigned pitch_minus_1 = 0;
   uint32_t multisampling_state = 0;
   uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
                                    out_offset);

   if (samples > 1) {
      /* On Gen6, null render targets seem to cause GPU hangs when
       * multisampling.  So work around this problem by rendering into a
       * dummy color buffer.
       *
       * To decrease the amount of memory needed by the workaround buffer, we
       * set its pitch to 128 bytes (the width of a Y tile).  This means that
       * the amount of memory needed for the workaround buffer is
       * (width_in_tiles + height_in_tiles - 1) tiles.
       *
       * Note that since the workaround buffer will be interpreted by the
       * hardware as an interleaved multisampled buffer, we need to compute
       * width_in_tiles and height_in_tiles by dividing the width and height
       * by 16 rather than the normal Y-tile size of 32.
       */
      unsigned width_in_tiles = ALIGN(width, 16) / 16;
      unsigned height_in_tiles = ALIGN(height, 16) / 16;
      unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
      brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
                         size_needed);
      bo = brw->wm.multisampled_null_render_target_bo;
      surface_type = BRW_SURFACE_2D;
      pitch_minus_1 = 127;
      multisampling_state = brw_get_surface_num_multisamples(samples);
   }

   surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
              BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
   if (brw->gen < 6) {
      surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
                  1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
                  1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
                  1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
   }
   surf[1] = bo ? bo->offset64 : 0;
   surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   /* From the Sandy bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
    * Notes):
    *
    *     If Surface Type is SURFTYPE_NULL, this field must be TRUE
    */
   surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = multisampling_state;
   surf[5] = 0;

   if (bo) {
      drm_intel_bo_emit_reloc(brw->batch.bo,
                              *out_offset + 4,
                              bo, 0,
                              I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
   }
}
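/* Illustrative workaround sizing (not from the original source): for a
 * 2048x1024 multisampled null target, width_in_tiles = 2048 / 16 = 128
 * and height_in_tiles = 1024 / 16 = 64, so the dummy buffer needs only
 * (128 + 64 - 1) * 4096 = 782336 bytes rather than a full-size
 * allocation, thanks to the 128-byte pitch described above.
 */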
/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffers support.
 */
static uint32_t
brw_update_renderbuffer_surface(struct brw_context *brw,
                                struct gl_renderbuffer *rb,
                                bool layered, unsigned unit,
                                uint32_t surf_index)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;
   uint32_t *surf;
   uint32_t tile_x, tile_y;
   uint32_t format = 0;
   uint32_t offset;
   /* _NEW_BUFFERS */
   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   /* BRW_NEW_FS_PROG_DATA */

   assert(!layered);

   if (rb->TexImage && !brw->has_surface_tile_offset) {
      intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);

      if (tile_x != 0 || tile_y != 0) {
         /* Original gen4 hardware couldn't draw to a non-tile-aligned
          * destination in a miptree unless you actually set up your
          * renderbuffer as a miptree and used the fragile
          * lod/array_index/etc. controls to select the image.  So, instead,
          * we just make a new single-level miptree and render into that.
          */
         intel_renderbuffer_move_to_temp(brw, irb, false);
         mt = irb->mt;
      }
   }

   intel_miptree_used_for_rendering(irb->mt);

   surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32, &offset);

   format = brw->render_target_format[rb_format];
   if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              format << BRW_SURFACE_FORMAT_SHIFT);

   /* reloc */
   assert(mt->offset % mt->cpp == 0);
   surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
              mt->bo->offset64 + mt->offset);

   surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
              (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);

   surf[4] = brw_get_surface_num_multisamples(mt->num_samples);

   assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
   /* Note that the low bits of these fields are missing, so
    * there's the possibility of getting in trouble.
    */
   assert(tile_x % 4 == 0);
   assert(tile_y % 2 == 0);
   surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
              (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
              (mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));

   if (brw->gen < 6) {
      /* _NEW_COLOR */
      if (!ctx->Color.ColorLogicOpEnabled &&
          (ctx->Color.BlendEnabled & (1 << unit)))
         surf[0] |= BRW_SURFACE_BLEND_ENABLED;

      if (!ctx->Color.ColorMask[unit][0])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
      if (!ctx->Color.ColorMask[unit][1])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
      if (!ctx->Color.ColorMask[unit][2])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;

      /* As mentioned above, disable writes to the alpha component when the
       * renderbuffer is XRGB.
       */
      if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
          !ctx->Color.ColorMask[unit][3]) {
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
      }
   }

   drm_intel_bo_emit_reloc(brw->batch.bo,
                           offset + 4,
                           mt->bo,
                           surf[1] - mt->bo->offset64,
                           I915_GEM_DOMAIN_RENDER,
                           I915_GEM_DOMAIN_RENDER);

   return offset;
}
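/* Illustrative offset encoding (not from the original source): an
 * intra-tile offset of (tile_x, tile_y) = (8, 4) is programmed above as
 * X Offset = 2 and Y Offset = 2.  Offsets that are not multiples of 4
 * and 2 respectively cannot be expressed in SURFACE_STATE, which is
 * exactly what the two asserts guard against.
 */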
/**
 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
 */
void
brw_update_renderbuffer_surfaces(struct brw_context *brw,
                                 const struct gl_framebuffer *fb,
                                 uint32_t render_target_start,
                                 uint32_t *surf_offset)
{
   GLuint i;
   const unsigned int w = _mesa_geometric_width(fb);
   const unsigned int h = _mesa_geometric_height(fb);
   const unsigned int s = _mesa_geometric_samples(fb);

   /* Update surfaces for drawing buffers */
   if (fb->_NumColorDrawBuffers >= 1) {
      for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
         const uint32_t surf_index = render_target_start + i;

         if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
            surf_offset[surf_index] =
               brw->vtbl.update_renderbuffer_surface(
                  brw, fb->_ColorDrawBuffers[i],
                  _mesa_geometric_layers(fb) > 0, i, surf_index);
         } else {
            brw->vtbl.emit_null_surface_state(brw, w, h, s,
                                              &surf_offset[surf_index]);
         }
      }
   } else {
      const uint32_t surf_index = render_target_start;
      brw->vtbl.emit_null_surface_state(brw, w, h, s,
                                        &surf_offset[surf_index]);
   }
}

static void
update_renderbuffer_surfaces(struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;

   /* _NEW_BUFFERS | _NEW_COLOR */
   const struct gl_framebuffer *fb = ctx->DrawBuffer;
   brw_update_renderbuffer_surfaces(
      brw, fb,
      brw->wm.prog_data->binding_table.render_target_start,
      brw->wm.base.surf_offset);
   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = update_renderbuffer_surfaces,
};

const struct brw_tracked_state gen6_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP,
   },
   .emit = update_renderbuffer_surfaces,
};
static void
update_stage_texture_surfaces(struct brw_context *brw,
                              const struct gl_program *prog,
                              struct brw_stage_state *stage_state,
                              bool for_gather)
{
   if (!prog)
      return;

   struct gl_context *ctx = &brw->ctx;

   uint32_t *surf_offset = stage_state->surf_offset;

   /* BRW_NEW_*_PROG_DATA */
   if (for_gather)
      surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
   else
      surf_offset += stage_state->prog_data->binding_table.texture_start;

   unsigned num_samplers = _mesa_fls(prog->SamplersUsed);
   for (unsigned s = 0; s < num_samplers; s++) {
      surf_offset[s] = 0;

      if (prog->SamplersUsed & (1 << s)) {
         const unsigned unit = prog->SamplerUnits[s];

         /* _NEW_TEXTURE */
         if (ctx->Texture.Unit[unit]._Current) {
            brw->vtbl.update_texture_surface(ctx, unit, surf_offset + s,
                                             for_gather);
         }
      }
   }
}
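/* Illustrative loop bound (not from the original source): _mesa_fls()
 * returns the position of the highest set bit plus one, so a program
 * with SamplersUsed = 0b1001 yields num_samplers = 4 and the loop above
 * visits samplers 0..3, skipping unused ones via the bitmask test.
 */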
/**
 * Construct SURFACE_STATE objects for enabled textures.
 */
static void
brw_update_texture_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_VERTEX_PROGRAM */
   struct gl_program *vs = (struct gl_program *) brw->vertex_program;

   /* BRW_NEW_TESS_PROGRAMS */
   struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
   struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;

   /* BRW_NEW_GEOMETRY_PROGRAM */
   struct gl_program *gs = (struct gl_program *) brw->geometry_program;

   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct gl_program *fs = (struct gl_program *) brw->fragment_program;

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, vs, &brw->vs.base, false);
   update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false);
   update_stage_texture_surfaces(brw, tes, &brw->tes.base, false);
   update_stage_texture_surfaces(brw, gs, &brw->gs.base, false);
   update_stage_texture_surfaces(brw, fs, &brw->wm.base, false);

   /* emit alternate set of surface state for gather.  this
    * allows the surface format to be overridden for only the
    * gather4 messages.
    */
   if (vs && vs->UsesGather)
      update_stage_texture_surfaces(brw, vs, &brw->vs.base, true);
   if (tcs && tcs->UsesGather)
      update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true);
   if (tes && tes->UsesGather)
      update_stage_texture_surfaces(brw, tes, &brw->tes.base, true);
   if (gs && gs->UsesGather)
      update_stage_texture_surfaces(brw, gs, &brw->gs.base, true);
   if (fs && fs->UsesGather)
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, true);

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_GEOMETRY_PROGRAM |
             BRW_NEW_GS_PROG_DATA |
             BRW_NEW_TESS_PROGRAMS |
             BRW_NEW_TCS_PROG_DATA |
             BRW_NEW_TES_PROG_DATA |
             BRW_NEW_TEXTURE_BUFFER |
             BRW_NEW_VERTEX_PROGRAM |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = brw_update_texture_surfaces,
};
static void
brw_update_cs_texture_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_COMPUTE_PROGRAM */
   struct gl_program *cs = (struct gl_program *) brw->compute_program;

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, cs, &brw->cs.base, false);

   /* emit alternate set of surface state for gather.  this
    * allows the surface format to be overridden for only the
    * gather4 messages.
    */
   if (cs && cs->UsesGather)
      update_stage_texture_surfaces(brw, cs, &brw->cs.base, true);

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_cs_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_COMPUTE_PROGRAM,
   },
   .emit = brw_update_cs_texture_surfaces,
};
void
brw_upload_ubo_surfaces(struct brw_context *brw,
                        struct gl_shader *shader,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;

   if (!shader)
      return;

   uint32_t *ubo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ubo_start];

   for (int i = 0; i < shader->NumUniformBlocks; i++) {
      struct gl_uniform_buffer_binding *binding =
         &ctx->UniformBufferBindings[shader->UniformBlocks[i]->Binding];

      if (binding->BufferObject == ctx->Shared->NullBufferObj) {
         brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
      } else {
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
         if (!binding->AutomaticSize)
            size = MIN2(size, binding->Size);
         drm_intel_bo *bo =
            intel_bufferobj_buffer(brw, intel_bo,
                                   binding->Offset,
                                   size);
         brw_create_constant_surface(brw, bo, binding->Offset,
                                     size,
                                     &ubo_surf_offsets[i]);
      }
   }

   uint32_t *ssbo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ssbo_start];

   for (int i = 0; i < shader->NumShaderStorageBlocks; i++) {
      struct gl_shader_storage_buffer_binding *binding =
         &ctx->ShaderStorageBufferBindings[shader->ShaderStorageBlocks[i]->Binding];

      if (binding->BufferObject == ctx->Shared->NullBufferObj) {
         brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
      } else {
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
         if (!binding->AutomaticSize)
            size = MIN2(size, binding->Size);
         drm_intel_bo *bo =
            intel_bufferobj_buffer(brw, intel_bo,
                                   binding->Offset,
                                   size);
         brw_create_buffer_surface(brw, bo, binding->Offset,
                                   size,
                                   &ssbo_surf_offsets[i]);
      }
   }

   if (shader->NumUniformBlocks || shader->NumShaderStorageBlocks)
      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}
static void
brw_upload_wm_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;

   if (!prog)
      return;

   /* BRW_NEW_FS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
                           &brw->wm.base, &brw->wm.prog_data->base);
}

const struct brw_tracked_state brw_wm_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_wm_ubo_surfaces,
};
static void
brw_upload_cs_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   if (!prog)
      return;

   /* BRW_NEW_CS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
                           &brw->cs.base, &brw->cs.prog_data->base);
}

const struct brw_tracked_state brw_cs_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_cs_ubo_surfaces,
};
static void
brw_upload_abo_surfaces(struct brw_context *brw,
                        struct gl_shader *shader,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;
   uint32_t *surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.abo_start];

   if (shader && shader->NumAtomicBuffers) {
      for (unsigned i = 0; i < shader->NumAtomicBuffers; i++) {
         struct gl_atomic_buffer_binding *binding =
            &ctx->AtomicBufferBindings[shader->AtomicBuffers[i]->Binding];
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         drm_intel_bo *bo = intel_bufferobj_buffer(
            brw, intel_bo, binding->Offset,
            intel_bo->Base.Size - binding->Offset);

         brw->vtbl.emit_buffer_surface_state(brw, &surf_offsets[i], bo,
                                             binding->Offset,
                                             BRW_SURFACEFORMAT_RAW,
                                             bo->size - binding->Offset,
                                             1, true);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}
static void
brw_upload_wm_abo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;

   if (prog) {
      /* BRW_NEW_FS_PROG_DATA */
      brw_upload_abo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
                              &brw->wm.base, &brw->wm.prog_data->base);
   }
}

const struct brw_tracked_state brw_wm_abo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_ATOMIC_BUFFER |
             BRW_NEW_BLORP |
             BRW_NEW_BATCH |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_abo_surfaces,
};
static void
brw_upload_cs_abo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   if (prog) {
      /* BRW_NEW_CS_PROG_DATA */
      brw_upload_abo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
                              &brw->cs.base, &brw->cs.prog_data->base);
   }
}

const struct brw_tracked_state brw_cs_abo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_ATOMIC_BUFFER |
             BRW_NEW_BLORP |
             BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA,
   },
   .emit = brw_upload_cs_abo_surfaces,
};
static void
brw_upload_cs_image_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   if (prog) {
      /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE],
                                &brw->cs.base, &brw->cs.prog_data->base);
   }
}

const struct brw_tracked_state brw_cs_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS,
   },
   .emit = brw_upload_cs_image_surfaces,
};
static uint32_t
get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
{
   const struct brw_device_info *devinfo = brw->intelScreen->devinfo;
   uint32_t hw_format = brw_format_for_mesa_format(format);
   if (access == GL_WRITE_ONLY) {
      return hw_format;
   } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
      /* Typed surface reads support a very limited subset of the shader
       * image formats.  Translate it into the closest format the
       * hardware supports.
       */
      return isl_lower_storage_image_format(devinfo, hw_format);
   } else {
      /* The hardware doesn't actually support a typed format that we can use
       * so we have to fall back to untyped read/write messages.
       */
      return BRW_SURFACEFORMAT_RAW;
   }
}
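/* For example (illustrative, not from the original source): a write-only
 * image keeps its typed format because typed writes support the full set
 * of shader image formats; a readable image whose format does have a
 * matching typed-read format is lowered to that format, and anything else
 * falls back to RAW with the format conversion done by shader code.
 */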
static void
update_default_image_param(struct brw_context *brw,
                           struct gl_image_unit *u,
                           unsigned surface_idx,
                           struct brw_image_param *param)
{
   memset(param, 0, sizeof(*param));
   param->surface_idx = surface_idx;
   /* Set the swizzling shifts to all-ones to effectively disable
    * swizzling -- see emit_address_calculation() in
    * brw_fs_surface_builder.cpp for a more detailed explanation of
    * these parameters.
    */
   param->swizzling[0] = 0xff;
   param->swizzling[1] = 0xff;
}
static void
update_buffer_image_param(struct brw_context *brw,
                          struct gl_image_unit *u,
                          unsigned surface_idx,
                          struct brw_image_param *param)
{
   struct gl_buffer_object *obj = u->TexObj->BufferObject;

   update_default_image_param(brw, u, surface_idx, param);

   param->size[0] = obj->Size / _mesa_get_format_bytes(u->_ActualFormat);
   param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
}
static void
update_texture_image_param(struct brw_context *brw,
                           struct gl_image_unit *u,
                           unsigned surface_idx,
                           struct brw_image_param *param)
{
   struct intel_mipmap_tree *mt = intel_texture_object(u->TexObj)->mt;

   update_default_image_param(brw, u, surface_idx, param);

   param->size[0] = minify(mt->logical_width0, u->Level);
   param->size[1] = minify(mt->logical_height0, u->Level);
   param->size[2] = (!u->Layered ? 1 :
                     u->TexObj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
                     u->TexObj->Target == GL_TEXTURE_3D ?
                     minify(mt->logical_depth0, u->Level) :
                     mt->logical_depth0);

   intel_miptree_get_image_offset(mt, u->Level, u->_Layer,
                                  &param->offset[0],
                                  &param->offset[1]);

   param->stride[0] = mt->cpp;
   param->stride[1] = mt->pitch / mt->cpp;
   param->stride[2] =
      brw_miptree_get_horizontal_slice_pitch(brw, mt, u->Level);
   param->stride[3] =
      brw_miptree_get_vertical_slice_pitch(brw, mt, u->Level);

   if (mt->tiling == I915_TILING_X) {
      /* An X tile is a rectangular block of 512x8 bytes. */
      param->tiling[0] = _mesa_logbase2(512 / mt->cpp);
      param->tiling[1] = _mesa_logbase2(8);

      if (brw->has_swizzling) {
         /* Right shifts required to swizzle bits 9 and 10 of the memory
          * address with bit 6.
          */
         param->swizzling[0] = 3;
         param->swizzling[1] = 4;
      }
   } else if (mt->tiling == I915_TILING_Y) {
      /* The layout of a Y-tiled surface in memory isn't really fundamentally
       * different to the layout of an X-tiled surface, we simply pretend that
       * the surface is broken up in a number of smaller 16Bx32 tiles, each
       * one arranged in X-major order just like is the case for X-tiling.
       */
      param->tiling[0] = _mesa_logbase2(16 / mt->cpp);
      param->tiling[1] = _mesa_logbase2(32);

      if (brw->has_swizzling) {
         /* Right shift required to swizzle bit 9 of the memory address with
          * bit 6.
          */
         param->swizzling[0] = 3;
      }
   }

   /* 3D textures are arranged in 2D in memory with 2^lod slices per row.  The
    * address calculation algorithm (emit_address_calculation() in
    * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
    * modulus equal to the LOD.
    */
   param->tiling[2] = (u->TexObj->Target == GL_TEXTURE_3D ? u->Level :
                       0);
}
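/* Illustrative tiling parameters (not from the original source): an
 * X-tiled surface with cpp = 4 gets tiling[0] = log2(512 / 4) = 7 and
 * tiling[1] = log2(8) = 3, i.e. each tile spans 128x8 texels, and the
 * shader can split a coordinate into tile and intra-tile parts with
 * shifts and masks instead of divisions.
 */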
static void
update_image_surface(struct brw_context *brw,
                     struct gl_image_unit *u,
                     GLenum access,
                     unsigned surface_idx,
                     uint32_t *surf_offset,
                     struct brw_image_param *param)
{
   if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
      struct gl_texture_object *obj = u->TexObj;
      const unsigned format = get_image_format(brw, u->_ActualFormat, access);

      if (obj->Target == GL_TEXTURE_BUFFER) {
         struct intel_buffer_object *intel_obj =
            intel_buffer_object(obj->BufferObject);
         const unsigned texel_size = (format == BRW_SURFACEFORMAT_RAW ? 1 :
                                      _mesa_get_format_bytes(u->_ActualFormat));

         brw->vtbl.emit_buffer_surface_state(
            brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
            format, intel_obj->Base.Size / texel_size, texel_size,
            access != GL_READ_ONLY);

         update_buffer_image_param(brw, u, surface_idx, param);

      } else {
         struct intel_texture_object *intel_obj = intel_texture_object(obj);
         struct intel_mipmap_tree *mt = intel_obj->mt;

         if (format == BRW_SURFACEFORMAT_RAW) {
            brw->vtbl.emit_buffer_surface_state(
               brw, surf_offset, mt->bo, mt->offset,
               format, mt->bo->size - mt->offset, 1 /* pitch */,
               access != GL_READ_ONLY);

         } else {
            const unsigned min_layer = obj->MinLayer + u->_Layer;
            const unsigned min_level = obj->MinLevel + u->Level;
            const unsigned num_layers = (!u->Layered ? 1 :
                                         obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
                                         mt->logical_depth0);
            const GLenum target = (obj->Target == GL_TEXTURE_CUBE_MAP ||
                                   obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY ?
                                   GL_TEXTURE_2D_ARRAY : obj->Target);
            const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];

            brw->vtbl.emit_texture_surface_state(
               brw, mt, target,
               min_layer, min_layer + num_layers,
               min_level, min_level + 1,
               format, SWIZZLE_XYZW,
               surf_offset, surf_index, access != GL_READ_ONLY, false);
         }

         update_texture_image_param(brw, u, surface_idx, param);
      }

   } else {
      brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
      update_default_image_param(brw, u, surface_idx, param);
   }
}
void
brw_upload_image_surfaces(struct brw_context *brw,
                          struct gl_shader *shader,
                          struct brw_stage_state *stage_state,
                          struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;

   if (shader && shader->NumImages) {
      for (unsigned i = 0; i < shader->NumImages; i++) {
         struct gl_image_unit *u = &ctx->ImageUnits[shader->ImageUnits[i]];
         const unsigned surf_idx = prog_data->binding_table.image_start + i;

         update_image_surface(brw, u, shader->ImageAccess[i],
                              surf_idx,
                              &stage_state->surf_offset[surf_idx],
                              &prog_data->image_param[i]);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
      /* This may have changed the image metadata dependent on the context
       * image unit state and passed to the program as uniforms, make sure
       * that push and pull constants are reuploaded.
       */
      brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
   }
}
static void
brw_upload_wm_image_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct gl_shader_program *prog = ctx->_Shader->_CurrentFragmentProgram;

   if (prog) {
      /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
                                &brw->wm.base, &brw->wm.prog_data->base);
   }
}

const struct brw_tracked_state brw_wm_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS,
   },
   .emit = brw_upload_wm_image_surfaces,
};
void
gen4_init_vtable_surface_functions(struct brw_context *brw)
{
   brw->vtbl.update_texture_surface = brw_update_texture_surface;
   brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
   brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
   brw->vtbl.emit_buffer_surface_state = gen4_emit_buffer_surface_state;
}
static void
brw_upload_cs_work_groups_surface(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   if (prog && brw->cs.prog_data->uses_num_work_groups) {
      const unsigned surf_idx =
         brw->cs.prog_data->binding_table.work_groups_start;
      uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
      drm_intel_bo *bo;
      uint32_t bo_offset;

      if (brw->compute.num_work_groups_bo == NULL) {
         bo = NULL;
         intel_upload_data(brw,
                           (void *)brw->compute.num_work_groups,
                           3 * sizeof(GLuint),
                           sizeof(GLuint),
                           &bo, &bo_offset);
      } else {
         bo = brw->compute.num_work_groups_bo;
         bo_offset = brw->compute.num_work_groups_offset;
      }

      brw->vtbl.emit_buffer_surface_state(brw, surf_offset,
                                          bo, bo_offset,
                                          BRW_SURFACEFORMAT_RAW,
                                          3 * sizeof(GLuint), 1, true);
      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_cs_work_groups_surface = {
   .dirty = {
      .brw = BRW_NEW_BLORP |
             BRW_NEW_CS_WORK_GROUPS
   },
   .emit = brw_upload_cs_work_groups_surface,
};
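/* Illustrative layout note (not from the original source): the surface
 * emitted above is just three tightly packed GLuints -- the X, Y and Z
 * counts passed to glDispatchCompute() -- which the compute shader reads
 * back through the gl_NumWorkGroups built-in.
 */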