1 #include "swrast/swrast.h"
2 #include "main/renderbuffer.h"
3 #include "main/texobj.h"
4 #include "main/teximage.h"
5 #include "main/mipmap.h"
6 #include "drivers/common/meta.h"
7 #include "brw_context.h"
8 #include "intel_buffer_objects.h"
9 #include "intel_mipmap_tree.h"
10 #include "intel_tex.h"
11 #include "intel_fbo.h"
12 #include "intel_reg.h"
14 #define FILE_DEBUG_FLAG DEBUG_TEXTURE
16 static struct gl_texture_image
*
17 intelNewTextureImage(struct gl_context
* ctx
)
19 DBG("%s\n", __func__
);
21 return (struct gl_texture_image
*) CALLOC_STRUCT(intel_texture_image
);
/**
 * ctx->Driver.DeleteTextureImage() handler: no driver-private teardown is
 * needed beyond what core Mesa does (the miptree is released in
 * intel_free_texture_image_buffer()).
 */
static void
intelDeleteTextureImage(struct gl_context * ctx, struct gl_texture_image *img)
{
   /* nothing special (yet) for intel_texture_image */
   _mesa_delete_texture_image(ctx, img);
}
32 static struct gl_texture_object
*
33 intelNewTextureObject(struct gl_context
* ctx
, GLuint name
, GLenum target
)
35 struct intel_texture_object
*obj
= CALLOC_STRUCT(intel_texture_object
);
39 DBG("%s\n", __func__
);
44 _mesa_initialize_texture_object(ctx
, &obj
->base
, name
, target
);
46 obj
->needs_validate
= true;
52 intelDeleteTextureObject(struct gl_context
*ctx
,
53 struct gl_texture_object
*texObj
)
55 struct intel_texture_object
*intelObj
= intel_texture_object(texObj
);
57 intel_miptree_release(&intelObj
->mt
);
58 _mesa_delete_texture_object(ctx
, texObj
);
62 intel_alloc_texture_image_buffer(struct gl_context
*ctx
,
63 struct gl_texture_image
*image
)
65 struct brw_context
*brw
= brw_context(ctx
);
66 struct intel_texture_image
*intel_image
= intel_texture_image(image
);
67 struct gl_texture_object
*texobj
= image
->TexObject
;
68 struct intel_texture_object
*intel_texobj
= intel_texture_object(texobj
);
70 assert(image
->Border
== 0);
72 /* Quantize sample count */
73 if (image
->NumSamples
) {
74 image
->NumSamples
= intel_quantize_num_samples(brw
->intelScreen
, image
->NumSamples
);
75 if (!image
->NumSamples
)
79 /* Because the driver uses AllocTextureImageBuffer() internally, it may end
80 * up mismatched with FreeTextureImageBuffer(), but that is safe to call
83 ctx
->Driver
.FreeTextureImageBuffer(ctx
, image
);
85 if (!_swrast_init_texture_image(image
))
88 if (intel_texobj
->mt
&&
89 intel_miptree_match_image(intel_texobj
->mt
, image
)) {
90 intel_miptree_reference(&intel_image
->mt
, intel_texobj
->mt
);
91 DBG("%s: alloc obj %p level %d %dx%dx%d using object's miptree %p\n",
92 __func__
, texobj
, image
->Level
,
93 image
->Width
, image
->Height
, image
->Depth
, intel_texobj
->mt
);
95 intel_image
->mt
= intel_miptree_create_for_teximage(brw
, intel_texobj
,
99 /* Even if the object currently has a mipmap tree associated
100 * with it, this one is a more likely candidate to represent the
101 * whole object since our level didn't fit what was there
102 * before, and any lower levels would fit into our miptree.
104 intel_miptree_reference(&intel_texobj
->mt
, intel_image
->mt
);
106 DBG("%s: alloc obj %p level %d %dx%dx%d using new miptree %p\n",
107 __func__
, texobj
, image
->Level
,
108 image
->Width
, image
->Height
, image
->Depth
, intel_image
->mt
);
111 intel_texobj
->needs_validate
= true;
117 * ctx->Driver.AllocTextureStorage() handler.
119 * Compare this to _mesa_AllocTextureStorage_sw, which would call into
120 * intel_alloc_texture_image_buffer() above.
123 intel_alloc_texture_storage(struct gl_context
*ctx
,
124 struct gl_texture_object
*texobj
,
125 GLsizei levels
, GLsizei width
,
126 GLsizei height
, GLsizei depth
)
128 struct brw_context
*brw
= brw_context(ctx
);
129 struct intel_texture_object
*intel_texobj
= intel_texture_object(texobj
);
130 struct gl_texture_image
*first_image
= texobj
->Image
[0][0];
131 int num_samples
= intel_quantize_num_samples(brw
->intelScreen
,
132 first_image
->NumSamples
);
133 const int numFaces
= _mesa_num_tex_faces(texobj
->Target
);
137 /* If the object's current miptree doesn't match what we need, make a new
140 if (!intel_texobj
->mt
||
141 !intel_miptree_match_image(intel_texobj
->mt
, first_image
) ||
142 intel_texobj
->mt
->last_level
!= levels
- 1) {
143 intel_miptree_release(&intel_texobj
->mt
);
144 intel_texobj
->mt
= intel_miptree_create(brw
, texobj
->Target
,
145 first_image
->TexFormat
,
147 width
, height
, depth
,
149 MIPTREE_LAYOUT_TILING_ANY
);
151 if (intel_texobj
->mt
== NULL
) {
156 for (face
= 0; face
< numFaces
; face
++) {
157 for (level
= 0; level
< levels
; level
++) {
158 struct gl_texture_image
*image
= texobj
->Image
[face
][level
];
159 struct intel_texture_image
*intel_image
= intel_texture_image(image
);
161 image
->NumSamples
= num_samples
;
163 _swrast_free_texture_image_buffer(ctx
, image
);
164 if (!_swrast_init_texture_image(image
))
167 intel_miptree_reference(&intel_image
->mt
, intel_texobj
->mt
);
171 /* The miptree is in a validated state, so no need to check later. */
172 intel_texobj
->needs_validate
= false;
173 intel_texobj
->validated_first_level
= 0;
174 intel_texobj
->validated_last_level
= levels
- 1;
175 intel_texobj
->_Format
= intel_texobj
->mt
->format
;
182 intel_free_texture_image_buffer(struct gl_context
* ctx
,
183 struct gl_texture_image
*texImage
)
185 struct intel_texture_image
*intelImage
= intel_texture_image(texImage
);
187 DBG("%s\n", __func__
);
189 intel_miptree_release(&intelImage
->mt
);
191 _swrast_free_texture_image_buffer(ctx
, texImage
);
195 * Map texture memory/buffer into user space.
196 * Note: the region of interest parameters are ignored here.
197 * \param mode bitmask of GL_MAP_READ_BIT, GL_MAP_WRITE_BIT
198 * \param mapOut returns start of mapping of region of interest
199 * \param rowStrideOut returns row stride in bytes
202 intel_map_texture_image(struct gl_context
*ctx
,
203 struct gl_texture_image
*tex_image
,
205 GLuint x
, GLuint y
, GLuint w
, GLuint h
,
210 struct brw_context
*brw
= brw_context(ctx
);
211 struct intel_texture_image
*intel_image
= intel_texture_image(tex_image
);
212 struct intel_mipmap_tree
*mt
= intel_image
->mt
;
215 /* Our texture data is always stored in a miptree. */
218 /* Check that our caller wasn't confused about how to map a 1D texture. */
219 assert(tex_image
->TexObject
->Target
!= GL_TEXTURE_1D_ARRAY
||
222 /* intel_miptree_map operates on a unified "slice" number that references the
223 * cube face, since it's all just slices to the miptree code.
225 if (tex_image
->TexObject
->Target
== GL_TEXTURE_CUBE_MAP
)
226 slice
= tex_image
->Face
;
228 intel_miptree_map(brw
, mt
,
229 tex_image
->Level
+ tex_image
->TexObject
->MinLevel
,
230 slice
+ tex_image
->TexObject
->MinLayer
,
232 (void **)map
, &stride
);
234 *out_stride
= stride
;
238 intel_unmap_texture_image(struct gl_context
*ctx
,
239 struct gl_texture_image
*tex_image
, GLuint slice
)
241 struct brw_context
*brw
= brw_context(ctx
);
242 struct intel_texture_image
*intel_image
= intel_texture_image(tex_image
);
243 struct intel_mipmap_tree
*mt
= intel_image
->mt
;
245 if (tex_image
->TexObject
->Target
== GL_TEXTURE_CUBE_MAP
)
246 slice
= tex_image
->Face
;
248 intel_miptree_unmap(brw
, mt
,
249 tex_image
->Level
+ tex_image
->TexObject
->MinLevel
,
250 slice
+ tex_image
->TexObject
->MinLayer
);
254 intel_texture_view(struct gl_context
*ctx
,
255 struct gl_texture_object
*texObj
,
256 struct gl_texture_object
*origTexObj
)
258 struct brw_context
*brw
= brw_context(ctx
);
259 struct intel_texture_object
*intel_tex
= intel_texture_object(texObj
);
260 struct intel_texture_object
*intel_orig_tex
= intel_texture_object(origTexObj
);
262 assert(intel_orig_tex
->mt
);
263 intel_miptree_reference(&intel_tex
->mt
, intel_orig_tex
->mt
);
265 /* Since we can only make views of immutable-format textures,
266 * we can assume that everything is in origTexObj's miptree.
268 * Mesa core has already made us a copy of all the teximage objects,
269 * except it hasn't copied our mt pointers, etc.
271 const int numFaces
= _mesa_num_tex_faces(texObj
->Target
);
272 const int numLevels
= texObj
->NumLevels
;
277 for (face
= 0; face
< numFaces
; face
++) {
278 for (level
= 0; level
< numLevels
; level
++) {
279 struct gl_texture_image
*image
= texObj
->Image
[face
][level
];
280 struct intel_texture_image
*intel_image
= intel_texture_image(image
);
282 intel_miptree_reference(&intel_image
->mt
, intel_orig_tex
->mt
);
286 /* The miptree is in a validated state, so no need to check later. */
287 intel_tex
->needs_validate
= false;
288 intel_tex
->validated_first_level
= 0;
289 intel_tex
->validated_last_level
= numLevels
- 1;
291 /* Set the validated texture format, with the same adjustments that
292 * would have been applied to determine the underlying texture's
295 intel_tex
->_Format
= intel_depth_format_for_depthstencil_format(
296 intel_lower_compressed_format(brw
, texObj
->Image
[0][0]->TexFormat
));
302 intel_set_texture_storage_for_buffer_object(struct gl_context
*ctx
,
303 struct gl_texture_object
*tex_obj
,
304 struct gl_buffer_object
*buffer_obj
,
305 uint32_t buffer_offset
,
309 struct brw_context
*brw
= brw_context(ctx
);
310 struct intel_texture_object
*intel_texobj
= intel_texture_object(tex_obj
);
311 struct gl_texture_image
*image
= tex_obj
->Image
[0][0];
312 struct intel_texture_image
*intel_image
= intel_texture_image(image
);
313 struct intel_buffer_object
*intel_buffer_obj
= intel_buffer_object(buffer_obj
);
316 /* Renderbuffers have the restriction that the buffer offset and
317 * surface pitch must be a multiple of the element size. If it's
318 * not, we have to fail and fall back to software.
320 int cpp
= _mesa_get_format_bytes(image
->TexFormat
);
321 if (buffer_offset
% cpp
|| row_stride
% cpp
) {
322 perf_debug("Bad PBO alignment; fallback to CPU mapping\n");
326 if (!brw
->format_supported_as_render_target
[image
->TexFormat
]) {
327 perf_debug("Non-renderable PBO format; fallback to CPU mapping\n");
332 assert(intel_texobj
->mt
== NULL
);
334 drm_intel_bo
*bo
= intel_bufferobj_buffer(brw
, intel_buffer_obj
,
336 row_stride
* image
->Height
);
338 intel_miptree_create_for_bo(brw
, bo
,
341 image
->Width
, image
->Height
, image
->Depth
,
344 if (!intel_texobj
->mt
)
347 if (!_swrast_init_texture_image(image
))
350 intel_miptree_reference(&intel_image
->mt
, intel_texobj
->mt
);
352 /* The miptree is in a validated state, so no need to check later. */
353 intel_texobj
->needs_validate
= false;
354 intel_texobj
->validated_first_level
= 0;
355 intel_texobj
->validated_last_level
= 0;
356 intel_texobj
->_Format
= intel_texobj
->mt
->format
;
362 intel_texture_barrier(struct gl_context
*ctx
)
364 struct brw_context
*brw
= brw_context(ctx
);
368 /* [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache
369 * Flush Enable = 1, a PIPE_CONTROL with any non-zero
370 * post-sync-op is required.
372 brw_emit_post_sync_nonzero_flush(brw
);
375 brw_emit_pipe_control_flush(brw
,
376 PIPE_CONTROL_DEPTH_CACHE_FLUSH
|
377 PIPE_CONTROL_RENDER_TARGET_FLUSH
|
378 PIPE_CONTROL_CS_STALL
);
380 brw_emit_pipe_control_flush(brw
,
381 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE
);
383 brw_emit_mi_flush(brw
);
388 intelInitTextureFuncs(struct dd_function_table
*functions
)
390 functions
->NewTextureObject
= intelNewTextureObject
;
391 functions
->NewTextureImage
= intelNewTextureImage
;
392 functions
->DeleteTextureImage
= intelDeleteTextureImage
;
393 functions
->DeleteTexture
= intelDeleteTextureObject
;
394 functions
->AllocTextureImageBuffer
= intel_alloc_texture_image_buffer
;
395 functions
->FreeTextureImageBuffer
= intel_free_texture_image_buffer
;
396 functions
->AllocTextureStorage
= intel_alloc_texture_storage
;
397 functions
->MapTextureImage
= intel_map_texture_image
;
398 functions
->UnmapTextureImage
= intel_unmap_texture_image
;
399 functions
->TextureView
= intel_texture_view
;
400 functions
->SetTextureStorageForBufferObject
=
401 intel_set_texture_storage_for_buffer_object
;
402 functions
->TextureBarrier
= intel_texture_barrier
;