i965: Update brw_wm_debug_recompile() for newer key entries.
[mesa.git] / src / mesa / drivers / dri / i965 / intel_mipmap_tree.h
1 /*
2 * Copyright 2006 VMware, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26 /** @file intel_mipmap_tree.h
27 *
28 * This file defines the structure that wraps a BO and describes how the
29 * mipmap levels and slices of a texture are laid out.
30 *
31 * The hardware has a fixed layout of a texture depending on parameters such
32 * as the target/type (2D, 3D, CUBE), width, height, pitch, and number of
33 * mipmap levels. The individual level/layer slices are each 2D rectangles of
34 * pixels at some x/y offset from the start of the brw_bo.
35 *
36 * Original OpenGL allowed texture miplevels to be specified in arbitrary
37 * order, and a texture may change size over time. Thus, each
38 * intel_texture_image has a reference to a miptree that contains the pixel
39 * data sized appropriately for it, which will later be referenced by/copied
40 * to the intel_texture_object at draw time (intel_finalize_mipmap_tree()) so
41 * that there's a single miptree for the complete texture.
42 */
43
44 #ifndef INTEL_MIPMAP_TREE_H
45 #define INTEL_MIPMAP_TREE_H
46
47 #include <assert.h>
48
49 #include "main/mtypes.h"
50 #include "isl/isl.h"
51 #include "blorp/blorp.h"
52 #include "brw_bufmgr.h"
53 #include "brw_context.h"
54 #include <GL/internal/dri_interface.h>
55
56 #ifdef __cplusplus
57 extern "C" {
58 #endif
59
/* Forward declarations: the full definitions live in other i965 headers;
 * only pointers to these types are used here.
 */
struct brw_context;
struct intel_renderbuffer;

struct intel_texture_image;

/**
 * This bit extends the set of GL_MAP_*_BIT enums.
 *
 * When calling intel_miptree_map() on an ETC-transcoded-to-RGB miptree or a
 * depthstencil-split-to-separate-stencil miptree, we'll normally make a
 * temporary and recreate the kind of data requested by Mesa core, since we're
 * satisfying some glGetTexImage() request or something.
 *
 * However, occasionally you want to actually map the miptree's current data
 * without transcoding back. This flag to intel_miptree_map() gets you that.
 */
#define BRW_MAP_DIRECT_BIT 0x80000000

/**
 * Book-keeping for a single active intel_miptree_map()/unmap() cycle on one
 * slice of a miptree.  \see intel_mipmap_slice::map
 */
struct intel_miptree_map {
   /** Bitfield of GL_MAP_*_BIT and BRW_MAP_*_BIT. */
   GLbitfield mode;
   /** Region of interest for the map. */
   int x, y, w, h;
   /** Possibly malloced temporary buffer for the mapping. */
   void *buffer;
   /** Possible pointer to a temporary linear miptree for the mapping. */
   struct intel_mipmap_tree *linear_mt;
   /** Pointer to the start of (map_x, map_y) returned by the mapping. */
   void *ptr;
   /** Stride of the mapping (NOTE(review): presumably in bytes; confirm). */
   int stride;
};
92
/**
 * Describes the location of each texture image within a miptree.
 */
struct intel_mipmap_level
{
   /** Offset to this miptree level, used in computing x_offset. */
   GLuint level_x;
   /** Offset to this miptree level, used in computing y_offset. */
   GLuint level_y;

   /**
    * \brief Is HiZ enabled for this level?
    *
    * If \c mt->level[l].has_hiz is set, then (1) \c mt->hiz_mt has been
    * allocated and (2) the HiZ memory for the slices in this level reside at
    * \c mt->hiz_mt->level[l].
    */
   bool has_hiz;

   /**
    * \brief List of 2D images in this mipmap level.
    *
    * This may be a list of cube faces, array slices in 2D array texture, or
    * layers in a 3D texture. The list's length is \c depth.
    */
   struct intel_mipmap_slice {
      /**
       * Mapping information. Persistent for the duration of
       * intel_miptree_map/unmap on this slice.
       *
       * NOTE(review): presumably NULL while the slice is not mapped; confirm
       * against intel_miptree_map()/unmap().
       */
      struct intel_miptree_map *map;
   } *slice;
};
126
/**
 * Miptree aux buffer. These buffers are associated with a miptree, but the
 * format is managed by the hardware.
 *
 * For Gen7+, we always give the hardware the start of the buffer, and let it
 * handle all accesses to the buffer. Therefore we don't need the full miptree
 * layout structure for this buffer.
 *
 * Used for both the HiZ buffer (intel_mipmap_tree::hiz_buf) and the MCS/CCS
 * buffer (intel_mipmap_tree::mcs_buf).
 */
struct intel_miptree_aux_buffer
{
   struct isl_surf surf;

   /**
    * Buffer object containing the pixel data.
    *
    * @see RENDER_SURFACE_STATE.AuxiliarySurfaceBaseAddress
    * @see 3DSTATE_HIER_DEPTH_BUFFER.AuxiliarySurfaceBaseAddress
    */
   struct brw_bo *bo;

   /**
    * Offset into bo where the surface starts.
    *
    * @see intel_miptree_aux_buffer::bo
    *
    * @see RENDER_SURFACE_STATE.AuxiliarySurfaceBaseAddress
    * @see 3DSTATE_DEPTH_BUFFER.SurfaceBaseAddress
    * @see 3DSTATE_HIER_DEPTH_BUFFER.SurfaceBaseAddress
    * @see 3DSTATE_STENCIL_BUFFER.SurfaceBaseAddress
    */
   uint32_t offset;

   /**
    * Size of the aux surface.
    *
    * This is needed when doing any gtt mapped operations on the buffer (which
    * will be Y-tiled). It is possible that it will not be the same as bo->size
    * when the drm allocator rounds up the requested size.
    */
   size_t size;

   /**
    * Pitch in bytes.
    *
    * @see RENDER_SURFACE_STATE.AuxiliarySurfacePitch
    * @see 3DSTATE_HIER_DEPTH_BUFFER.SurfacePitch
    */
   uint32_t pitch;

   /**
    * The distance in rows between array slices.
    *
    * @see RENDER_SURFACE_STATE.AuxiliarySurfaceQPitch
    * @see 3DSTATE_HIER_DEPTH_BUFFER.SurfaceQPitch
    */
   uint32_t qpitch;
};
184
struct intel_mipmap_tree
{
   struct isl_surf surf;

   /**
    * Buffer object containing the surface.
    *
    * @see intel_mipmap_tree::offset
    * @see RENDER_SURFACE_STATE.SurfaceBaseAddress
    * @see RENDER_SURFACE_STATE.AuxiliarySurfaceBaseAddress
    * @see 3DSTATE_DEPTH_BUFFER.SurfaceBaseAddress
    * @see 3DSTATE_HIER_DEPTH_BUFFER.SurfaceBaseAddress
    * @see 3DSTATE_STENCIL_BUFFER.SurfaceBaseAddress
    */
   struct brw_bo *bo;

   /**
    * @brief One of GL_TEXTURE_2D, GL_TEXTURE_2D_ARRAY, etc.
    *
    * @see RENDER_SURFACE_STATE.SurfaceType
    * @see RENDER_SURFACE_STATE.SurfaceArray
    * @see 3DSTATE_DEPTH_BUFFER.SurfaceType
    */
   GLenum target;

   /**
    * Generally, this is just the same as the gl_texture_image->TexFormat or
    * gl_renderbuffer->Format.
    *
    * However, for textures and renderbuffers with packed depth/stencil formats
    * on hardware where we want or need to use separate stencil, there will be
    * two miptrees for storing the data. If the depthstencil texture or rb is
    * MESA_FORMAT_Z32_FLOAT_S8X24_UINT, then mt->format will be
    * MESA_FORMAT_Z_FLOAT32, otherwise for MESA_FORMAT_Z24_UNORM_S8_UINT
    * objects it will be MESA_FORMAT_Z24_UNORM_X8_UINT.
    *
    * For ETC1/ETC2 textures, this is one of the uncompressed mesa texture
    * formats if the hardware lacks support for ETC1/ETC2. See @ref etc_format.
    *
    * @see RENDER_SURFACE_STATE.SurfaceFormat
    * @see 3DSTATE_DEPTH_BUFFER.SurfaceFormat
    */
   mesa_format format;

   /**
    * This variable stores the value of ETC compressed texture format
    *
    * @see RENDER_SURFACE_STATE.SurfaceFormat
    */
   mesa_format etc_format;

   /** Range of mip levels mirrored by this miptree (inclusive). */
   GLuint first_level;
   GLuint last_level;

   /** Bytes per pixel (or bytes per block if compressed) */
   GLuint cpp;

   /** Whether \c format is a compressed (block-based) format. */
   bool compressed;

   /* Includes image offset tables: */
   struct intel_mipmap_level level[MAX_TEXTURE_LEVELS];

   /**
    * Offset into bo where the surface starts.
    *
    * @see intel_mipmap_tree::bo
    *
    * @see RENDER_SURFACE_STATE.AuxiliarySurfaceBaseAddress
    * @see 3DSTATE_DEPTH_BUFFER.SurfaceBaseAddress
    * @see 3DSTATE_HIER_DEPTH_BUFFER.SurfaceBaseAddress
    * @see 3DSTATE_STENCIL_BUFFER.SurfaceBaseAddress
    */
   uint32_t offset;

   /**
    * \brief HiZ aux buffer
    *
    * To allocate the hiz buffer, use intel_miptree_alloc_hiz().
    *
    * To determine if hiz is enabled, do not check this pointer. Instead, use
    * intel_miptree_level_has_hiz().
    */
   struct intel_miptree_aux_buffer *hiz_buf;

   /**
    * \brief The type of auxiliary compression used by this miptree.
    *
    * This describes the type of auxiliary compression that is intended to be
    * used by this miptree. An aux usage of ISL_AUX_USAGE_NONE means that
    * auxiliary compression is permanently disabled. An aux usage other than
    * ISL_AUX_USAGE_NONE does not imply that the auxiliary buffer has actually
    * been allocated nor does it imply that auxiliary compression will always
    * be enabled for this surface. For instance, with CCS_D, we may allocate
    * the CCS on-the-fly and it may not be used for texturing if the miptree
    * is fully resolved.
    */
   enum isl_aux_usage aux_usage;

   /**
    * \brief Whether or not this miptree supports fast clears.
    */
   bool supports_fast_clear;

   /**
    * \brief Maps miptree slices to their current aux state
    *
    * This two-dimensional array is indexed as [level][layer] and stores an
    * aux state for each slice.
    */
   enum isl_aux_state **aux_state;

   /**
    * \brief Stencil miptree for depthstencil textures.
    *
    * This miptree is used for depthstencil textures and renderbuffers that
    * require separate stencil. It always has the true copy of the stencil
    * bits, regardless of mt->format.
    *
    * \see 3DSTATE_STENCIL_BUFFER
    * \see intel_miptree_map_depthstencil()
    * \see intel_miptree_unmap_depthstencil()
    */
   struct intel_mipmap_tree *stencil_mt;

   /**
    * \brief Stencil texturing miptree for sampling from a stencil texture
    *
    * Some hardware doesn't support sampling from the stencil texture as
    * required by the GL_ARB_stencil_texturing extension. To workaround this we
    * blit the texture into a new texture that can be sampled.
    *
    * \see intel_update_r8stencil()
    */
   struct intel_mipmap_tree *r8stencil_mt;
   bool r8stencil_needs_update;

   /**
    * \brief MCS auxiliary buffer.
    *
    * This buffer contains the "multisample control surface", which stores
    * the necessary information to implement compressed MSAA
    * (INTEL_MSAA_FORMAT_CMS) and "fast color clear" behaviour on Gen7+.
    *
    * NULL if no MCS buffer is in use for this surface.
    */
   struct intel_miptree_aux_buffer *mcs_buf;

   /**
    * Planes 1 and 2 in case this is a planar surface.
    */
   struct intel_mipmap_tree *plane[2];

   /**
    * Fast clear color for this surface. For depth surfaces, the clear value
    * is stored as a float32 in the red component.
    */
   union isl_color_value fast_clear_color;

   /**
    * For external surfaces, this is DRM format modifier that was used to
    * create or import the surface. For internal surfaces, this will always
    * be DRM_FORMAT_MOD_INVALID.
    */
   uint64_t drm_modifier;

   /* These are also refcounted:
    */
   GLuint refcount;
};
354
/**
 * \brief Allocate the CCS auxiliary buffer for \p mt.
 *
 * \see intel_miptree_alloc_hiz()
 * \return presumably false on allocation failure (matches alloc_hiz) —
 *         NOTE(review): confirm in intel_mipmap_tree.c.
 */
bool
intel_miptree_alloc_ccs(struct brw_context *brw,
                        struct intel_mipmap_tree *mt);

/** Flags controlling miptree allocation in intel_miptree_create*(). */
enum intel_miptree_create_flags {
   /** No miptree create flags */
   MIPTREE_CREATE_DEFAULT = 0,

   /** Miptree creation should try to allocate a currently busy BO
    *
    * This may be advantageous if we know the next thing to touch the BO will
    * be the GPU because the BO will likely already be in the GTT and maybe
    * even in some caches. If there is a chance that the next thing to touch
    * the miptree BO will be the CPU, this flag should not be set.
    */
   MIPTREE_CREATE_BUSY = 1 << 0,

   /** Create a linear (not tiled) miptree */
   MIPTREE_CREATE_LINEAR = 1 << 1,

   /** Create the miptree with auxiliary compression disabled
    *
    * This does not prevent the caller of intel_miptree_create from coming
    * along later and turning auxiliary compression back on but it does mean
    * that the miptree will be created with mt->aux_usage == NONE.
    */
   MIPTREE_CREATE_NO_AUX = 1 << 2,
};
383
/** Allocate a new miptree (and its backing BO) with the given layout. */
struct intel_mipmap_tree *intel_miptree_create(struct brw_context *brw,
                                               GLenum target,
                                               mesa_format format,
                                               GLuint first_level,
                                               GLuint last_level,
                                               GLuint width0,
                                               GLuint height0,
                                               GLuint depth0,
                                               GLuint num_samples,
                                               enum intel_miptree_create_flags flags);

/** Wrap an existing BO (at \p offset, with \p pitch) in a miptree. */
struct intel_mipmap_tree *
intel_miptree_create_for_bo(struct brw_context *brw,
                            struct brw_bo *bo,
                            mesa_format format,
                            uint32_t offset,
                            uint32_t width,
                            uint32_t height,
                            uint32_t depth,
                            int pitch,
                            enum intel_miptree_create_flags flags);

/** Create a miptree for an imported __DRIimage (EGLImage/winsys surface). */
struct intel_mipmap_tree *
intel_miptree_create_for_dri_image(struct brw_context *brw,
                                   __DRIimage *image,
                                   GLenum target,
                                   mesa_format format,
                                   bool is_winsys_image);

/**
 * (Re)build the miptree backing a window-system renderbuffer.
 * NOTE(review): presumably handles the multisample case via
 * \p singlesample_mt; confirm in intel_mipmap_tree.c.
 */
bool
intel_update_winsys_renderbuffer_miptree(struct brw_context *intel,
                                         struct intel_renderbuffer *irb,
                                         struct intel_mipmap_tree *singlesample_mt,
                                         uint32_t width, uint32_t height,
                                         uint32_t pitch);
419
/**
 * Create a miptree appropriate as the storage for a non-texture renderbuffer.
 * The miptree has the following properties:
 *   - The target is GL_TEXTURE_2D.
 *   - There are no levels other than the base level 0.
 *   - Depth is 1.
 */
struct intel_mipmap_tree*
intel_miptree_create_for_renderbuffer(struct brw_context *brw,
                                      mesa_format format,
                                      uint32_t width,
                                      uint32_t height,
                                      uint32_t num_samples);

/** Map a packed depthstencil format to its depth-only counterpart. */
mesa_format
intel_depth_format_for_depthstencil_format(mesa_format format);

/**
 * Map an ETC1/ETC2 format to the uncompressed format used when the hardware
 * lacks ETC support.  \see intel_mipmap_tree::etc_format
 */
mesa_format
intel_lower_compressed_format(struct brw_context *brw, mesa_format format);

/** Number of array slices / 3D layers at \p level. */
unsigned
brw_get_num_logical_layers(const struct intel_mipmap_tree *mt, unsigned level);

/** \brief Assert that the level and layer are valid for the miptree. */
void
intel_miptree_check_level_layer(const struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer);

/** Make *dst point at src, managing refcounts (releases any previous *dst). */
void intel_miptree_reference(struct intel_mipmap_tree **dst,
                             struct intel_mipmap_tree *src);

/** Drop a reference to *mt and NULL it out; frees on last reference. */
void intel_miptree_release(struct intel_mipmap_tree **mt);
453
/* Check if an image fits an existing mipmap tree layout
 */
bool intel_miptree_match_image(struct intel_mipmap_tree *mt,
                               struct gl_texture_image *image);

/**
 * Look up the x/y offset of image (level, slice) within the miptree.
 * \see intel_mipmap_level::level_x
 */
void
intel_miptree_get_image_offset(const struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               GLuint *x, GLuint *y);

/** Map a GL texture target to the ISL surface dimensionality. */
enum isl_surf_dim
get_isl_surf_dim(GLenum target);

enum isl_dim_layout
get_isl_dim_layout(const struct gen_device_info *devinfo,
                   enum isl_tiling tiling, GLenum target);

enum isl_aux_usage
intel_miptree_get_aux_isl_usage(const struct brw_context *brw,
                                const struct intel_mipmap_tree *mt);

void
intel_get_image_dims(struct gl_texture_image *image,
                     int *width, int *height, int *depth);

/** Compute the x/y coordinate masks for a tile of the given tiling/cpp. */
void
intel_get_tile_masks(enum isl_tiling tiling, uint32_t cpp,
                     uint32_t *mask_x, uint32_t *mask_y);

/** Compute the tile width/height for the given tiling/cpp. */
void
intel_get_tile_dims(enum isl_tiling tiling, uint32_t cpp,
                    uint32_t *tile_w, uint32_t *tile_h);

uint32_t
intel_miptree_get_tile_offsets(const struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               uint32_t *tile_x,
                               uint32_t *tile_y);
uint32_t
intel_miptree_get_aligned_offset(const struct intel_mipmap_tree *mt,
                                 uint32_t x, uint32_t y);

/** Copy one (level, layer) slice from src_mt into dst_mt. */
void
intel_miptree_copy_slice(struct brw_context *brw,
                         struct intel_mipmap_tree *src_mt,
                         unsigned src_level, unsigned src_layer,
                         struct intel_mipmap_tree *dst_mt,
                         unsigned dst_level, unsigned dst_layer);

/** Copy a texture image's data into the matching slot of dst_mt. */
void
intel_miptree_copy_teximage(struct brw_context *brw,
                            struct intel_texture_image *intelImage,
                            struct intel_mipmap_tree *dst_mt);
507
/**
 * \name Miptree HiZ functions
 * \{
 *
 * It is safe to call the "slice_set_need_resolve" and "slice_resolve"
 * functions on a miptree without HiZ. In that case, each function is a no-op.
 */

/**
 * \brief Allocate the miptree's embedded HiZ miptree.
 * \see intel_mipmap_tree::hiz_buf
 * \return false if allocation failed
 */
bool
intel_miptree_alloc_hiz(struct brw_context *brw,
                        struct intel_mipmap_tree *mt);

/**
 * \brief Return whether HiZ is enabled for \p level.
 * \see intel_mipmap_level::has_hiz
 */
bool
intel_miptree_level_has_hiz(const struct intel_mipmap_tree *mt, uint32_t level);

/**\}*/

/** Whether any slice in the given range still has unresolved color data. */
bool
intel_miptree_has_color_unresolved(const struct intel_mipmap_tree *mt,
                                   unsigned start_level, unsigned num_levels,
                                   unsigned start_layer, unsigned num_layers);


/* Sentinels meaning "every remaining layer/level above the start" for the
 * prepare/finish functions below.
 */
#define INTEL_REMAINING_LAYERS UINT32_MAX
#define INTEL_REMAINING_LEVELS UINT32_MAX
538
/** Prepare a miptree for access
 *
 * This function should be called prior to any access to miptree in order to
 * perform any needed resolves.
 *
 * \param[in]  start_level  The first mip level to be accessed
 *
 * \param[in]  num_levels   The number of miplevels to be accessed or
 *                          INTEL_REMAINING_LEVELS to indicate every level
 *                          above start_level will be accessed
 *
 * \param[in]  start_layer  The first array slice or 3D layer to be accessed
 *
 * \param[in]  num_layers   The number of array slices or 3D layers be
 *                          accessed or INTEL_REMAINING_LAYERS to indicate
 *                          every layer above start_layer will be accessed
 *
 * \param[in]  aux_usage    The type of auxiliary compression the access
 *                          will use; this must be ISL_AUX_USAGE_NONE for
 *                          accesses which do not understand the miptree's
 *                          auxiliary compression format
 *
 * \param[in]  fast_clear_supported  Whether or not the access will support
 *                                   fast clears in the miptree's auxiliary
 *                                   compression format
 */
void
intel_miptree_prepare_access(struct brw_context *brw,
                             struct intel_mipmap_tree *mt,
                             uint32_t start_level, uint32_t num_levels,
                             uint32_t start_layer, uint32_t num_layers,
                             enum isl_aux_usage aux_usage,
                             bool fast_clear_supported);

/** Complete a write operation
 *
 * This function should be called after any operation writes to a miptree.
 * This will update the miptree's compression state so that future resolves
 * happen correctly. Technically, this function can be called before the
 * write occurs but the caller must ensure that they don't interlace
 * intel_miptree_prepare_access and intel_miptree_finish_write calls to
 * overlapping layer/level ranges.
 *
 * \param[in]  level        The mip level that was written
 *
 * \param[in]  start_layer  The first array slice or 3D layer written
 *
 * \param[in]  num_layers   The number of array slices or 3D layers
 *                          written or INTEL_REMAINING_LAYERS to indicate
 *                          every layer above start_layer was written
 *
 * \param[in]  aux_usage    The type of auxiliary compression the write was
 *                          done with; ISL_AUX_USAGE_NONE if the write did
 *                          not use auxiliary compression
 */
void
intel_miptree_finish_write(struct brw_context *brw,
                           struct intel_mipmap_tree *mt, uint32_t level,
                           uint32_t start_layer, uint32_t num_layers,
                           enum isl_aux_usage aux_usage);

/** Get the auxiliary compression state of a miptree slice */
enum isl_aux_state
intel_miptree_get_aux_state(const struct intel_mipmap_tree *mt,
                            uint32_t level, uint32_t layer);

/** Set the auxiliary compression state of a miptree slice range
 *
 * This function directly sets the auxiliary compression state of a slice
 * range of a miptree. It only modifies data structures and does not do any
 * resolves. This should only be called by code which directly performs
 * compression operations such as fast clears and resolves. Most code should
 * use intel_miptree_prepare_access or intel_miptree_finish_write.
 */
void
intel_miptree_set_aux_state(struct brw_context *brw,
                            struct intel_mipmap_tree *mt, uint32_t level,
                            uint32_t start_layer, uint32_t num_layers,
                            enum isl_aux_state aux_state);
616
617 /**
618 * Prepare a miptree for raw access
619 *
620 * This helper prepares the miptree for access that knows nothing about any
621 * sort of compression whatsoever. This is useful when mapping the surface or
622 * using it with the blitter.
623 */
624 static inline void
625 intel_miptree_access_raw(struct brw_context *brw,
626 struct intel_mipmap_tree *mt,
627 uint32_t level, uint32_t layer,
628 bool write)
629 {
630 intel_miptree_prepare_access(brw, mt, level, 1, layer, 1, false, false);
631 if (write)
632 intel_miptree_finish_write(brw, mt, level, layer, 1, false);
633 }
634
/* Resolve management for specific access types: the *_aux_usage helpers pick
 * the aux usage for an access, the prepare_* helpers resolve before it, and
 * the finish_* helpers record writes afterwards.
 * \see intel_miptree_prepare_access
 * \see intel_miptree_finish_write
 */

/** Aux usage to use when sampling \p mt through \p view_format. */
enum isl_aux_usage
intel_miptree_texture_aux_usage(struct brw_context *brw,
                                struct intel_mipmap_tree *mt,
                                enum isl_format view_format);
/** Resolve as needed before sampling the given level/layer range. */
void
intel_miptree_prepare_texture(struct brw_context *brw,
                              struct intel_mipmap_tree *mt,
                              enum isl_format view_format,
                              uint32_t start_level, uint32_t num_levels,
                              uint32_t start_layer, uint32_t num_layers,
                              bool disable_aux);
/** Resolve as needed before image load/store access. */
void
intel_miptree_prepare_image(struct brw_context *brw,
                            struct intel_mipmap_tree *mt);

/** Aux usage to use when rendering to \p mt with \p render_format. */
enum isl_aux_usage
intel_miptree_render_aux_usage(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
                               enum isl_format render_format,
                               bool blend_enabled);
/** Resolve as needed before rendering to the given level/layer range. */
void
intel_miptree_prepare_render(struct brw_context *brw,
                             struct intel_mipmap_tree *mt, uint32_t level,
                             uint32_t start_layer, uint32_t layer_count,
                             enum isl_format render_format,
                             bool blend_enabled);
/** Record a render to the given level/layer range. */
void
intel_miptree_finish_render(struct brw_context *brw,
                            struct intel_mipmap_tree *mt, uint32_t level,
                            uint32_t start_layer, uint32_t layer_count,
                            enum isl_format render_format,
                            bool blend_enabled);
/** Resolve as needed before depth access to the given level/layer range. */
void
intel_miptree_prepare_depth(struct brw_context *brw,
                            struct intel_mipmap_tree *mt, uint32_t level,
                            uint32_t start_layer, uint32_t layer_count);
/** Record a depth access; \p depth_written tells whether depth was written. */
void
intel_miptree_finish_depth(struct brw_context *brw,
                           struct intel_mipmap_tree *mt, uint32_t level,
                           uint32_t start_layer, uint32_t layer_count,
                           bool depth_written);
/** Resolve as needed before handing the surface to an external consumer. */
void
intel_miptree_prepare_external(struct brw_context *brw,
                               struct intel_mipmap_tree *mt);

/**
 * Put the miptree in a state safe to share across processes.
 * NOTE(review): presumably fully resolves and drops aux buffers; confirm in
 * intel_mipmap_tree.c.
 */
void
intel_miptree_make_shareable(struct brw_context *brw,
                             struct intel_mipmap_tree *mt);

/** Blit between single-sampled and multi-sampled miptrees. */
void
intel_miptree_updownsample(struct brw_context *brw,
                           struct intel_mipmap_tree *src,
                           struct intel_mipmap_tree *dst);

/**
 * Refresh mt->r8stencil_mt if needed.
 * \see intel_mipmap_tree::r8stencil_mt
 */
void
intel_update_r8stencil(struct brw_context *brw,
                       struct intel_mipmap_tree *mt);

/**
 * Map a region of one slice for CPU access; *out_ptr/*out_stride describe
 * the mapping.  Must be balanced with intel_miptree_unmap().
 */
void
intel_miptree_map(struct brw_context *brw,
                  struct intel_mipmap_tree *mt,
                  unsigned int level,
                  unsigned int slice,
                  unsigned int x,
                  unsigned int y,
                  unsigned int w,
                  unsigned int h,
                  GLbitfield mode,
                  void **out_ptr,
                  ptrdiff_t *out_stride);

/** Unmap a slice previously mapped with intel_miptree_map(). */
void
intel_miptree_unmap(struct brw_context *brw,
                    struct intel_mipmap_tree *mt,
                    unsigned int level,
                    unsigned int slice);

/** Whether the hardware can sample \p mt directly with HiZ enabled. */
bool
intel_miptree_sample_with_hiz(struct brw_context *brw,
                              struct intel_mipmap_tree *mt);
715
716
717 static inline bool
718 intel_miptree_set_clear_color(struct gl_context *ctx,
719 struct intel_mipmap_tree *mt,
720 union isl_color_value clear_color)
721 {
722 if (memcmp(&mt->fast_clear_color, &clear_color, sizeof(clear_color)) != 0) {
723 mt->fast_clear_color = clear_color;
724 ctx->NewDriverState |= BRW_NEW_AUX_STATE;
725 return true;
726 }
727 return false;
728 }
729
730 static inline bool
731 intel_miptree_set_depth_clear_value(struct gl_context *ctx,
732 struct intel_mipmap_tree *mt,
733 float clear_value)
734 {
735 if (mt->fast_clear_color.f32[0] != clear_value) {
736 mt->fast_clear_color.f32[0] = clear_value;
737 ctx->NewDriverState |= BRW_NEW_AUX_STATE;
738 return true;
739 }
740 return false;
741 }
742
743 #ifdef __cplusplus
744 }
745 #endif
746
747 #endif