i965/gen7+: Set up MCS in SURFACE_STATE whenever MCS is present.
src/mesa/drivers/dri/intel/intel_mipmap_tree.h
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef INTEL_MIPMAP_TREE_H
#define INTEL_MIPMAP_TREE_H

#include <assert.h>

#include "intel_regions.h"
#include "intel_resolve_map.h"

#ifdef __cplusplus
extern "C" {
#endif

/* A layer on top of the intel_regions code which adds:
 *
 * - Code to size and layout a region to hold a set of mipmaps.
 * - Query to determine if a new image fits in an existing tree.
 * - More refcounting
 *     - maybe able to remove refcounting from intel_region?
 *     - ?
 *
 * The fixed mipmap layout of intel hardware, where one offset
 * specifies the position of all images in a mipmap hierarchy,
 * complicates the implementation of GL texture image commands,
 * compared to hardware where each image is specified with an
 * independent offset.
 *
 * In an ideal world, each texture object would be associated with a
 * single bufmgr buffer or 2d intel_region, and all the images within
 * the texture object would slot into the tree as they arrive.  The
 * reality can be a little messier, as images can arrive from the user
 * with sizes that don't fit in the existing tree, or in an order
 * where the tree layout cannot be guessed immediately.
 *
 * This structure encodes an idealized mipmap tree.  The GL image
 * commands build these where possible, otherwise store the images in
 * temporary system buffers.
 */

struct intel_resolve_map;
struct intel_texture_image;

/**
 * When calling intel_miptree_map() on an ETC-transcoded-to-RGB miptree or a
 * depthstencil-split-to-separate-stencil miptree, we'll normally make a
 * temporary and recreate the kind of data requested by Mesa core, since we're
 * satisfying some glGetTexImage() request or something.
 *
 * However, occasionally you want to actually map the miptree's current data
 * without transcoding back.  This flag to intel_miptree_map() gets you that.
 */
#define BRW_MAP_DIRECT_BIT 0x80000000
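
/*
 * A hedged usage sketch (not part of the driver itself): mapping the raw,
 * untranscoded data of an ETC-backed miptree with BRW_MAP_DIRECT_BIT, using
 * intel_miptree_map() and intel_miptree_unmap() declared later in this
 * header.  The names intel, mt, level, slice, w and h are assumed to be in
 * scope.
 *
 * \code
 *    void *ptr;
 *    int stride;
 *
 *    intel_miptree_map(intel, mt, level, slice, 0, 0, w, h,
 *                      GL_MAP_READ_BIT | BRW_MAP_DIRECT_BIT,
 *                      &ptr, &stride);
 *    ...read the untranscoded bits through ptr / stride...
 *    intel_miptree_unmap(intel, mt, level, slice);
 * \endcode
 */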

struct intel_miptree_map {
   /** Bitfield of GL_MAP_READ_BIT, GL_MAP_WRITE_BIT, GL_MAP_INVALIDATE_RANGE_BIT */
   GLbitfield mode;
   /** Region of interest for the map. */
   int x, y, w, h;
   /** Possibly malloced temporary buffer for the mapping. */
   void *buffer;
   /** Possible pointer to a temporary linear miptree for the mapping. */
   struct intel_mipmap_tree *mt;
   /** Pointer to the start of (map_x, map_y) returned by the mapping. */
   void *ptr;
   /** Stride of the mapping. */
   int stride;

   /**
    * intel_mipmap_tree::singlesample_mt is temporary storage that persists
    * only for the duration of the map.
    */
   bool singlesample_mt_is_tmp;
};

/**
 * Describes the location of each texture image within a texture region.
 */
struct intel_mipmap_level
{
   /** Offset to this miptree level, used in computing x_offset. */
   GLuint level_x;
   /** Offset to this miptree level, used in computing y_offset. */
   GLuint level_y;
   GLuint width;
   GLuint height;

   /**
    * \brief Number of 2D slices in this miplevel.
    *
    * The exact semantics of depth varies according to the texture target:
    *    - For GL_TEXTURE_CUBE_MAP, depth is 6.
    *    - For GL_TEXTURE_2D_ARRAY, depth is the number of array slices. It is
    *      identical for all miplevels in the texture.
    *    - For GL_TEXTURE_3D, it is the texture's depth at this miplevel. Its
    *      value, like width and height, varies with miplevel.
    *    - For other texture types, depth is 1.
    */
   GLuint depth;

   /**
    * \brief List of 2D images in this mipmap level.
    *
    * This may be a list of cube faces, array slices in a 2D array texture, or
    * layers in a 3D texture.  The list's length is \c depth.
    */
   struct intel_mipmap_slice {
      /**
       * \name Offset to slice
       * \{
       *
       * Hardware formats are so diverse that there is no unified way to
       * compute the slice offsets, so we store them in this table.
       *
       * The (x, y) offset to slice \c s at level \c l relative to the
       * miptree's base address is
       * \code
       *     x = mt->level[l].slice[s].x_offset
       *     y = mt->level[l].slice[s].y_offset
       * \endcode
       * (See also the usage sketch that follows this struct.)
       */
      GLuint x_offset;
      GLuint y_offset;
      /** \} */

      /**
       * Mapping information. Persistent for the duration of
       * intel_miptree_map/unmap on this slice.
       */
      struct intel_miptree_map *map;

      /**
       * \brief Is HiZ enabled for this slice?
       *
       * If \c mt->level[l].slice[s].has_hiz is set, then (1) \c mt->hiz_mt
       * has been allocated and (2) the HiZ memory corresponding to this slice
       * resides at \c mt->hiz_mt->level[l].slice[s].
       */
      bool has_hiz;
   } *slice;
};
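
/*
 * Hedged example (not taken from the driver) of consuming the per-slice
 * offsets above through intel_miptree_get_image_offset(), which is declared
 * later in this header.  The address arithmetic in the sketch assumes an
 * untiled surface whose pitch is expressed in bytes, which is a deliberate
 * simplification; tiled surfaces go through intel_miptree_get_tile_offsets()
 * instead.
 *
 * \code
 *    GLuint x, y;
 *
 *    intel_miptree_get_image_offset(mt, level, slice, &x, &y);
 *    ...texel (i, j) of the slice then lives roughly at
 *       base + (y + j) * pitch_in_bytes + (x + i) * mt->cpp...
 * \endcode
 */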

/**
 * Enum for keeping track of the different MSAA layouts supported by Gen7.
 */
enum intel_msaa_layout
{
   /**
    * Ordinary surface with no MSAA.
    */
   INTEL_MSAA_LAYOUT_NONE,

   /**
    * Interleaved Multisample Surface.  The additional samples are
    * accommodated by scaling up the width and the height of the surface so
    * that all the samples corresponding to a pixel are located at nearby
    * memory locations.
    */
   INTEL_MSAA_LAYOUT_IMS,

   /**
    * Uncompressed Multisample Surface.  The surface is stored as a 2D array,
    * with array slice n containing all pixel data for sample n.
    */
   INTEL_MSAA_LAYOUT_UMS,

   /**
    * Compressed Multisample Surface.  The surface is stored as in
    * INTEL_MSAA_LAYOUT_UMS, but there is an additional buffer called the MCS
    * (Multisample Control Surface) buffer.  Each pixel in the MCS buffer
    * indicates the mapping from sample number to array slice.  This allows
    * the common case (where all samples constituting a pixel have the same
    * color value) to be stored efficiently by just using a single array
    * slice.
    */
   INTEL_MSAA_LAYOUT_CMS,
};
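
/*
 * Rough, non-authoritative sketch of how a layout gets picked for a
 * multisampled surface.  The real decision lives in the miptree layout code
 * and handles more cases (for example, formats that cannot use compression
 * fall back to UMS), which this sketch omits; intel, format and layout are
 * assumed names.
 *
 * \code
 *    if (intel->gen < 7) {
 *       layout = INTEL_MSAA_LAYOUT_IMS;
 *    } else {
 *       switch (_mesa_get_format_base_format(format)) {
 *       case GL_DEPTH_COMPONENT:
 *       case GL_STENCIL_INDEX:
 *       case GL_DEPTH_STENCIL:
 *          layout = INTEL_MSAA_LAYOUT_IMS;
 *          break;
 *       default:
 *          layout = INTEL_MSAA_LAYOUT_CMS;
 *          break;
 *       }
 *    }
 * \endcode
 */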


#ifndef I915
/**
 * Enum for keeping track of the state of an MCS buffer associated with a
 * miptree.  This determines when fast clear related operations are needed.
 *
 * Fast clear works by deferring the memory writes that would be used to clear
 * the buffer, so that instead of performing them at the time of the clear
 * operation, the hardware automatically performs them at the time that the
 * buffer is later accessed for rendering.  The MCS buffer keeps track of
 * which regions of the buffer still have pending clear writes.
 *
 * This enum keeps track of the driver's knowledge of the state of the MCS
 * buffer.
 *
 * MCS buffers only exist on Gen7+.
 */
enum intel_mcs_state
{
   /**
    * There is no MCS buffer for this miptree, and one should never be
    * allocated.
    */
   INTEL_MCS_STATE_NONE,

   /**
    * An MCS buffer exists for this miptree, and it is used for MSAA purposes.
    */
   INTEL_MCS_STATE_MSAA,

   /**
    * No deferred clears are pending for this miptree, and the contents of the
    * color buffer are entirely correct.  An MCS buffer may or may not exist
    * for this miptree.  If it does exist, it is entirely in the "no deferred
    * clears pending" state.  If it does not exist, it will be created the
    * first time a fast color clear is executed.
    *
    * In this state, the color buffer can be used for purposes other than
    * rendering without needing a render target resolve.
    */
   INTEL_MCS_STATE_RESOLVED,

   /**
    * An MCS buffer exists for this miptree, and deferred clears are pending
    * for some regions of the color buffer, as indicated by the MCS buffer.
    * The contents of the color buffer are only correct for the regions where
    * the MCS buffer doesn't indicate a deferred clear.
    *
    * In this state, a render target resolve must be performed before the
    * color buffer can be used for purposes other than rendering.
    */
   INTEL_MCS_STATE_UNRESOLVED,

   /**
    * An MCS buffer exists for this miptree, and deferred clears are pending
    * for the entire color buffer, and the contents of the MCS buffer reflect
    * this.  The contents of the color buffer are undefined.
    *
    * In this state, a render target resolve must be performed before the
    * color buffer can be used for purposes other than rendering.
    *
    * If the client attempts to clear a buffer which is already in this state,
    * the clear can be safely skipped, since the buffer is already clear.
    */
   INTEL_MCS_STATE_CLEAR,
};
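
/*
 * Hedged sketch of the resolve rule the states above describe; the helper
 * name resolve_color_for_non_render_use() is hypothetical and exists only
 * for illustration.  Before the color buffer is consumed by anything other
 * than the render pipeline (sampling, blits, CPU maps), pending fast clears
 * must be resolved:
 *
 * \code
 *    if (mt->mcs_state == INTEL_MCS_STATE_UNRESOLVED ||
 *        mt->mcs_state == INTEL_MCS_STATE_CLEAR) {
 *       resolve_color_for_non_render_use(intel, mt);
 *       mt->mcs_state = INTEL_MCS_STATE_RESOLVED;
 *    }
 * \endcode
 *
 * The converse transition (rendering moves CLEAR back to UNRESOLVED) is
 * implemented by intel_miptree_used_for_rendering() near the end of this
 * header.
 */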
#endif

struct intel_mipmap_tree
{
   /* Effectively the key:
    */
   GLenum target;

   /**
    * Generally, this is just the same as the gl_texture_image->TexFormat or
    * gl_renderbuffer->Format.
    *
    * However, for textures and renderbuffers with packed depth/stencil formats
    * on hardware where we want or need to use separate stencil, there will be
    * two miptrees for storing the data.  If the depthstencil texture or rb is
    * MESA_FORMAT_Z32_FLOAT_X24S8, then mt->format will be
    * MESA_FORMAT_Z32_FLOAT, otherwise for MESA_FORMAT_S8_Z24 objects it will be
    * MESA_FORMAT_X8_Z24.
    *
    * For ETC1/ETC2 textures, this is one of the uncompressed mesa texture
    * formats if the hardware lacks support for ETC1/ETC2.  See @ref etc_format.
    */
   gl_format format;

   /** The original ETC compressed texture format, for ETC textures that were
    *  transcoded to the uncompressed \c format above.
    */
   gl_format etc_format;

   /**
    * The X offset of each image in the miptree must be aligned to this.  See
    * the "Alignment Unit Size" section of the BSpec.
    */
   unsigned int align_w;
   unsigned int align_h; /**< \see align_w */

   GLuint first_level;
   GLuint last_level;

   /**
    * Level zero image dimensions.  These dimensions correspond to the
    * physical layout of data in memory.  Accordingly, they account for the
    * extra width, height, and/or depth that must be allocated in order to
    * accommodate multisample formats, and they account for the extra factor
    * of 6 in depth that must be allocated in order to accommodate cubemap
    * textures.
    */
   GLuint physical_width0, physical_height0, physical_depth0;

   GLuint cpp;
   GLuint num_samples;
   bool compressed;

   /**
    * Level zero image dimensions.  These dimensions correspond to the
    * logical width, height, and depth of the region as seen by client code.
    * Accordingly, they do not account for the extra width, height, and/or
    * depth that must be allocated in order to accommodate multisample
    * formats, nor do they account for the extra factor of 6 in depth that
    * must be allocated in order to accommodate cubemap textures.
    */
   uint32_t logical_width0, logical_height0, logical_depth0;

   /**
    * For 1D array, 2D array, cube, and 2D multisampled surfaces on Gen7: true
    * if the surface only contains LOD 0, and hence no space is allocated for
    * LODs other than 0 in between array slices.
    *
    * Corresponds to the surface_array_spacing bit in gen7_surface_state.
    */
   bool array_spacing_lod0;

   /**
    * MSAA layout used by this buffer.
    */
   enum intel_msaa_layout msaa_layout;

   /* Derived from the above:
    */
   GLuint total_width;
   GLuint total_height;

   /* The 3DSTATE_CLEAR_PARAMS value associated with the last depth clear to
    * this depth mipmap tree, if any.
    */
   uint32_t depth_clear_value;

   /* Includes image offset tables:
    */
   struct intel_mipmap_level level[MAX_TEXTURE_LEVELS];

   /* The data is held here:
    */
   struct intel_region *region;

   /* Offset into region bo where miptree starts:
    */
   uint32_t offset;

   /**
    * \brief Singlesample miptree.
    *
    * This is used in two cases.
    *
    * --- Case 1: As persistent singlesample storage for multisample window
    * system front and back buffers ---
    *
    * Suppose that the window system FBO was created with a multisample
    * config.  Let `back_irb` be the `intel_renderbuffer` for the FBO's back
    * buffer.  Then `back_irb` contains two miptrees: a parent multisample
    * miptree (back_irb->mt) and a child singlesample miptree
    * (back_irb->mt->singlesample_mt).  The DRM buffer shared with DRI2
    * belongs to `back_irb->mt->singlesample_mt` and contains singlesample
    * data.  The singlesample miptree is created at the same time as and
    * persists for the lifetime of its parent multisample miptree.
    *
    * When access to the singlesample data is needed, such as at
    * eglSwapBuffers and glReadPixels, an automatic downsample occurs from
    * `back_irb->mt` to `back_irb->mt->singlesample_mt` when necessary.
    * (See also the sketch that follows this struct.)
    *
    * This description of the back buffer applies analogously to the front
    * buffer.
    *
    *
    * --- Case 2: As temporary singlesample storage for mapping multisample
    * miptrees ---
    *
    * Suppose intel_miptree_map() is called on a multisample miptree, `mt`,
    * for which case 1 does not apply (that is, `mt` does not belong to
    * a front or back buffer).  Then `mt->singlesample_mt` is null at the
    * start of the call.  intel_miptree_map will create a temporary
    * singlesample miptree, store it at `mt->singlesample_mt`, downsample from
    * `mt` to `mt->singlesample_mt` if necessary, then map
    * `mt->singlesample_mt`.  The temporary miptree is later deleted during
    * intel_miptree_unmap.
    */
   struct intel_mipmap_tree *singlesample_mt;

   /**
    * \brief A downsample is needed from this miptree to singlesample_mt.
    */
   bool need_downsample;

   /**
    * \brief HiZ miptree
    *
    * The hiz miptree contains the miptree's hiz buffer.  To allocate the hiz
    * miptree, use intel_miptree_alloc_hiz().
    *
    * To determine if hiz is enabled, do not check this pointer.  Instead, use
    * intel_miptree_slice_has_hiz().
    */
   struct intel_mipmap_tree *hiz_mt;

   /**
    * \brief Map of miptree slices to needed resolves.
    *
    * This is used only when the miptree has a child HiZ miptree.
    *
    * Let \c mt be a depth miptree with HiZ enabled.  Then the resolve map is
    * \c mt->hiz_map.  The resolve map of the child HiZ miptree, \c
    * mt->hiz_mt->hiz_map, is unused.
    */
   struct intel_resolve_map hiz_map;

   /**
    * \brief Stencil miptree for depthstencil textures.
    *
    * This miptree is used for depthstencil textures and renderbuffers that
    * require separate stencil.  It always has the true copy of the stencil
    * bits, regardless of mt->format.
    *
    * \see intel_miptree_map_depthstencil()
    * \see intel_miptree_unmap_depthstencil()
    */
   struct intel_mipmap_tree *stencil_mt;

#ifndef I915
   /**
    * \brief MCS miptree.
    *
    * This miptree contains the "multisample control surface", which stores
    * the necessary information to implement compressed MSAA
    * (INTEL_MSAA_LAYOUT_CMS) and "fast color clear" behaviour on Gen7+.
    *
    * NULL if no MCS miptree is in use for this surface.
    */
   struct intel_mipmap_tree *mcs_mt;

   /**
    * MCS state for this buffer.
    */
   enum intel_mcs_state mcs_state;
#endif

   /* These are also refcounted:
    */
   GLuint refcount;
};
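
/*
 * Hedged illustration of case 1 in the singlesample_mt comment above, in
 * terms of intel_miptree_downsample() declared later in this header.  "irb"
 * stands in for an intel_renderbuffer pointer and is only assumed here.
 *
 * \code
 *    if (irb->mt->need_downsample)
 *       intel_miptree_downsample(intel, irb->mt);
 *    ...hand irb->mt->singlesample_mt->region to DRI2 / glReadPixels...
 * \endcode
 */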

enum intel_miptree_tiling_mode {
   INTEL_MIPTREE_TILING_ANY,
   INTEL_MIPTREE_TILING_Y,
   INTEL_MIPTREE_TILING_NONE,
};

struct intel_mipmap_tree *intel_miptree_create(struct intel_context *intel,
                                               GLenum target,
                                               gl_format format,
                                               GLuint first_level,
                                               GLuint last_level,
                                               GLuint width0,
                                               GLuint height0,
                                               GLuint depth0,
                                               bool expect_accelerated_upload,
                                               GLuint num_samples,
                                               enum intel_miptree_tiling_mode);
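
/*
 * Hedged usage sketch (values are illustrative only, not quoted from a
 * caller): create a single-level 2D miptree and drop the reference when
 * done.  The arguments after the format are first/last level, width, height,
 * depth, expect_accelerated_upload, num_samples and tiling; a num_samples of
 * 0 is assumed here to request a non-MSAA surface.  intel, width and height
 * are assumed names.
 *
 * \code
 *    struct intel_mipmap_tree *mt =
 *       intel_miptree_create(intel, GL_TEXTURE_2D, MESA_FORMAT_ARGB8888,
 *                            0, 0, width, height, 1,
 *                            true, 0, INTEL_MIPTREE_TILING_ANY);
 *    ...
 *    intel_miptree_release(&mt);
 * \endcode
 */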

struct intel_mipmap_tree *
intel_miptree_create_layout(struct intel_context *intel,
                            GLenum target,
                            gl_format format,
                            GLuint first_level,
                            GLuint last_level,
                            GLuint width0,
                            GLuint height0,
                            GLuint depth0,
                            bool for_bo,
                            GLuint num_samples);

struct intel_mipmap_tree *
intel_miptree_create_for_bo(struct intel_context *intel,
                            drm_intel_bo *bo,
                            gl_format format,
                            uint32_t offset,
                            uint32_t width,
                            uint32_t height,
                            int pitch,
                            uint32_t tiling);

struct intel_mipmap_tree*
intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
                                     unsigned dri_attachment,
                                     gl_format format,
                                     uint32_t num_samples,
                                     struct intel_region *region);

/**
 * Create a miptree appropriate as the storage for a non-texture renderbuffer.
 * The miptree has the following properties:
 *   - The target is GL_TEXTURE_2D.
 *   - There are no levels other than the base level 0.
 *   - Depth is 1.
 */
struct intel_mipmap_tree*
intel_miptree_create_for_renderbuffer(struct intel_context *intel,
                                      gl_format format,
                                      uint32_t width,
                                      uint32_t height,
                                      uint32_t num_samples);

/** \brief Assert that the level and layer are valid for the miptree. */
static inline void
intel_miptree_check_level_layer(struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer)
{
   assert(level >= mt->first_level);
   assert(level <= mt->last_level);
   assert(layer < mt->level[level].depth);
}

int intel_miptree_pitch_align (struct intel_context *intel,
                               struct intel_mipmap_tree *mt,
                               uint32_t tiling,
                               int pitch);

void intel_miptree_reference(struct intel_mipmap_tree **dst,
                             struct intel_mipmap_tree *src);

void intel_miptree_release(struct intel_mipmap_tree **mt);
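
/*
 * Hedged sketch of the refcounting convention behind the two declarations
 * above: intel_miptree_reference() stores \c src in \c *dst and takes a
 * reference, and intel_miptree_release() drops one reference and NULLs the
 * pointer, freeing the tree when the count reaches zero.
 *
 * \code
 *    struct intel_mipmap_tree *copy = NULL;
 *
 *    intel_miptree_reference(&copy, mt);
 *    ...use copy...
 *    intel_miptree_release(&copy);
 * \endcode
 */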

/* Check if an image fits an existing mipmap tree layout
 */
bool intel_miptree_match_image(struct intel_mipmap_tree *mt,
                               struct gl_texture_image *image);

void
intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               GLuint *x, GLuint *y);

void
intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
                                       int *width, int *height, int *depth);

uint32_t
intel_miptree_get_tile_offsets(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               uint32_t *tile_x,
                               uint32_t *tile_y);

void intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
                                  GLuint level,
                                  GLuint x, GLuint y,
                                  GLuint w, GLuint h, GLuint d);

void intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
                                    GLuint level,
                                    GLuint img, GLuint x, GLuint y);

void
intel_miptree_copy_teximage(struct intel_context *intel,
                            struct intel_texture_image *intelImage,
                            struct intel_mipmap_tree *dst_mt, bool invalidate);

/**
 * Copy the stencil data from \c mt->region to \c mt->stencil_mt->region for
 * the given miptree slice.
 *
 * \see intel_mipmap_tree::stencil_mt
 */
void
intel_miptree_s8z24_scatter(struct intel_context *intel,
                            struct intel_mipmap_tree *mt,
                            uint32_t level,
                            uint32_t slice);

/**
 * Copy the stencil data in \c mt->stencil_mt->region to \c mt->region for the
 * given miptree slice.
 *
 * \see intel_mipmap_tree::stencil_mt
 */
void
intel_miptree_s8z24_gather(struct intel_context *intel,
                           struct intel_mipmap_tree *mt,
                           uint32_t level,
                           uint32_t layer);

bool
intel_miptree_alloc_mcs(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        GLuint num_samples);
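
/*
 * Hedged sketch of when the allocation above is typically needed: a
 * multisampled color miptree using the CMS layout must have its MCS buffer
 * before it can be rendered to.  The surrounding control flow is assumed for
 * illustration, not quoted from the driver.
 *
 * \code
 *    if (mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS && mt->mcs_mt == NULL) {
 *       if (!intel_miptree_alloc_mcs(intel, mt, mt->num_samples))
 *          ...fail miptree creation...
 *    }
 * \endcode
 */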

/**
 * \name Miptree HiZ functions
 * \{
 *
 * It is safe to call the "slice_set_need_resolve" and "slice_resolve"
 * functions on a miptree without HiZ. In that case, each function is a no-op.
 */

/**
 * \brief Allocate the miptree's embedded HiZ miptree.
 * \see intel_mipmap_tree::hiz_mt
 * \return false if allocation failed
 */
bool
intel_miptree_alloc_hiz(struct intel_context *intel,
                        struct intel_mipmap_tree *mt);

bool
intel_miptree_slice_has_hiz(struct intel_mipmap_tree *mt,
                            uint32_t level,
                            uint32_t layer);

void
intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree *mt,
                                          uint32_t level,
                                          uint32_t depth);
void
intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
                                            uint32_t level,
                                            uint32_t depth);

/**
 * \return false if no resolve was needed
 */
bool
intel_miptree_slice_resolve_hiz(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                unsigned int level,
                                unsigned int depth);

/**
 * \return false if no resolve was needed
 */
bool
intel_miptree_slice_resolve_depth(struct intel_context *intel,
                                  struct intel_mipmap_tree *mt,
                                  unsigned int level,
                                  unsigned int depth);

/**
 * \return false if no resolve was needed
 */
bool
intel_miptree_all_slices_resolve_hiz(struct intel_context *intel,
                                     struct intel_mipmap_tree *mt);

/**
 * \return false if no resolve was needed
 */
bool
intel_miptree_all_slices_resolve_depth(struct intel_context *intel,
                                       struct intel_mipmap_tree *mt);

/**\}*/
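
/*
 * Hedged example of the usual pattern for the resolve functions above,
 * assuming a depth miptree \c mt with HiZ enabled: a depth resolve makes the
 * depth buffer readable outside the depth-test path, and a HiZ resolve
 * refreshes the HiZ buffer after the depth data was written by something
 * other than HiZ-enabled rendering.
 *
 * \code
 *    intel_miptree_all_slices_resolve_depth(intel, mt);
 *    ...map or sample mt's depth data...
 *
 *    intel_miptree_all_slices_resolve_hiz(intel, mt);
 *    ...resume depth testing against mt with HiZ...
 * \endcode
 */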

/**
 * Update the fast clear state for a miptree to indicate that it has been used
 * for rendering.
 */
static inline void
intel_miptree_used_for_rendering(struct intel_mipmap_tree *mt)
{
#ifdef I915
   /* Nothing needs to be done for I915, since it doesn't support fast
    * clear.
    */
#else
   /* If the buffer was previously in fast clear state, change it to
    * unresolved state, since it won't be guaranteed to be clear after
    * rendering occurs.
    */
   if (mt->mcs_state == INTEL_MCS_STATE_CLEAR)
      mt->mcs_state = INTEL_MCS_STATE_UNRESOLVED;
#endif
}

void
intel_miptree_downsample(struct intel_context *intel,
                         struct intel_mipmap_tree *mt);

void
intel_miptree_upsample(struct intel_context *intel,
                       struct intel_mipmap_tree *mt);

/* i915_mipmap_tree.c:
 */
void i915_miptree_layout(struct intel_mipmap_tree *mt);
void i945_miptree_layout(struct intel_mipmap_tree *mt);
void brw_miptree_layout(struct intel_context *intel,
                        struct intel_mipmap_tree *mt);

void *intel_miptree_map_raw(struct intel_context *intel,
                            struct intel_mipmap_tree *mt);

void intel_miptree_unmap_raw(struct intel_context *intel,
                             struct intel_mipmap_tree *mt);

void
intel_miptree_map(struct intel_context *intel,
                  struct intel_mipmap_tree *mt,
                  unsigned int level,
                  unsigned int slice,
                  unsigned int x,
                  unsigned int y,
                  unsigned int w,
                  unsigned int h,
                  GLbitfield mode,
                  void **out_ptr,
                  int *out_stride);

void
intel_miptree_unmap(struct intel_context *intel,
                    struct intel_mipmap_tree *mt,
                    unsigned int level,
                    unsigned int slice);
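
/*
 * Hedged usage sketch for the map/unmap pair above: write a w x h window of
 * level 0, slice 0 from a CPU buffer.  The names intel, mt, w, h, data and
 * data_stride are assumed, and the format is assumed to be uncompressed so
 * that a row is w * mt->cpp bytes.
 *
 * \code
 *    void *ptr;
 *    int stride, row;
 *
 *    intel_miptree_map(intel, mt, 0, 0, 0, 0, w, h,
 *                      GL_MAP_WRITE_BIT, &ptr, &stride);
 *    for (row = 0; row < h; row++)
 *       memcpy((char *) ptr + row * stride,
 *              (const char *) data + row * data_stride,
 *              w * mt->cpp);
 *    intel_miptree_unmap(intel, mt, 0, 0);
 * \endcode
 */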

#ifdef I915
static inline void
intel_hiz_exec(struct intel_context *intel, struct intel_mipmap_tree *mt,
               unsigned int level, unsigned int layer, enum gen6_hiz_op op)
{
   /* Stub on i915.  It would be nice if we didn't execute resolve code at all
    * there.
    */
}
#else
void
intel_hiz_exec(struct intel_context *intel, struct intel_mipmap_tree *mt,
               unsigned int level, unsigned int layer, enum gen6_hiz_op op);
#endif

#ifdef __cplusplus
}
#endif

#endif