/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef INTEL_RESOLVE_MAP_H
#define INTEL_RESOLVE_MAP_H

#include <stdint.h>
#include "blorp/blorp.h"
#include "compiler/glsl/list.h"

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Enum for keeping track of the fast clear state of a buffer associated with
 * a miptree.
 *
 * Fast clear works by deferring the memory writes that would be used to clear
 * the buffer, so that instead of performing them at the time of the clear
 * operation, the hardware automatically performs them at the time that the
 * buffer is later accessed for rendering. The MCS buffer keeps track of
 * which regions of the buffer still have pending clear writes.
 *
 * This enum keeps track of the driver's knowledge of pending fast clears in
 * the MCS buffer.
 *
 * MCS buffers only exist on Gen7+.
 */
enum intel_fast_clear_state
{
   /**
    * No deferred clears are pending for this miptree, and the contents of the
    * color buffer are entirely correct. An MCS buffer may or may not exist
    * for this miptree. If it does exist, it is entirely in the "no deferred
    * clears pending" state. If it does not exist, it will be created the
    * first time a fast color clear is executed.
    *
    * In this state, the color buffer can be used for purposes other than
    * rendering without needing a render target resolve.
    *
    * Since there is no such thing as a "fast color clear resolve" for MSAA
    * buffers, an MSAA buffer will never be in this state.
    */
   INTEL_FAST_CLEAR_STATE_RESOLVED,

   /**
    * An MCS buffer exists for this miptree, and deferred clears are pending
    * for some regions of the color buffer, as indicated by the MCS buffer.
    * The contents of the color buffer are only correct for the regions where
    * the MCS buffer doesn't indicate a deferred clear.
    *
    * If a single-sample buffer is in this state, a render target resolve must
    * be performed before it can be used for purposes other than rendering.
    */
   INTEL_FAST_CLEAR_STATE_UNRESOLVED,

   /**
    * An MCS buffer exists for this miptree, and deferred clears are pending
    * for the entire color buffer, and the contents of the MCS buffer reflect
    * this. The contents of the color buffer are undefined.
    *
    * If a single-sample buffer is in this state, a render target resolve must
    * be performed before it can be used for purposes other than rendering.
    *
    * If the client attempts to clear a buffer which is already in this state,
    * the clear can be safely skipped, since the buffer is already clear.
    */
   INTEL_FAST_CLEAR_STATE_CLEAR,
};
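
/*
 * Example (illustrative sketch only, not part of the i965 API): one way a
 * caller could act on the states above before a single-sample color buffer
 * is used for something other than rendering. do_render_target_resolve()
 * and mt are hypothetical stand-ins for the driver's real resolve path and
 * its miptree:
 *
 *    switch (fast_clear_state) {
 *    case INTEL_FAST_CLEAR_STATE_RESOLVED:
 *       break;                         // color data is already valid
 *    case INTEL_FAST_CLEAR_STATE_UNRESOLVED:
 *    case INTEL_FAST_CLEAR_STATE_CLEAR:
 *       do_render_target_resolve(mt);  // flush the pending clear writes
 *       fast_clear_state = INTEL_FAST_CLEAR_STATE_RESOLVED;
 *       break;
 *    }
 */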

/**
 * \brief Map of miptree slices to needed resolves.
 *
 * The map is implemented as a linear doubly-linked list.
 *
 * In the intel_resolve_map*() functions, the \c head argument is not
 * inspected for its data. It only serves as an anchor for the list.
 *
 * \par Design Discussion
 *
 * There are two possible ways to record which miptree slices need
 * resolves. 1) Maintain a flag for every miptree slice in the texture,
 * likely in intel_mipmap_level::slice, or 2) maintain a list of only
 * those slices that need a resolve.
 *
 * Immediately before drawing, a full depth resolve is performed on each
 * enabled depth texture. If design 1 were chosen, then at each draw call
 * it would be necessary to iterate over each miptree slice of each
 * enabled depth texture in order to query if each slice needed a resolve.
 * In the worst case, this would require 2^16 iterations: 16 texture
 * units, 16 miplevels, and 256 depth layers (assuming maximums for OpenGL
 * 2.1).
 *
 * By choosing design 2, the number of iterations is exactly the minimum
 * necessary.
 */
struct intel_resolve_map {
   struct exec_node link;

   uint32_t level;
   uint32_t layer;

   union {
      enum blorp_hiz_op need;
      enum intel_fast_clear_state fast_clear_state;
   };
};
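
/*
 * Example (sketch): the iteration pattern design 2 enables. Immediately
 * before drawing, only the slices actually recorded in the list are visited,
 * here via the foreach_list_typed() helper from compiler/glsl/list.h.
 * resolve_map is a struct exec_list of pending resolves; resolve_slice()
 * and mt are hypothetical stand-ins for the driver's resolve code:
 *
 *    foreach_list_typed(struct intel_resolve_map, item, link, resolve_map) {
 *       resolve_slice(mt, item->level, item->layer, item->need);
 *    }
 */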

void
intel_resolve_map_set(struct exec_list *resolve_map,
                      uint32_t level,
                      uint32_t layer,
                      unsigned new_state);

const struct intel_resolve_map *
intel_resolve_map_find_any(const struct exec_list *resolve_map,
                           uint32_t start_level, uint32_t num_levels,
                           uint32_t start_layer, uint32_t num_layers);

static inline const struct intel_resolve_map *
intel_resolve_map_const_get(const struct exec_list *resolve_map,
                            uint32_t level,
                            uint32_t layer)
{
   return intel_resolve_map_find_any(resolve_map, level, 1, layer, 1);
}

static inline struct intel_resolve_map *
intel_resolve_map_get(struct exec_list *resolve_map,
                      uint32_t level,
                      uint32_t layer)
{
   return (struct intel_resolve_map *)intel_resolve_map_find_any(
                                         resolve_map, level, 1, layer, 1);
}

void
intel_resolve_map_remove(struct intel_resolve_map *resolve_map);

void
intel_resolve_map_clear(struct exec_list *resolve_map);
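
/*
 * Example (sketch) of typical use of the functions above, here tracking the
 * fast clear state of level 0 / layer 0. The exec_list would normally live
 * in the miptree; exec_list_make_empty() comes from compiler/glsl/list.h,
 * and do_render_target_resolve()/mt are hypothetical:
 *
 *    struct exec_list resolve_map;
 *    exec_list_make_empty(&resolve_map);
 *
 *    // A fast clear left deferred clear writes pending for this slice.
 *    intel_resolve_map_set(&resolve_map, 0, 0, INTEL_FAST_CLEAR_STATE_CLEAR);
 *
 *    // Before non-render access, look the slice up and resolve it.
 *    struct intel_resolve_map *item =
 *       intel_resolve_map_get(&resolve_map, 0, 0);
 *    if (item != NULL) {
 *       do_render_target_resolve(mt);
 *       intel_resolve_map_remove(item);
 *    }
 *
 *    // Drop any remaining entries, e.g. at miptree destruction.
 *    intel_resolve_map_clear(&resolve_map);
 */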

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* INTEL_RESOLVE_MAP_H */