[i915] Fix driver from cliprects changes, and clean up state emission.
[mesa.git] src/mesa/drivers/dri/intel/intel_tex_validate.c
#include "mtypes.h"
#include "macros.h"

#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_mipmap_tree.h"
#include "intel_tex.h"

#define FILE_DEBUG_FLAG DEBUG_TEXTURE

/**
 * Compute which mipmap levels really need to be sent to the hardware.
 * This depends on the base image size, GL_TEXTURE_MIN_LOD,
 * GL_TEXTURE_MAX_LOD, GL_TEXTURE_BASE_LEVEL, and GL_TEXTURE_MAX_LEVEL.
 */
static void
intel_calculate_first_last_level(struct intel_texture_object *intelObj)
{
   struct gl_texture_object *tObj = &intelObj->base;
   const struct gl_texture_image *const baseImage =
      tObj->Image[0][tObj->BaseLevel];

   /* These must be signed values. MinLod and MaxLod can be negative numbers,
    * and having firstLevel and lastLevel as signed prevents the need for
    * extra sign checks.
    */
   int firstLevel;
   int lastLevel;

   /* Yes, this looks overly complicated, but it's all needed.
    */
   switch (tObj->Target) {
   case GL_TEXTURE_1D:
   case GL_TEXTURE_2D:
   case GL_TEXTURE_3D:
   case GL_TEXTURE_CUBE_MAP:
      if (tObj->MinFilter == GL_NEAREST || tObj->MinFilter == GL_LINEAR) {
         /* GL_NEAREST and GL_LINEAR only care about GL_TEXTURE_BASE_LEVEL.
          */
         firstLevel = lastLevel = tObj->BaseLevel;
      }
      else {
#ifdef I915
         firstLevel = tObj->BaseLevel + (GLint) (tObj->MinLod + 0.5);
         firstLevel = MAX2(firstLevel, tObj->BaseLevel);
         lastLevel = tObj->BaseLevel + (GLint) (tObj->MaxLod + 0.5);
         lastLevel = MAX2(lastLevel, tObj->BaseLevel);
         lastLevel = MIN2(lastLevel, tObj->BaseLevel + baseImage->MaxLog2);
         lastLevel = MIN2(lastLevel, tObj->MaxLevel);
         lastLevel = MAX2(firstLevel, lastLevel); /* need at least one level */
#else
         /* Currently not taking min/max lod into account here; those
          * values are programmed as sampler state elsewhere, and we
          * upload the same mipmap levels regardless. Not sure if this
          * makes sense, as it means it isn't possible for the app to
          * use min/max lod to reduce texture memory pressure.
          */
         firstLevel = tObj->BaseLevel;
         lastLevel = MIN2(tObj->BaseLevel + baseImage->MaxLog2,
                          tObj->MaxLevel);
         lastLevel = MAX2(firstLevel, lastLevel); /* need at least one level */
#endif
      }
      break;
   case GL_TEXTURE_RECTANGLE_NV:
   case GL_TEXTURE_4D_SGIS:
      firstLevel = lastLevel = 0;
      break;
   default:
      return;
   }

   /* save these values */
   intelObj->firstLevel = firstLevel;
   intelObj->lastLevel = lastLevel;
}
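
/* Worked example (illustrative note, not part of the original file): for a
 * mipmapped texture with BaseLevel = 2, MaxLevel = 1000, MinLod = 1.3,
 * MaxLod = 3.6 and a base image with MaxLog2 = 7, the I915 path above gives
 *
 *    firstLevel = MAX2(2 + (GLint) (1.3 + 0.5), 2)                 = 3
 *    lastLevel  = MIN2(MIN2(2 + (GLint) (3.6 + 0.5), 2 + 7), 1000) = 6
 *
 * while the non-I915 path ignores the LOD clamps and gives
 *
 *    firstLevel = 2
 *    lastLevel  = MIN2(2 + 7, 1000) = 9
 */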

/**
 * Copies the image's contents at its level into the object's miptree,
 * and updates the image to point at the object's miptree.
 */
static void
copy_image_data_to_tree(struct intel_context *intel,
                        struct intel_texture_object *intelObj,
                        struct intel_texture_image *intelImage)
{
   if (intelImage->mt) {
      /* Copy potentially with the blitter:
       */
      intel_miptree_image_copy(intel,
                               intelObj->mt,
                               intelImage->face,
                               intelImage->level, intelImage->mt);

      intel_miptree_release(intel, &intelImage->mt);
   }
   else {
      assert(intelImage->base.Data != NULL);

      /* More straightforward upload.
       */
      intel_miptree_image_data(intel,
                               intelObj->mt,
                               intelImage->face,
                               intelImage->level,
                               intelImage->base.Data,
                               intelImage->base.RowStride,
                               intelImage->base.RowStride *
                               intelImage->base.Height);
      _mesa_align_free(intelImage->base.Data);
      intelImage->base.Data = NULL;
   }

   intel_miptree_reference(&intelImage->mt, intelObj->mt);
}


/**
 * Bring the texture object's miptree up to date for the texture bound to
 * the given unit: compute the first/last mipmap levels that need to be
 * uploaded, (re)allocate the object's miptree if the current one cannot
 * hold them, and pull in any images still held in main memory or in
 * other trees.
 *
 * Returns GL_FALSE if the texture needs a software fallback (e.g. a
 * texture border), GL_TRUE otherwise.
 */
GLuint
intel_finalize_mipmap_tree(struct intel_context *intel, GLuint unit)
{
   struct gl_texture_object *tObj = intel->ctx.Texture.Unit[unit]._Current;
   struct intel_texture_object *intelObj = intel_texture_object(tObj);
   int comp_byte = 0;
   int cpp;

   GLuint face, i;
   GLuint nr_faces = 0;
   struct intel_texture_image *firstImage;

   GLboolean need_flush = GL_FALSE;

   /* We know/require this is true by now:
    */
   assert(intelObj->base._Complete);

   /* What levels must the tree include at a minimum?
    */
   intel_calculate_first_last_level(intelObj);
   firstImage =
      intel_texture_image(intelObj->base.Image[0][intelObj->firstLevel]);

   /* Fallback case:
    */
   if (firstImage->base.Border) {
      if (intelObj->mt) {
         intel_miptree_release(intel, &intelObj->mt);
      }
      return GL_FALSE;
   }


   /* If both firstImage and intelObj have a tree which can contain
    * all active images, favour firstImage. Note that because of the
    * completeness requirement, we know that the image dimensions
    * will match.
    */
   if (firstImage->mt &&
       firstImage->mt != intelObj->mt &&
       firstImage->mt->first_level <= intelObj->firstLevel &&
       firstImage->mt->last_level >= intelObj->lastLevel) {

      if (intelObj->mt)
         intel_miptree_release(intel, &intelObj->mt);

      intel_miptree_reference(&intelObj->mt, firstImage->mt);
   }

   if (firstImage->base.IsCompressed) {
      comp_byte = intel_compressed_num_bytes(firstImage->base.TexFormat->MesaFormat);
      cpp = comp_byte;
   }
   else cpp = firstImage->base.TexFormat->TexelBytes;

   /* Check tree can hold all active levels. Check tree matches
    * target, imageFormat, etc.
    *
    * XXX: For some layouts (eg i945?), the test might have to be
    * first_level == firstLevel, as the tree isn't valid except at the
    * original start level. Hope to get around this by
    * programming minLod, maxLod, baseLevel into the hardware and
    * leaving the tree alone.
    */
   if (intelObj->mt &&
       (intelObj->mt->target != intelObj->base.Target ||
        intelObj->mt->internal_format != firstImage->base.InternalFormat ||
        intelObj->mt->first_level != intelObj->firstLevel ||
        intelObj->mt->last_level != intelObj->lastLevel ||
        intelObj->mt->width0 != firstImage->base.Width ||
        intelObj->mt->height0 != firstImage->base.Height ||
        intelObj->mt->depth0 != firstImage->base.Depth ||
        intelObj->mt->cpp != cpp ||
        intelObj->mt->compressed != firstImage->base.IsCompressed)) {
      intel_miptree_release(intel, &intelObj->mt);
   }


   /* May need to create a new tree:
    */
   if (!intelObj->mt) {
      intelObj->mt = intel_miptree_create(intel,
                                          intelObj->base.Target,
                                          firstImage->base.InternalFormat,
                                          intelObj->firstLevel,
                                          intelObj->lastLevel,
                                          firstImage->base.Width,
                                          firstImage->base.Height,
                                          firstImage->base.Depth,
                                          cpp,
                                          comp_byte);
   }

   /* Pull in any images not in the object's tree:
    */
   nr_faces = (intelObj->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
   for (face = 0; face < nr_faces; face++) {
      for (i = intelObj->firstLevel; i <= intelObj->lastLevel; i++) {
         struct intel_texture_image *intelImage =
            intel_texture_image(intelObj->base.Image[face][i]);

         /* Need to import images in main memory or held in other trees.
          */
         if (intelObj->mt != intelImage->mt) {
            copy_image_data_to_tree(intel, intelObj, intelImage);
            need_flush = GL_TRUE;
         }
      }
   }

#ifdef I915
   /* XXX: what is this flush about?
    * On 965, it causes a batch flush in the middle of the state relocation
    * emits, which means that the eventual rendering doesn't have all of the
    * required relocations in place.
    */
   if (need_flush)
      intel_batchbuffer_flush(intel->batch);
#endif

   return GL_TRUE;
}
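
/* Usage sketch (illustrative, not part of the original file): a caller in
 * the driver's state validation would typically run this once per enabled
 * texture unit before emitting texture state, and fall back to software
 * rendering for any unit where it returns GL_FALSE, roughly:
 *
 *    for (unit = 0; unit < ctx->Const.MaxTextureUnits; unit++) {
 *       if (ctx->Texture.Unit[unit]._ReallyEnabled &&
 *           !intel_finalize_mipmap_tree(intel, unit))
 *          fallback = GL_TRUE;
 *    }
 *
 * The loop bound, the _ReallyEnabled test, and the fallback flag are
 * assumptions for illustration; the real call sites live elsewhere in the
 * driver.
 */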



void
intel_tex_map_images(struct intel_context *intel,
                     struct intel_texture_object *intelObj)
{
   GLuint nr_faces = (intelObj->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
   GLuint face, i;

   DBG("%s\n", __FUNCTION__);

   for (face = 0; face < nr_faces; face++) {
      for (i = intelObj->firstLevel; i <= intelObj->lastLevel; i++) {
         struct intel_texture_image *intelImage =
            intel_texture_image(intelObj->base.Image[face][i]);

         if (intelImage->mt) {
            intelImage->base.Data =
               intel_miptree_image_map(intel,
                                       intelImage->mt,
                                       intelImage->face,
                                       intelImage->level,
                                       &intelImage->base.RowStride,
                                       intelImage->base.ImageOffsets);
            /* convert stride to texels, not bytes */
            intelImage->base.RowStride /= intelImage->mt->cpp;
            /* intelImage->base.ImageStride /= intelImage->mt->cpp; */
         }
      }
   }
}



void
intel_tex_unmap_images(struct intel_context *intel,
                       struct intel_texture_object *intelObj)
{
   GLuint nr_faces = (intelObj->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
   GLuint face, i;

   for (face = 0; face < nr_faces; face++) {
      for (i = intelObj->firstLevel; i <= intelObj->lastLevel; i++) {
         struct intel_texture_image *intelImage =
            intel_texture_image(intelObj->base.Image[face][i]);

         if (intelImage->mt) {
            intel_miptree_image_unmap(intel, intelImage->mt);
            intelImage->base.Data = NULL;
         }
      }
   }
}
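
/* Usage sketch (illustrative, not part of the original file): the map/unmap
 * helpers above are intended to be paired around CPU access to the texture
 * images, e.g. for a software rendering path:
 *
 *    intel_tex_map_images(intel, intelObj);
 *    ... read or write intelImage->base.Data for the mapped levels ...
 *    intel_tex_unmap_images(intel, intelObj);
 */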