i915: Don't free the intel_context structure when intelCreateContext fails.
[mesa.git] src/mesa/drivers/dri/intel/intel_tex_subimage.c

/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "main/macros.h"
#include "main/mtypes.h"
#include "main/pbo.h"
#include "main/texobj.h"
#include "main/texstore.h"
#include "main/texcompress.h"
#include "main/enums.h"

#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_tex.h"
#include "intel_mipmap_tree.h"
#include "intel_blit.h"

#define FILE_DEBUG_FLAG DEBUG_TEXTURE

static bool
intel_blit_texsubimage(struct gl_context * ctx,
                       struct gl_texture_image *texImage,
                       GLint xoffset, GLint yoffset,
                       GLint width, GLint height,
                       GLenum format, GLenum type, const void *pixels,
                       const struct gl_pixelstore_attrib *packing)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_texture_image *intelImage = intel_texture_image(texImage);
   GLuint dstRowStride = 0;
   drm_intel_bo *temp_bo = NULL;
   unsigned int blit_x = 0, blit_y = 0;
   unsigned long pitch;
   uint32_t tiling_mode = I915_TILING_NONE;
   GLubyte *dstMap;

   /* Try to do a blit upload of the subimage if the texture is
    * currently busy.
    */
   if (!intelImage->mt)
      return false;

   /* The blitter can't handle Y tiling */
   if (intelImage->mt->region->tiling == I915_TILING_Y)
      return false;

   if (texImage->TexObject->Target != GL_TEXTURE_2D)
      return false;

   /* On gen6, it's probably not worth swapping to the blit ring to do
    * this because of all the overhead involved.
    */
   if (intel->gen >= 6)
      return false;

   if (!drm_intel_bo_busy(intelImage->mt->region->bo))
      return false;

   DBG("BLT subimage %s target %s level %d offset %d,%d %dx%d\n",
       __FUNCTION__,
       _mesa_lookup_enum_by_nr(texImage->TexObject->Target),
       texImage->Level, xoffset, yoffset, width, height);

   pixels = _mesa_validate_pbo_teximage(ctx, 2, width, height, 1,
                                        format, type, pixels, packing,
                                        "glTexSubImage");
   if (!pixels)
      return false;

   temp_bo = drm_intel_bo_alloc_tiled(intel->bufmgr,
                                      "subimage blit bo",
                                      width, height,
                                      intelImage->mt->cpp,
                                      &tiling_mode,
                                      &pitch,
                                      0);
   if (temp_bo == NULL) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "intelTexSubImage");
      return false;
   }

   if (drm_intel_gem_bo_map_gtt(temp_bo)) {
      drm_intel_bo_unreference(temp_bo);
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "intelTexSubImage");
      return false;
   }

   dstMap = temp_bo->virtual;
   dstRowStride = pitch;

   /* Offset the blit destination to this image's location within the
    * miptree, then to the subimage origin.
    */
   intel_miptree_get_image_offset(intelImage->mt, texImage->Level,
                                  intelImage->base.Base.Face, 0,
                                  &blit_x, &blit_y);
   blit_x += xoffset;
   blit_y += yoffset;

   if (!_mesa_texstore(ctx, 2, texImage->_BaseFormat,
                       texImage->TexFormat,
                       dstRowStride,
                       &dstMap,
                       width, height, 1,
                       format, type, pixels, packing)) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "intelTexSubImage");
   }

   bool ret;
   unsigned int dst_pitch = intelImage->mt->region->pitch *
      intelImage->mt->cpp;

   drm_intel_gem_bo_unmap_gtt(temp_bo);

   /* Both pitches are converted from bytes to pixels for intelEmitCopyBlit. */
   ret = intelEmitCopyBlit(intel,
                           intelImage->mt->cpp,
                           dstRowStride / intelImage->mt->cpp,
                           temp_bo, 0, false,
                           dst_pitch / intelImage->mt->cpp,
                           intelImage->mt->region->bo, 0,
                           intelImage->mt->region->tiling,
                           0, 0, blit_x, blit_y, width, height,
                           GL_COPY);
   assert(ret);

   drm_intel_bo_unreference(temp_bo);
   _mesa_unmap_teximage_pbo(ctx, packing);

   return true;
}

/**
 * \brief A fast path for glTexImage and glTexSubImage.
 *
 * \param for_glTexImage Was this called from glTexImage or glTexSubImage?
 *
 * This fast path is taken when the hardware natively supports the texture
 * format (such as GL_BGRA) and when the texture memory is X-tiled. It uploads
 * the texture data by mapping the texture memory without a GTT fence, thus
 * acquiring a tiled view of the memory, and then memcpy'ing successive
 * subspans within each tile.
 *
 * This is a performance win over the conventional texture upload path because
 * it avoids the performance penalty of writing through the write-combine
 * buffer. In the conventional texture upload path,
 * texstore.c:store_texsubimage(), the texture memory is mapped through a GTT
 * fence, thus acquiring a linear view of the memory, then each row in the
 * image is memcpy'd. In this fast path, we replace each row's memcpy with
 * a sequence of memcpy's over each bit6 swizzle span in the row.
 *
 * This fast path's use case is Google Chrome's paint rectangles. Chrome (as
 * of version 21) renders each page as a tiling of 256x256 GL_BGRA textures.
 * Each page's content is initially uploaded with glTexImage2D and damaged
 * regions are updated with glTexSubImage2D. On some workloads, the
 * performance gain of this fast path on Sandybridge is over 5x.
 */
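/* A note on the "bit6 swizzle spans" mentioned above: when address swizzling
 * is in effect (intel->has_swizzling), bit 6 of the tiled byte address is
 * XORed with higher address bits (bits 9 and 10 in the variant handled
 * below), so addresses are only guaranteed contiguous within a 64-byte span.
 * 64 bytes of GL_BGRA is 16 pixels, which is why the copy loop below (see
 * swizzle_width_pixels) never memcpy's across a 16-pixel boundary and
 * recomputes the swizzled offset for each span.
 */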
bool
intel_texsubimage_tiled_memcpy(struct gl_context * ctx,
                               GLuint dims,
                               struct gl_texture_image *texImage,
                               GLint xoffset, GLint yoffset, GLint zoffset,
                               GLsizei width, GLsizei height, GLsizei depth,
                               GLenum format, GLenum type,
                               const GLvoid *pixels,
                               const struct gl_pixelstore_attrib *packing,
                               bool for_glTexImage)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_texture_image *image = intel_texture_image(texImage);

   /* The miptree's buffer. */
   drm_intel_bo *bo;

   int error = 0;

   /* This fastpath is restricted to a specific texture type: level 0 of
    * a 2D BGRA texture. It could be generalized to support more types by
    * varying the arithmetic loop below.
    */
   if (!intel->has_llc ||
       format != GL_BGRA ||
       type != GL_UNSIGNED_BYTE ||
       texImage->TexFormat != MESA_FORMAT_ARGB8888 ||
       texImage->TexObject->Target != GL_TEXTURE_2D ||
       texImage->Level != 0 ||
       pixels == NULL ||
       packing->Alignment > 4)
      return false;

   if (for_glTexImage)
      ctx->Driver.AllocTextureImageBuffer(ctx, texImage);

   if (!image->mt ||
       image->mt->region->tiling != I915_TILING_X) {
      /* The algorithm below is written only for X-tiled memory. */
      return false;
   }

   bo = image->mt->region->bo;

   if (drm_intel_bo_references(intel->batch.bo, bo)) {
      perf_debug("Flushing before mapping a referenced bo.\n");
      intel_batchbuffer_flush(intel);
   }

   if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
      if (drm_intel_bo_busy(bo)) {
         perf_debug("Mapping a busy BO, causing a stall on the GPU.\n");
      }
   }

   error = drm_intel_bo_map(bo, true /*write_enable*/);
   if (error || bo->virtual == NULL) {
      DBG("%s: failed to map bo\n", __FUNCTION__);
      return false;
   }

   /* We postponed printing this message until having committed to executing
    * the function.
    */
   DBG("%s: level=%d offset=(%d,%d) (w,h)=(%d,%d)\n",
       __FUNCTION__, texImage->Level, xoffset, yoffset, width, height);

   /* In the tiling algorithm below, some variables are in units of pixels,
    * others are in units of bytes, and others (such as height) are unitless.
    * Each variable name is suffixed with its units.
    */

   const uint32_t x_max_pixels = xoffset + width;
   const uint32_t y_max_pixels = yoffset + height;

   const uint32_t tile_size_bytes = 4096;

   const uint32_t tile_width_bytes = 512;
   const uint32_t tile_width_pixels = 128;

   const uint32_t tile_height = 8;

   const uint32_t cpp = 4; /* chars per pixel of GL_BGRA */
   const uint32_t swizzle_width_pixels = 16;

   const uint32_t stride_bytes = image->mt->region->pitch * cpp;
   const uint32_t width_tiles = stride_bytes / tile_width_bytes;

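   /* Worked example of the offset math below, assuming a hypothetical
    * surface with stride_bytes = 2048 (so width_tiles = 4): an X tile is
    * 4096 bytes laid out as 8 rows of 512 bytes (128 BGRA pixels). For the
    * pixel at (x_pixels, y_pixels) = (130, 9):
    *
    *    y_offset_bytes = (9 / 8) * 4 * 4096 + (9 % 8) * 512   = 16384 + 512
    *    x_offset_bytes = (130 / 128) * 4096 + (130 % 128) * 4 = 4096 + 8
    *
    * i.e. 21000 bytes from the start of the bo: tile row 1, tile column 1,
    * intra-tile row 1, byte 8 within that row.
    */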
   for (uint32_t y_pixels = yoffset; y_pixels < y_max_pixels; ++y_pixels) {
      const uint32_t y_offset_bytes = (y_pixels / tile_height) * width_tiles * tile_size_bytes
                                    + (y_pixels % tile_height) * tile_width_bytes;

      for (uint32_t x_pixels = xoffset; x_pixels < x_max_pixels; x_pixels += swizzle_width_pixels) {
         const uint32_t x_offset_bytes = (x_pixels / tile_width_pixels) * tile_size_bytes
                                       + (x_pixels % tile_width_pixels) * cpp;

         intptr_t offset_bytes = y_offset_bytes + x_offset_bytes;
         if (intel->has_swizzling) {
#if 0
            /* Clear, unoptimized version. */
            bool bit6 = (offset_bytes >> 6) & 1;
            bool bit9 = (offset_bytes >> 9) & 1;
            bool bit10 = (offset_bytes >> 10) & 1;

            if (bit9 ^ bit10)
               offset_bytes ^= (1 << 6);
#else
            /* Optimized, obfuscated version. */
            offset_bytes ^= ((offset_bytes >> 3) ^ (offset_bytes >> 4))
                          & (1 << 6);
#endif
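            /* The two versions agree: shifting offset_bytes right by 3
             * moves bit 9 down to bit position 6, and shifting right by 4
             * moves bit 10 down to bit position 6, so after masking with
             * (1 << 6) the XOR flips bit 6 exactly when bit9 ^ bit10 is set.
             */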
         }

         const uint32_t swizzle_bound_pixels = ALIGN(x_pixels + 1, swizzle_width_pixels);
         const uint32_t memcpy_bound_pixels = MIN2(x_max_pixels, swizzle_bound_pixels);
         const uint32_t copy_size = cpp * (memcpy_bound_pixels - x_pixels);

         memcpy(bo->virtual + offset_bytes, pixels, copy_size);
         pixels += copy_size;

         /* Round x_pixels down to the start of its swizzle span so that the
          * loop increment advances it to the next span boundary, even when
          * xoffset is not span-aligned.
          */
         x_pixels -= (x_pixels % swizzle_width_pixels);
      }
   }

   drm_intel_bo_unmap(bo);
   return true;
}

static void
intelTexSubImage(struct gl_context * ctx,
                 GLuint dims,
                 struct gl_texture_image *texImage,
                 GLint xoffset, GLint yoffset, GLint zoffset,
                 GLsizei width, GLsizei height, GLsizei depth,
                 GLenum format, GLenum type,
                 const GLvoid * pixels,
                 const struct gl_pixelstore_attrib *packing)
{
   bool ok;

   ok = intel_texsubimage_tiled_memcpy(ctx, dims, texImage,
                                       xoffset, yoffset, zoffset,
                                       width, height, depth,
                                       format, type, pixels, packing,
                                       false /*for_glTexImage*/);
   if (ok)
      return;

   /* The intel_blit_texsubimage() function only handles 2D images */
   if (dims != 2 || !intel_blit_texsubimage(ctx, texImage,
                                            xoffset, yoffset,
                                            width, height,
                                            format, type, pixels, packing)) {
      _mesa_store_texsubimage(ctx, dims, texImage,
                              xoffset, yoffset, zoffset,
                              width, height, depth,
                              format, type, pixels, packing);
   }
}

void
intelInitTextureSubImageFuncs(struct dd_function_table *functions)
{
   functions->TexSubImage = intelTexSubImage;
}