{
intel_batchbuffer_reset(intel);
- if (!intel->has_llc) {
- intel->batch.cpu_map = malloc(intel->maxBatchSize);
- intel->batch.map = intel->batch.cpu_map;
- }
+ intel->batch.cpu_map = malloc(intel->maxBatchSize);
+ intel->batch.map = intel->batch.cpu_map;
}
static void
intel_batchbuffer_reset(struct intel_context *intel)
{
intel->batch.bo = drm_intel_bo_alloc(intel->bufmgr, "batchbuffer",
intel->maxBatchSize, 4096);
- if (intel->has_llc) {
- drm_intel_bo_map(intel->batch.bo, true);
- intel->batch.map = intel->batch.bo->virtual;
- }
intel->batch.reserved_space = BATCH_RESERVED;
intel->batch.state_batch_offset = intel->batch.bo->size;
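(Note: state_batch_offset starts at the very end of the bo and moves downward as indirect state is packed in from the top, so the region [state_batch_offset, bo->size) holds state while commands grow up from offset 0. A sketch of the full shadow-buffer lifecycle follows the flush hunk below.)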
struct intel_batchbuffer *batch = &intel->batch;
int ret = 0;
- if (intel->has_llc) {
- drm_intel_bo_unmap(batch->bo);
- } else {
- ret = drm_intel_bo_subdata(batch->bo, 0, 4*batch->used, batch->map);
- if (ret == 0 && batch->state_batch_offset != batch->bo->size) {
- ret = drm_intel_bo_subdata(batch->bo,
- batch->state_batch_offset,
- batch->bo->size - batch->state_batch_offset,
- (char *)batch->map + batch->state_batch_offset);
- }
+ ret = drm_intel_bo_subdata(batch->bo, 0, 4*batch->used, batch->map);
+ if (ret == 0 && batch->state_batch_offset != batch->bo->size) {
+ ret = drm_intel_bo_subdata(batch->bo,
+ batch->state_batch_offset,
+ batch->bo->size - batch->state_batch_offset,
+ (char *)batch->map + batch->state_batch_offset);
}
if (!intel->intelScreen->no_hw) {
return false;
}
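After this hunk, both halves of the batch always travel through the malloc'ed shadow buffer: command dwords accumulate at the bottom of map, indirect state sits at the top, and both are pushed into the bo with drm_intel_bo_subdata() at flush time. A minimal sketch of that lifecycle; the libdrm calls are the ones used in the diff, but the struct and function names are illustrative, not the driver's:

#include <stdint.h>
#include <intel_bufmgr.h> /* libdrm: drm_intel_bo, drm_intel_bo_subdata() */

/* Illustrative stand-in for struct intel_batchbuffer: commands grow up
 * from map[0]; indirect state occupies [state_offset, bo->size). */
struct shadow_batch {
   drm_intel_bo *bo;      /* destination buffer object */
   uint32_t *map;         /* malloc'ed CPU shadow of the bo */
   int used;              /* command dwords emitted so far */
   uint32_t state_offset; /* byte offset where indirect state begins */
};

/* Emit one command dword into the CPU shadow buffer. */
static void
batch_emit(struct shadow_batch *batch, uint32_t dword)
{
   batch->map[batch->used++] = dword;
}

/* Push both halves of the shadow buffer into the bo, as in the restored
 * flush path above: commands at offset 0, indirect state at the top.
 * Execution (e.g. drm_intel_bo_mrb_exec()) would follow on success. */
static int
batch_flush(struct shadow_batch *batch)
{
   int ret = drm_intel_bo_subdata(batch->bo, 0, 4 * batch->used, batch->map);
   if (ret == 0 && batch->state_offset != batch->bo->size)
      ret = drm_intel_bo_subdata(batch->bo, batch->state_offset,
                                 batch->bo->size - batch->state_offset,
                                 (char *)batch->map + batch->state_offset);
   return ret;
}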
-/**
- * \brief A fast path for glTexImage and glTexSubImage.
- *
- * \param for_glTexImage Was this called from glTexImage or glTexSubImage?
- *
- * This fast path is taken when the hardware natively supports the texture
- * format (such as GL_BGRA) and when the texture memory is X-tiled. It uploads
- * the texture data by mapping the texture memory without a GTT fence, thus
- * acquiring a tiled view of the memory, and then memcpy'ing successive
- * subspans within each tile.
- *
- * This is a performance win over the conventional texture upload path because
- * it avoids the performance penalty of writing through the write-combine
- * buffer. In the conventional texture upload path,
- * texstore.c:store_texsubimage(), the texture memory is mapped through a GTT
- * fence, thus acquiring a linear view of the memory, then each row in the
- * image is memcpy'd. In this fast path, we replace each row's memcpy with
- * a sequence of memcpy's over each bit6 swizzle span in the row.
- *
- * This fast path's use case is Google Chrome's paint rectangles. Chrome (as
- * of version 21) renders each page as a tiling of 256x256 GL_BGRA textures.
- * Each page's content is initially uploaded with glTexImage2D and damaged
- * regions are updated with glTexSubImage2D. On some workloads, the
- * performance gain of this fastpath on Sandybridge is over 5x.
- */
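Since the comment above leans on the X-tile layout, the offset arithmetic is worth stating in isolation. A minimal sketch, assuming the same constants the removed loop uses (4096-byte tiles laid out as 8 rows of 512 bytes, 4 bytes per pixel); the helper name and signature are illustrative:

#include <stdint.h>
#include <stdbool.h>

/* Byte offset of pixel (x, y) in a 4-byte-per-pixel X-tiled surface.
 * An X tile is 4096 bytes arranged as 8 rows of 512 bytes; whole tiles
 * are laid out row-major across the surface. */
static uint32_t
xtiled_offset(uint32_t x, uint32_t y, uint32_t stride_bytes, bool has_swizzling)
{
   const uint32_t tile_size = 4096;  /* bytes per X tile */
   const uint32_t tile_row = 512;    /* bytes per row within a tile */
   const uint32_t tile_height = 8;   /* rows per tile */
   const uint32_t cpp = 4;           /* bytes per pixel of BGRA */
   const uint32_t tiles_per_row = stride_bytes / tile_row;

   uint32_t offset = (y / tile_height) * tiles_per_row * tile_size
                   + (y % tile_height) * tile_row
                   + (x * cpp / tile_row) * tile_size
                   + (x * cpp) % tile_row;

   /* Bit6 swizzling XORs address bit 6 with bits 9 and 10; this is the
    * same expression as the "optimized, obfuscated version" below. */
   if (has_swizzling)
      offset ^= ((offset >> 3) ^ (offset >> 4)) & (1 << 6);

   return offset;
}

For example, on a 512-pixel-wide surface (stride 2048 bytes), pixel (130, 3) resolves to 1*4096 + 3*512 + 2*4 = 5640 bytes: the second tile of the first tile row, fourth row within that tile.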
-bool
-intel_texsubimage_tiled_memcpy(struct gl_context * ctx,
- GLuint dims,
- struct gl_texture_image *texImage,
- GLint xoffset, GLint yoffset, GLint zoffset,
- GLsizei width, GLsizei height, GLsizei depth,
- GLenum format, GLenum type,
- const GLvoid *pixels,
- const struct gl_pixelstore_attrib *packing,
- bool for_glTexImage)
-{
- struct intel_context *intel = intel_context(ctx);
- struct intel_texture_image *image = intel_texture_image(texImage);
-
- /* The miptree's buffer. */
- drm_intel_bo *bo;
-
- int error = 0;
-
- /* This fastpath is restricted to a specific texture type: level 0 of
- * a 2D BGRA texture. It could be generalized to support more types by
- * varying the arithmetic loop below.
- */
- if (!intel->has_llc ||
- format != GL_BGRA ||
- type != GL_UNSIGNED_BYTE ||
- texImage->TexFormat != MESA_FORMAT_ARGB8888 ||
- texImage->TexObject->Target != GL_TEXTURE_2D ||
- texImage->Level != 0 ||
- pixels == NULL ||
- _mesa_is_bufferobj(packing->BufferObj) ||
- packing->Alignment > 4 ||
- packing->SkipPixels > 0 ||
- packing->SkipRows > 0 ||
- (packing->RowLength != 0 && packing->RowLength != width) ||
- packing->SwapBytes ||
- packing->LsbFirst ||
- packing->Invert)
- return false;
-
- if (for_glTexImage)
- ctx->Driver.AllocTextureImageBuffer(ctx, texImage);
-
- if (!image->mt ||
- image->mt->region->tiling != I915_TILING_X) {
- /* The algorithm below is written only for X-tiled memory. */
- return false;
- }
-
- bo = image->mt->region->bo;
-
- if (drm_intel_bo_references(intel->batch.bo, bo)) {
- perf_debug("Flushing before mapping a referenced bo.\n");
- intel_batchbuffer_flush(intel);
- }
-
- if (unlikely(intel->perf_debug)) {
- if (drm_intel_bo_busy(bo)) {
- perf_debug("Mapping a busy BO, causing a stall on the GPU.\n");
- }
- }
-
- error = drm_intel_bo_map(bo, true /*write_enable*/);
- if (error || bo->virtual == NULL) {
- DBG("%s: failed to map bo\n", __FUNCTION__);
- return false;
- }
-
- /* We postponed printing this message until having committed to executing
- * the function.
- */
- DBG("%s: level=%d offset=(%d,%d) (w,h)=(%d,%d)\n",
- __FUNCTION__, texImage->Level, xoffset, yoffset, width, height);
-
- /* In the tiling algorithm below, some variables are in units of pixels,
- * others are in units of bytes, and others (such as height) are unitless.
- * Each variable name is suffixed with its units.
- */
-
- const uint32_t x_max_pixels = xoffset + width;
- const uint32_t y_max_pixels = yoffset + height;
-
- const uint32_t tile_size_bytes = 4096;
-
- const uint32_t tile_width_bytes = 512;
- const uint32_t tile_width_pixels = 128;
-
- const uint32_t tile_height = 8;
-
- const uint32_t cpp = 4; /* chars per pixel of GL_BGRA */
- const uint32_t swizzle_width_pixels = 16;
-
- const uint32_t stride_bytes = image->mt->region->pitch;
- const uint32_t width_tiles = stride_bytes / tile_width_bytes;
-
- for (uint32_t y_pixels = yoffset; y_pixels < y_max_pixels; ++y_pixels) {
- const uint32_t y_offset_bytes = (y_pixels / tile_height) * width_tiles * tile_size_bytes
- + (y_pixels % tile_height) * tile_width_bytes;
-
- for (uint32_t x_pixels = xoffset; x_pixels < x_max_pixels; x_pixels += swizzle_width_pixels) {
- const uint32_t x_offset_bytes = (x_pixels / tile_width_pixels) * tile_size_bytes
- + (x_pixels % tile_width_pixels) * cpp;
-
- intptr_t offset_bytes = y_offset_bytes + x_offset_bytes;
- if (intel->has_swizzling) {
-#if 0
- /* Clear, unoptimized version. */
- bool bit6 = (offset_bytes >> 6) & 1;
- bool bit9 = (offset_bytes >> 9) & 1;
- bool bit10 = (offset_bytes >> 10) & 1;
-
- if (bit9 ^ bit10)
- offset_bytes ^= (1 << 6);
-#else
- /* Optimized, obfuscated version. */
- offset_bytes ^= ((offset_bytes >> 3) ^ (offset_bytes >> 4))
- & (1 << 6);
-#endif
- }
-
- const uint32_t swizzle_bound_pixels = ALIGN(x_pixels + 1, swizzle_width_pixels);
- const uint32_t memcpy_bound_pixels = MIN2(x_max_pixels, swizzle_bound_pixels);
- const uint32_t copy_size = cpp * (memcpy_bound_pixels - x_pixels);
-
- memcpy(bo->virtual + offset_bytes, pixels, copy_size);
- pixels += copy_size;
- x_pixels -= (x_pixels % swizzle_width_pixels);
- }
- }
-
- drm_intel_bo_unmap(bo);
- return true;
-}
-
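One subtlety in the removed loop worth recording: each inner iteration copies at most one 16-pixel swizzle span. With xoffset = 5, for instance, the first memcpy covers pixels 5..15 (44 bytes), x_pixels is then realigned down to the span boundary, and the next iteration starts at pixel 16. An aligned span is a contiguous 64-byte run in the tiled view, and bit6 swizzling only depends on address bits above bit 5, which is why a single memcpy per span stays correct even with swizzling enabled.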
static void
intelTexSubImage(struct gl_context * ctx,
GLuint dims,
struct gl_texture_image *texImage,
GLint xoffset, GLint yoffset, GLint zoffset,
GLsizei width, GLsizei height, GLsizei depth,
GLenum format, GLenum type,
const GLvoid * pixels,
const struct gl_pixelstore_attrib *packing)
{
- bool ok;
-
- ok = intel_texsubimage_tiled_memcpy(ctx, dims, texImage,
- xoffset, yoffset, zoffset,
- width, height, depth,
- format, type, pixels, packing,
- false /*for_glTexImage*/);
- if (ok)
- return;
-
/* The intel_blit_texsubimage() function only handles 2D images */
if (dims != 2 || !intel_blit_texsubimage(ctx, texImage,
xoffset, yoffset,