#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "os/os_thread.h"
-#include "util/u_format.h"
+#include "util/format/u_format.h"
#include "util/u_inlines.h"
#include "util/u_math.h"
#include "util/u_memory.h"
{
struct svga_texture *texture = svga_texture(st->base.resource);
SVGA3dCopyBox box;
- enum pipe_error ret;
assert(!st->use_direct_map);
(util_format_get_blockwidth(texture->b.b.format)
* util_format_get_blockheight(texture->b.b.format)));
- ret = SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags);
- if (ret != PIPE_OK) {
- svga_context_flush(svga, NULL);
- ret = SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags);
- assert(ret == PIPE_OK);
- }
+ SVGA_RETRY(svga, SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags));
}
/* Ensure any pending operations on host surfaces are queued on the command
* buffer first.
*/
- svga_surfaces_flush( svga );
+ svga_surfaces_flush(svga);
if (!st->swbuf) {
/* Do the DMA transfer in a single go */
svga_transfer_dma_band(svga, st, transfer,
- st->base.box.x, st->base.box.y, st->base.box.z,
- st->base.box.width, st->base.box.height, st->base.box.depth,
+ st->box.x, st->box.y, st->box.z,
+ st->box.w, st->box.h, st->box.d,
0, 0, 0,
flags);
h = st->hw_nblocksy * blockheight;
srcy = 0;
- for (y = 0; y < st->base.box.height; y += h) {
+ for (y = 0; y < st->box.h; y += h) {
unsigned offset, length;
void *hw, *sw;
- if (y + h > st->base.box.height)
- h = st->base.box.height - y;
+ if (y + h > st->box.h)
+ h = st->box.h - y;
/* Transfer band must be aligned to pixel block boundaries */
assert(y % blockheight == 0);
}
svga_transfer_dma_band(svga, st, transfer,
- st->base.box.x, y, st->base.box.z,
- st->base.box.width, h, st->base.box.depth,
+ st->box.x, y, st->box.z,
+ st->box.w, h, st->box.d,
0, srcy, 0, flags);
/*
-static boolean
+static bool
svga_texture_get_handle(struct pipe_screen *screen,
struct pipe_resource *texture,
struct winsys_handle *whandle)
static void
svga_texture_destroy(struct pipe_screen *screen,
- struct pipe_resource *pt)
+ struct pipe_resource *pt)
{
struct svga_screen *ss = svga_screen(screen);
struct svga_texture *tex = svga_texture(pt);
/* Destroy the backed surface handle if exists */
if (tex->backed_handle)
svga_screen_surface_destroy(ss, &tex->backed_key, &tex->backed_handle);
-
+
ss->hud.total_resource_bytes -= tex->size;
FREE(tex->defined);
}
-/**
- * Determine if the resource was rendered to
- */
-static inline boolean
-was_tex_rendered_to(struct pipe_resource *resource,
- const struct pipe_transfer *transfer)
-{
- unsigned face;
-
- if (resource->target == PIPE_TEXTURE_CUBE) {
- assert(transfer->box.depth == 1);
- face = transfer->box.z;
- }
- else {
- face = 0;
- }
-
- return svga_was_texture_rendered_to(svga_texture(resource),
- face, transfer->level);
-}
-
-
/**
 * Determine if we need to read back a texture image before mapping it.
 */
static inline boolean
-need_tex_readback(struct pipe_transfer *transfer)
+need_tex_readback(struct svga_transfer *st)
{
-   if (transfer->usage & PIPE_TRANSFER_READ)
+   if (st->base.usage & PIPE_TRANSFER_READ)
      return TRUE;
- if ((transfer->usage & PIPE_TRANSFER_WRITE) &&
- ((transfer->usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) == 0)) {
- return was_tex_rendered_to(transfer->resource, transfer);
+   /* A write that does not discard the whole resource must preserve the
+    * existing contents, so we also need a readback if the texture was
+    * previously rendered to on the host.
+    */
+ if ((st->base.usage & PIPE_TRANSFER_WRITE) &&
+ ((st->base.usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) == 0)) {
+ return svga_was_texture_rendered_to(svga_texture(st->base.resource),
+ st->slice, st->base.level);
}
return FALSE;
}
+/* Read back one slice/level of a VGPU9 guest-backed image from the host.
+ * SVGA_RETRY flushes the context and reissues the command once if the
+ * command buffer is full, so no pipe_error needs to be propagated.
+ */
-static enum pipe_error
+static void
readback_image_vgpu9(struct svga_context *svga,
struct svga_winsys_surface *surf,
unsigned slice,
unsigned level)
{
- enum pipe_error ret;
-
- ret = SVGA3D_ReadbackGBImage(svga->swc, surf, slice, level);
- if (ret != PIPE_OK) {
- svga_context_flush(svga, NULL);
- ret = SVGA3D_ReadbackGBImage(svga->swc, surf, slice, level);
- }
- return ret;
+ SVGA_RETRY(svga, SVGA3D_ReadbackGBImage(svga->swc, surf, slice, level));
}
+/* Read back one subresource of a VGPU10 surface from the host.
+ * VGPU10 addresses images by a flat subresource index computed as
+ * slice * numMipLevels + level.  SVGA_RETRY handles the flush-and-retry
+ * on a full command buffer, so no pipe_error is returned.
+ */
-static enum pipe_error
+static void
readback_image_vgpu10(struct svga_context *svga,
struct svga_winsys_surface *surf,
unsigned slice,
unsigned level,
unsigned numMipLevels)
{
- enum pipe_error ret;
unsigned subResource;
subResource = slice * numMipLevels + level;
- ret = SVGA3D_vgpu10_ReadbackSubResource(svga->swc, surf, subResource);
- if (ret != PIPE_OK) {
- svga_context_flush(svga, NULL);
- ret = SVGA3D_vgpu10_ReadbackSubResource(svga->swc, surf, subResource);
- }
- return ret;
+ SVGA_RETRY(svga, SVGA3D_vgpu10_ReadbackSubResource(svga->swc, surf,
+ subResource));
}
unsigned usage = st->base.usage;
/* we'll put the data into a tightly packed buffer */
- nblocksx = util_format_get_nblocksx(texture->format, st->base.box.width);
- nblocksy = util_format_get_nblocksy(texture->format, st->base.box.height);
- d = st->base.box.depth;
+ nblocksx = util_format_get_nblocksx(texture->format, st->box.w);
+ nblocksy = util_format_get_nblocksy(texture->format, st->box.h);
+ d = st->box.d;
st->base.stride = nblocksx*util_format_get_blocksize(texture->format);
st->base.layer_stride = st->base.stride * nblocksy;
unsigned w, h, nblocksx, nblocksy, i;
unsigned usage = st->base.usage;
- if (need_tex_readback(transfer)) {
- enum pipe_error ret;
-
+ if (need_tex_readback(st)) {
svga_surfaces_flush(svga);
- for (i = 0; i < st->base.box.depth; i++) {
- if (svga_have_vgpu10(svga)) {
- ret = readback_image_vgpu10(svga, surf, st->slice + i, level,
- tex->b.b.last_level + 1);
- } else {
- ret = readback_image_vgpu9(svga, surf, st->slice + i, level);
+ if (!svga->swc->force_coherent || tex->imported) {
+ for (i = 0; i < st->box.d; i++) {
+ if (svga_have_vgpu10(svga)) {
+ readback_image_vgpu10(svga, surf, st->slice + i, level,
+ tex->b.b.last_level + 1);
+ } else {
+ readback_image_vgpu9(svga, surf, st->slice + i, level);
+ }
}
- }
- svga->hud.num_readbacks++;
- SVGA_STATS_COUNT_INC(sws, SVGA_STATS_COUNT_TEXREADBACK);
-
- assert(ret == PIPE_OK);
- (void) ret;
+ svga->hud.num_readbacks++;
+ SVGA_STATS_COUNT_INC(sws, SVGA_STATS_COUNT_TEXREADBACK);
- svga_context_flush(svga, NULL);
+ svga_context_flush(svga, NULL);
+ }
/*
* Note: if PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE were specified
* we could potentially clear the flag for all faces/layers/mips.
{
SVGA3dSize baseLevelSize;
uint8_t *map;
- boolean retry;
+ boolean retry, rebind;
unsigned offset, mip_width, mip_height;
+ struct svga_winsys_context *swc = svga->swc;
+
+ if (swc->force_coherent) {
+ usage |= PIPE_TRANSFER_PERSISTENT | PIPE_TRANSFER_COHERENT;
+ }
+
+ map = SVGA_TRY_MAP(svga->swc->surface_map
+ (svga->swc, surf, usage, &retry, &rebind), retry);
- map = svga->swc->surface_map(svga->swc, surf, usage, &retry);
if (map == NULL && retry) {
/*
* At this point, the svga_surfaces_flush() should already have
* called in svga_texture_get_transfer().
*/
svga->hud.surface_write_flushes++;
+ svga_retry_enter(svga);
+ svga_context_flush(svga, NULL);
+ map = svga->swc->surface_map(svga->swc, surf, usage, &retry, &rebind);
+ svga_retry_exit(svga);
+ }
+
+ if (map && rebind) {
+ enum pipe_error ret;
+
+ ret = SVGA3D_BindGBSurface(swc, surf);
+ if (ret != PIPE_OK) {
+ svga_context_flush(svga, NULL);
+ ret = SVGA3D_BindGBSurface(swc, surf);
+ assert(ret == PIPE_OK);
+ }
svga_context_flush(svga, NULL);
- map = svga->swc->surface_map(svga->swc, surf, usage, &retry);
}
/*
baseLevelSize.depth = tex->b.b.depth0;
if ((tex->b.b.target == PIPE_TEXTURE_1D_ARRAY) ||
- (tex->b.b.target == PIPE_TEXTURE_2D_ARRAY)) {
+ (tex->b.b.target == PIPE_TEXTURE_2D_ARRAY) ||
+ (tex->b.b.target == PIPE_TEXTURE_CUBE_ARRAY)) {
st->base.layer_stride =
svga3dsurface_get_image_offset(tex->key.format, baseLevelSize,
tex->b.b.last_level + 1, 1, 0);
offset += svga3dsurface_get_pixel_offset(tex->key.format,
mip_width, mip_height,
- st->base.box.x,
- st->base.box.y,
- st->base.box.z);
+ st->box.x,
+ st->box.y,
+ st->box.z);
+
return (void *) (map + offset);
}
}
struct svga_transfer *st;
struct svga_winsys_surface *surf = tex->handle;
boolean use_direct_map = svga_have_gb_objects(svga) &&
- !svga_have_gb_dma(svga);
+ (!svga_have_gb_dma(svga) || (usage & PIPE_TRANSFER_WRITE));
void *map = NULL;
int64_t begin = svga_get_time(svga);
st->base.usage = usage;
st->base.box = *box;
+ /* The modified transfer map box with the array index removed from z.
+ * The array index is specified in slice.
+ */
+ st->box.x = box->x;
+ st->box.y = box->y;
+ st->box.z = box->z;
+ st->box.w = box->width;
+ st->box.h = box->height;
+ st->box.d = box->depth;
+
switch (tex->b.b.target) {
case PIPE_TEXTURE_CUBE:
st->slice = st->base.box.z;
- st->base.box.z = 0; /* so we don't apply double offsets below */
+ st->box.z = 0; /* so we don't apply double offsets below */
break;
- case PIPE_TEXTURE_2D_ARRAY:
case PIPE_TEXTURE_1D_ARRAY:
+ case PIPE_TEXTURE_2D_ARRAY:
+ case PIPE_TEXTURE_CUBE_ARRAY:
st->slice = st->base.box.z;
- st->base.box.z = 0; /* so we don't apply double offsets below */
+ st->box.z = 0; /* so we don't apply double offsets below */
/* Force direct map for transfering multiple slices */
if (st->base.box.depth > 1)
break;
}
+ /* Force direct map for multisample surface */
+ if (texture->nr_samples > 1) {
+ assert(svga_have_gb_objects(svga));
+ assert(sws->have_sm4_1);
+ use_direct_map = TRUE;
+ }
+
st->use_direct_map = use_direct_map;
pipe_resource_reference(&st->base.resource, texture);
/* If this is the first time mapping to the surface in this
- * command buffer, clear the dirty masks of this surface.
+ * command buffer and there is no pending primitives, clear
+ * the dirty masks of this surface.
*/
- if (sws->surface_is_flushed(sws, surf)) {
+ if (sws->surface_is_flushed(sws, surf) &&
+ (svga_have_vgpu10(svga) ||
+ !svga_hwtnl_has_pending_prim(svga->hwtnl))) {
svga_clear_texture_dirty(tex);
}
else {
boolean can_use_upload = tex->can_use_upload &&
!(st->base.usage & PIPE_TRANSFER_READ);
- boolean was_rendered_to = was_tex_rendered_to(texture, &st->base);
+ boolean was_rendered_to =
+ svga_was_texture_rendered_to(svga_texture(texture),
+ st->slice, st->base.level);
/* If the texture was already rendered to and upload buffer
* is supported, then we will use upload buffer to
if (usage & PIPE_TRANSFER_WRITE) {
/* record texture upload for HUD */
svga->hud.num_bytes_uploaded +=
- st->base.layer_stride * st->base.box.depth;
+ st->base.layer_stride * st->box.d;
/* mark this texture level as dirty */
svga_set_texture_dirty(tex, st->slice, level);
swc->surface_unmap(swc, surf, &rebind);
if (rebind) {
- enum pipe_error ret;
- ret = SVGA3D_BindGBSurface(swc, surf);
- if (ret != PIPE_OK) {
- /* flush and retry */
- svga_context_flush(svga, NULL);
- ret = SVGA3D_BindGBSurface(swc, surf);
- assert(ret == PIPE_OK);
- }
+ SVGA_RETRY(svga, SVGA3D_BindGBSurface(swc, surf));
}
}
+/* Update the host copy of a VGPU9 guest-backed image for the given box,
+ * slice and level.  SVGA_RETRY flushes and reissues the command once if
+ * the command buffer is full, so no pipe_error needs to be returned.
+ */
-static enum pipe_error
+static void
update_image_vgpu9(struct svga_context *svga,
struct svga_winsys_surface *surf,
const SVGA3dBox *box,
unsigned slice,
unsigned level)
{
- enum pipe_error ret;
-
- ret = SVGA3D_UpdateGBImage(svga->swc, surf, box, slice, level);
- if (ret != PIPE_OK) {
- svga_context_flush(svga, NULL);
- ret = SVGA3D_UpdateGBImage(svga->swc, surf, box, slice, level);
- }
- return ret;
+ SVGA_RETRY(svga, SVGA3D_UpdateGBImage(svga->swc, surf, box, slice, level));
}
+/* Update the host copy of one subresource of a VGPU10 surface for the
+ * given box.  The flat subresource index is slice * numMipLevels + level.
+ * SVGA_RETRY handles flush-and-retry on a full command buffer.
+ *
+ * NOTE(review): the body references 'slice', but no 'unsigned slice'
+ * parameter is visible in this hunk — a context line appears to be
+ * missing; confirm against the full file.
+ */
-static enum pipe_error
+static void
update_image_vgpu10(struct svga_context *svga,
struct svga_winsys_surface *surf,
const SVGA3dBox *box,
unsigned level,
unsigned numMipLevels)
{
- enum pipe_error ret;
unsigned subResource;
subResource = slice * numMipLevels + level;
- ret = SVGA3D_vgpu10_UpdateSubResource(svga->swc, surf, box, subResource);
- if (ret != PIPE_OK) {
- svga_context_flush(svga, NULL);
- ret = SVGA3D_vgpu10_UpdateSubResource(svga->swc, surf, box, subResource);
- }
- return ret;
+
+ SVGA_RETRY(svga, SVGA3D_vgpu10_UpdateSubResource(svga->swc, surf, box,
+ subResource));
}
*/
static void
svga_texture_transfer_unmap_dma(struct svga_context *svga,
- struct svga_transfer *st)
+ struct svga_transfer *st)
{
struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
- if (st->hwbuf)
+ if (!st->swbuf)
sws->buffer_unmap(sws, st->hwbuf);
if (st->base.usage & PIPE_TRANSFER_WRITE) {
/* Use DMA to transfer texture data */
SVGA3dSurfaceDMAFlags flags;
+ struct pipe_resource *texture = st->base.resource;
+ struct svga_texture *tex = svga_texture(texture);
+
memset(&flags, 0, sizeof flags);
if (st->base.usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
}
svga_transfer_dma(svga, st, SVGA3D_WRITE_HOST_VRAM, flags);
+ svga_set_texture_rendered_to(tex, st->slice, st->base.level);
}
FREE(st->swbuf);
*/
static void
svga_texture_transfer_unmap_direct(struct svga_context *svga,
- struct svga_transfer *st)
+ struct svga_transfer *st)
{
struct pipe_transfer *transfer = &st->base;
struct svga_texture *tex = svga_texture(transfer->resource);
/* Now send an update command to update the content in the backend. */
if (st->base.usage & PIPE_TRANSFER_WRITE) {
struct svga_winsys_surface *surf = tex->handle;
- SVGA3dBox box;
- enum pipe_error ret;
- unsigned nlayers = 1;
assert(svga_have_gb_objects(svga));
/* update the effected region */
- box.x = transfer->box.x;
- box.y = transfer->box.y;
- box.w = transfer->box.width;
- box.h = transfer->box.height;
- box.d = transfer->box.depth;
+ SVGA3dBox box = st->box;
+ unsigned nlayers;
switch (tex->b.b.target) {
- case PIPE_TEXTURE_CUBE:
- box.z = 0;
- break;
case PIPE_TEXTURE_2D_ARRAY:
- nlayers = box.d;
- box.z = 0;
- box.d = 1;
- break;
+ case PIPE_TEXTURE_CUBE_ARRAY:
case PIPE_TEXTURE_1D_ARRAY:
nlayers = box.d;
- box.y = box.z = 0;
box.d = 1;
break;
default:
- box.z = transfer->box.z;
+ nlayers = 1;
break;
}
+
if (0)
debug_printf("%s %d, %d, %d %d x %d x %d\n",
__FUNCTION__,
box.x, box.y, box.z,
box.w, box.h, box.d);
- if (svga_have_vgpu10(svga)) {
- unsigned i;
- for (i = 0; i < nlayers; i++) {
- ret = update_image_vgpu10(svga, surf, &box,
- st->slice + i, transfer->level,
- tex->b.b.last_level + 1);
- assert(ret == PIPE_OK);
+ if (!svga->swc->force_coherent || tex->imported) {
+ if (svga_have_vgpu10(svga)) {
+ unsigned i;
+
+ for (i = 0; i < nlayers; i++) {
+ update_image_vgpu10(svga, surf, &box,
+ st->slice + i, transfer->level,
+ tex->b.b.last_level + 1);
+ }
+ } else {
+ assert(nlayers == 1);
+ update_image_vgpu9(svga, surf, &box, st->slice,
+ transfer->level);
}
- } else {
- assert(nlayers == 1);
- ret = update_image_vgpu9(svga, surf, &box, st->slice, transfer->level);
- assert(ret == PIPE_OK);
}
- (void) ret;
}
}
+
static void
svga_texture_transfer_unmap(struct pipe_context *pipe,
- struct pipe_transfer *transfer)
+ struct pipe_transfer *transfer)
{
struct svga_context *svga = svga_context(pipe);
struct svga_screen *ss = svga_screen(pipe->screen);
/* Verify the number of mipmap levels isn't impossibly large. For example,
* if the base 2D image is 16x16, we can't have 8 mipmap levels.
- * The state tracker should never ask us to create a resource with invalid
+ * The gallium frontend should never ask us to create a resource with invalid
* parameters.
*/
{
tex->b.b.nr_samples = 0;
}
else if (tex->b.b.nr_samples > 1) {
- tex->key.flags |= SVGA3D_SURFACE_MASKABLE_ANTIALIAS;
+ assert(svgascreen->sws->have_sm4_1);
+ tex->key.flags |= SVGA3D_SURFACE_MULTISAMPLE;
}
tex->key.sampleCount = tex->b.b.nr_samples;
tex->key.flags |= (SVGA3D_SURFACE_CUBEMAP | SVGA3D_SURFACE_ARRAY);
tex->key.numFaces = 6;
break;
+ case PIPE_TEXTURE_CUBE_ARRAY:
+ assert(svgascreen->sws->have_sm4_1);
+ tex->key.flags |= (SVGA3D_SURFACE_CUBEMAP | SVGA3D_SURFACE_ARRAY);
+   tex->key.numFaces = 1; /* arraySize already includes the 6 faces */
+ tex->key.arraySize = template->array_size;
+ break;
default:
break;
}
* and it always requests PIPE_BIND_RENDER_TARGET, therefore
* passing the SVGA3D_SURFACE_HINT_RENDERTARGET here defeats its purpose.
*
- * However, this was changed since other state trackers
+ * However, this was changed since other gallium frontends
* (XA for example) uses it accurately and certain device versions
* relies on it in certain situations to render correctly.
*/
return NULL;
}
-boolean
+bool
svga_texture_generate_mipmap(struct pipe_context *pipe,
struct pipe_resource *pt,
enum pipe_format format,
struct svga_pipe_sampler_view *sv;
struct svga_context *svga = svga_context(pipe);
struct svga_texture *tex = svga_texture(pt);
- enum pipe_error ret;
assert(svga_have_vgpu10(svga));
/* Only support 2D texture for now */
if (pt->target != PIPE_TEXTURE_2D)
- return FALSE;
+ return false;
/* Fallback to the mipmap generation utility for those formats that
* do not support hw generate mipmap
*/
if (!svga_format_support_gen_mips(format))
- return FALSE;
+ return false;
/* Make sure the texture surface was created with
* SVGA3D_SURFACE_BIND_RENDER_TARGET
*/
if (!tex->handle || !(tex->key.flags & SVGA3D_SURFACE_BIND_RENDER_TARGET))
- return FALSE;
+ return false;
templ.format = format;
templ.u.tex.first_layer = first_layer;
psv = pipe->create_sampler_view(pipe, pt, &templ);
if (psv == NULL)
- return FALSE;
+ return false;
sv = svga_pipe_sampler_view(psv);
- ret = svga_validate_pipe_sampler_view(svga, sv);
- if (ret != PIPE_OK) {
- svga_context_flush(svga, NULL);
- ret = svga_validate_pipe_sampler_view(svga, sv);
- assert(ret == PIPE_OK);
- }
+ SVGA_RETRY(svga, svga_validate_pipe_sampler_view(svga, sv));
- ret = SVGA3D_vgpu10_GenMips(svga->swc, sv->id, tex->handle);
- if (ret != PIPE_OK) {
- svga_context_flush(svga, NULL);
- ret = SVGA3D_vgpu10_GenMips(svga->swc, sv->id, tex->handle);
- }
+ SVGA_RETRY(svga, SVGA3D_vgpu10_GenMips(svga->swc, sv->id, tex->handle));
pipe_sampler_view_reference(&psv, NULL);
svga->hud.num_generate_mipmap++;
- return TRUE;
+ return true;
}
svga_texture_transfer_map_upload_create(struct svga_context *svga)
{
svga->tex_upload = u_upload_create(&svga->pipe, TEX_UPLOAD_DEFAULT_SIZE,
- 0, PIPE_USAGE_STAGING, 0);
+ PIPE_BIND_CUSTOM, PIPE_USAGE_STAGING, 0);
+ if (svga->tex_upload)
+ u_upload_disable_persistent(svga->tex_upload);
+
return svga->tex_upload != NULL;
}
st->upload.box.z = 0;
break;
case PIPE_TEXTURE_2D_ARRAY:
+ case PIPE_TEXTURE_CUBE_ARRAY:
st->upload.nlayers = st->base.box.depth;
st->upload.box.z = 0;
st->upload.box.d = 1;
struct svga_winsys_surface *dstsurf;
struct pipe_resource *texture = st->base.resource;
struct svga_texture *tex = svga_texture(texture);
- enum pipe_error ret;
unsigned subResource;
unsigned numMipLevels;
unsigned i, layer;
assert(svga->tex_upload);
assert(st->upload.buf);
-
+
/* unmap the texture upload buffer */
u_upload_unmap(svga->tex_upload);
/* send a transferFromBuffer command to update the host texture surface */
assert((offset & 15) == 0);
- ret = SVGA3D_vgpu10_TransferFromBuffer(svga->swc, srcsurf,
- offset,
- st->base.stride,
- st->base.layer_stride,
- dstsurf, subResource,
- &st->upload.box);
- if (ret != PIPE_OK) {
- svga_context_flush(svga, NULL);
- ret = SVGA3D_vgpu10_TransferFromBuffer(svga->swc, srcsurf,
- offset,
- st->base.stride,
- st->base.layer_stride,
- dstsurf, subResource,
- &st->upload.box);
- assert(ret == PIPE_OK);
- }
+ SVGA_RETRY(svga, SVGA3D_vgpu10_TransferFromBuffer(svga->swc, srcsurf,
+ offset,
+ st->base.stride,
+ st->base.layer_stride,
+ dstsurf, subResource,
+ &st->upload.box));
offset += st->base.layer_stride;
/* Set rendered-to flag */