This is the final step of the driver rename: the remaining vc5_* functions and struct vc5_* types become v3d_*, matching the kernel's v3d DRM driver. VC5_* macros and header guards are unchanged here.
#if 0
static struct pipe_surface *
-vc5_get_blit_surface(struct pipe_context *pctx,
+v3d_get_blit_surface(struct pipe_context *pctx,
struct pipe_resource *prsc, unsigned level)
{
struct pipe_surface tmpl;
}
static bool
-vc5_tile_blit(struct pipe_context *pctx, const struct pipe_blit_info *info)
+v3d_tile_blit(struct pipe_context *pctx, const struct pipe_blit_info *info)
{
- struct vc5_context *vc5 = vc5_context(pctx);
+ struct v3d_context *v3d = v3d_context(pctx);
bool msaa = (info->src.resource->nr_samples > 1 ||
info->dst.resource->nr_samples > 1);
int tile_width = msaa ? 32 : 64;
* determine the stride (which could be fixed by explicitly supplying
* it in the ABI).
*/
- struct vc5_resource *rsc = vc5_resource(info->src.resource);
+ struct v3d_resource *rsc = v3d_resource(info->src.resource);
uint32_t stride;
}
struct pipe_surface *dst_surf =
- vc5_get_blit_surface(pctx, info->dst.resource, info->dst.level);
+ v3d_get_blit_surface(pctx, info->dst.resource, info->dst.level);
struct pipe_surface *src_surf =
- vc5_get_blit_surface(pctx, info->src.resource, info->src.level);
+ v3d_get_blit_surface(pctx, info->src.resource, info->src.level);
- vc5_flush_jobs_reading_resource(vc5, info->src.resource);
+ v3d_flush_jobs_reading_resource(v3d, info->src.resource);
- struct vc5_job *job = vc5_get_job(vc5, dst_surf, NULL);
+ struct v3d_job *job = v3d_get_job(v3d, dst_surf, NULL);
pipe_surface_reference(&job->color_read, src_surf);
/* If we're resolving from MSAA to single sample, we still need to run
job->needs_flush = true;
job->resolve |= PIPE_CLEAR_COLOR;
- vc5_job_submit(vc5, job);
+ v3d_job_submit(v3d, job);
pipe_surface_reference(&dst_surf, NULL);
pipe_surface_reference(&src_surf, NULL);
#endif
void
-vc5_blitter_save(struct vc5_context *vc5)
+v3d_blitter_save(struct v3d_context *v3d)
{
- util_blitter_save_fragment_constant_buffer_slot(vc5->blitter,
- vc5->constbuf[PIPE_SHADER_FRAGMENT].cb);
- util_blitter_save_vertex_buffer_slot(vc5->blitter, vc5->vertexbuf.vb);
- util_blitter_save_vertex_elements(vc5->blitter, vc5->vtx);
- util_blitter_save_vertex_shader(vc5->blitter, vc5->prog.bind_vs);
- util_blitter_save_so_targets(vc5->blitter, vc5->streamout.num_targets,
- vc5->streamout.targets);
- util_blitter_save_rasterizer(vc5->blitter, vc5->rasterizer);
- util_blitter_save_viewport(vc5->blitter, &vc5->viewport);
- util_blitter_save_scissor(vc5->blitter, &vc5->scissor);
- util_blitter_save_fragment_shader(vc5->blitter, vc5->prog.bind_fs);
- util_blitter_save_blend(vc5->blitter, vc5->blend);
- util_blitter_save_depth_stencil_alpha(vc5->blitter, vc5->zsa);
- util_blitter_save_stencil_ref(vc5->blitter, &vc5->stencil_ref);
- util_blitter_save_sample_mask(vc5->blitter, vc5->sample_mask);
- util_blitter_save_framebuffer(vc5->blitter, &vc5->framebuffer);
- util_blitter_save_fragment_sampler_states(vc5->blitter,
- vc5->fragtex.num_samplers,
- (void **)vc5->fragtex.samplers);
- util_blitter_save_fragment_sampler_views(vc5->blitter,
- vc5->fragtex.num_textures, vc5->fragtex.textures);
+ util_blitter_save_fragment_constant_buffer_slot(v3d->blitter,
+ v3d->constbuf[PIPE_SHADER_FRAGMENT].cb);
+ util_blitter_save_vertex_buffer_slot(v3d->blitter, v3d->vertexbuf.vb);
+ util_blitter_save_vertex_elements(v3d->blitter, v3d->vtx);
+ util_blitter_save_vertex_shader(v3d->blitter, v3d->prog.bind_vs);
+ util_blitter_save_so_targets(v3d->blitter, v3d->streamout.num_targets,
+ v3d->streamout.targets);
+ util_blitter_save_rasterizer(v3d->blitter, v3d->rasterizer);
+ util_blitter_save_viewport(v3d->blitter, &v3d->viewport);
+ util_blitter_save_scissor(v3d->blitter, &v3d->scissor);
+ util_blitter_save_fragment_shader(v3d->blitter, v3d->prog.bind_fs);
+ util_blitter_save_blend(v3d->blitter, v3d->blend);
+ util_blitter_save_depth_stencil_alpha(v3d->blitter, v3d->zsa);
+ util_blitter_save_stencil_ref(v3d->blitter, &v3d->stencil_ref);
+ util_blitter_save_sample_mask(v3d->blitter, v3d->sample_mask);
+ util_blitter_save_framebuffer(v3d->blitter, &v3d->framebuffer);
+ util_blitter_save_fragment_sampler_states(v3d->blitter,
+ v3d->fragtex.num_samplers,
+ (void **)v3d->fragtex.samplers);
+ util_blitter_save_fragment_sampler_views(v3d->blitter,
+ v3d->fragtex.num_textures, v3d->fragtex.textures);
}
static bool
-vc5_render_blit(struct pipe_context *ctx, struct pipe_blit_info *info)
+v3d_render_blit(struct pipe_context *ctx, struct pipe_blit_info *info)
{
- struct vc5_context *vc5 = vc5_context(ctx);
+ struct v3d_context *v3d = v3d_context(ctx);
- if (!util_blitter_is_blit_supported(vc5->blitter, info)) {
+ if (!util_blitter_is_blit_supported(v3d->blitter, info)) {
fprintf(stderr, "blit unsupported %s -> %s\n",
util_format_short_name(info->src.resource->format),
util_format_short_name(info->dst.resource->format));
return false;
}
- vc5_blitter_save(vc5);
- util_blitter_blit(vc5->blitter, info);
+ v3d_blitter_save(v3d);
+ util_blitter_blit(v3d->blitter, info);
return true;
}
/* Implement stencil blits by reinterpreting the stencil data as an RGBA8888
 * or R8 texture.
*/
static void
-vc5_stencil_blit(struct pipe_context *ctx, const struct pipe_blit_info *info)
+v3d_stencil_blit(struct pipe_context *ctx, const struct pipe_blit_info *info)
{
- struct vc5_context *vc5 = vc5_context(ctx);
- struct vc5_resource *src = vc5_resource(info->src.resource);
- struct vc5_resource *dst = vc5_resource(info->dst.resource);
+ struct v3d_context *v3d = v3d_context(ctx);
+ struct v3d_resource *src = v3d_resource(info->src.resource);
+ struct v3d_resource *dst = v3d_resource(info->dst.resource);
enum pipe_format src_format, dst_format;
if (src->separate_stencil) {
struct pipe_sampler_view *src_view =
ctx->create_sampler_view(ctx, &src->base, &src_tmpl);
- vc5_blitter_save(vc5);
- util_blitter_blit_generic(vc5->blitter, dst_surf, &info->dst.box,
+ v3d_blitter_save(v3d);
+ util_blitter_blit_generic(v3d->blitter, dst_surf, &info->dst.box,
src_view, &info->src.box,
src->base.width0, src->base.height0,
PIPE_MASK_R,
/* Optimal hardware path for blitting pixels.
 * Scaling, format conversion, up- and downsampling (resolve) are allowed.
*/
void
-vc5_blit(struct pipe_context *pctx, const struct pipe_blit_info *blit_info)
+v3d_blit(struct pipe_context *pctx, const struct pipe_blit_info *blit_info)
{
struct pipe_blit_info info = *blit_info;
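        /* Stencil is handled by the dedicated path above, which
         * reinterprets the stencil data as a color (R8) texture; mask it
         * off so the render blit below doesn't try to handle it again.
         */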
if (info.mask & PIPE_MASK_S) {
- vc5_stencil_blit(pctx, blit_info);
+ v3d_stencil_blit(pctx, blit_info);
info.mask &= ~PIPE_MASK_S;
}
#if 0
- if (vc5_tile_blit(pctx, blit_info))
+ if (v3d_tile_blit(pctx, blit_info))
return;
#endif
- vc5_render_blit(pctx, &info);
+ v3d_render_blit(pctx, &info);
}
static bool dump_stats = false;
static void
-vc5_bo_cache_free_all(struct vc5_bo_cache *cache);
+v3d_bo_cache_free_all(struct v3d_bo_cache *cache);
static void
-vc5_bo_dump_stats(struct vc5_screen *screen)
+v3d_bo_dump_stats(struct v3d_screen *screen)
{
- struct vc5_bo_cache *cache = &screen->bo_cache;
+ struct v3d_bo_cache *cache = &screen->bo_cache;
fprintf(stderr, " BOs allocated: %d\n", screen->bo_count);
fprintf(stderr, " BOs size: %dkb\n", screen->bo_size / 1024);
fprintf(stderr, " BOs cached size: %dkb\n", cache->bo_size / 1024);
if (!list_empty(&cache->time_list)) {
- struct vc5_bo *first = LIST_ENTRY(struct vc5_bo,
+ struct v3d_bo *first = LIST_ENTRY(struct v3d_bo,
cache->time_list.next,
time_list);
- struct vc5_bo *last = LIST_ENTRY(struct vc5_bo,
+ struct v3d_bo *last = LIST_ENTRY(struct v3d_bo,
cache->time_list.prev,
time_list);
}
static void
-vc5_bo_remove_from_cache(struct vc5_bo_cache *cache, struct vc5_bo *bo)
+v3d_bo_remove_from_cache(struct v3d_bo_cache *cache, struct v3d_bo *bo)
{
list_del(&bo->time_list);
list_del(&bo->size_list);
cache->bo_size -= bo->size;
}
-static struct vc5_bo *
-vc5_bo_from_cache(struct vc5_screen *screen, uint32_t size, const char *name)
+static struct v3d_bo *
+v3d_bo_from_cache(struct v3d_screen *screen, uint32_t size, const char *name)
{
- struct vc5_bo_cache *cache = &screen->bo_cache;
+ struct v3d_bo_cache *cache = &screen->bo_cache;
uint32_t page_index = size / 4096 - 1;
if (cache->size_list_size <= page_index)
return NULL;
- struct vc5_bo *bo = NULL;
+ struct v3d_bo *bo = NULL;
mtx_lock(&cache->lock);
if (!list_empty(&cache->size_list[page_index])) {
- bo = LIST_ENTRY(struct vc5_bo, cache->size_list[page_index].next,
+ bo = LIST_ENTRY(struct v3d_bo, cache->size_list[page_index].next,
size_list);
/* Check that the BO has gone idle. If not, then we want to
* allocate something new instead, since we assume that the
* user will proceed to CPU map it and fill it with stuff.
*/
- if (!vc5_bo_wait(bo, 0, NULL)) {
+ if (!v3d_bo_wait(bo, 0, NULL)) {
mtx_unlock(&cache->lock);
return NULL;
}
pipe_reference_init(&bo->reference, 1);
- vc5_bo_remove_from_cache(cache, bo);
+ v3d_bo_remove_from_cache(cache, bo);
bo->name = name;
        }
        mtx_unlock(&cache->lock);
return bo;
}
-struct vc5_bo *
-vc5_bo_alloc(struct vc5_screen *screen, uint32_t size, const char *name)
+struct v3d_bo *
+v3d_bo_alloc(struct v3d_screen *screen, uint32_t size, const char *name)
{
- struct vc5_bo *bo;
+ struct v3d_bo *bo;
int ret;
size = align(size, 4096);
- bo = vc5_bo_from_cache(screen, size, name);
+ bo = v3d_bo_from_cache(screen, size, name);
if (bo) {
if (dump_stats) {
fprintf(stderr, "Allocated %s %dkb from cache:\n",
name, size / 1024);
- vc5_bo_dump_stats(screen);
+ v3d_bo_dump_stats(screen);
}
return bo;
}
- bo = CALLOC_STRUCT(vc5_bo);
+ bo = CALLOC_STRUCT(v3d_bo);
if (!bo)
return NULL;
        bool cleared_and_retried = false;
retry:
        ;
        struct drm_v3d_create_bo create = {
                .size = size
        };
- ret = vc5_ioctl(screen->fd, DRM_IOCTL_V3D_CREATE_BO, &create);
+ ret = v3d_ioctl(screen->fd, DRM_IOCTL_V3D_CREATE_BO, &create);
bo->handle = create.handle;
bo->offset = create.offset;
        if (ret != 0) {
                if (!list_empty(&screen->bo_cache.time_list) &&
                    !cleared_and_retried) {
cleared_and_retried = true;
- vc5_bo_cache_free_all(&screen->bo_cache);
+ v3d_bo_cache_free_all(&screen->bo_cache);
goto retry;
                }

                free(bo);
                return NULL;
        }
screen->bo_size += bo->size;
if (dump_stats) {
fprintf(stderr, "Allocated %s %dkb:\n", name, size / 1024);
- vc5_bo_dump_stats(screen);
+ v3d_bo_dump_stats(screen);
}
return bo;
}
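/* Usage sketch (illustrative, not part of this change): because freed
 * private BOs are parked in the cache rather than closed immediately, a
 * free-then-realloc of the same page-aligned size is typically served
 * from the size_list bucket without a CREATE_BO round trip.
 */
static void
example_bo_reuse(struct v3d_screen *screen)
{
        struct v3d_bo *bo = v3d_bo_alloc(screen, 16 * 1024, "example");
        uint32_t *map = v3d_bo_map(bo);

        map[0] = 0;
        v3d_bo_unreference(&bo);

        /* Served from the cache if the old BO has gone idle by now. */
        bo = v3d_bo_alloc(screen, 16 * 1024, "example");
        v3d_bo_unreference(&bo);
}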
void
-vc5_bo_last_unreference(struct vc5_bo *bo)
+v3d_bo_last_unreference(struct v3d_bo *bo)
{
- struct vc5_screen *screen = bo->screen;
+ struct v3d_screen *screen = bo->screen;
struct timespec time;
clock_gettime(CLOCK_MONOTONIC, &time);
mtx_lock(&screen->bo_cache.lock);
- vc5_bo_last_unreference_locked_timed(bo, time.tv_sec);
+ v3d_bo_last_unreference_locked_timed(bo, time.tv_sec);
mtx_unlock(&screen->bo_cache.lock);
}
static void
-vc5_bo_free(struct vc5_bo *bo)
+v3d_bo_free(struct v3d_bo *bo)
{
- struct vc5_screen *screen = bo->screen;
+ struct v3d_screen *screen = bo->screen;
if (bo->map) {
- if (using_vc5_simulator && bo->name &&
+ if (using_v3d_simulator && bo->name &&
strcmp(bo->name, "winsys") == 0) {
free(bo->map);
                } else {
                        munmap(bo->map, bo->size);
                }
        }
struct drm_gem_close c;
memset(&c, 0, sizeof(c));
c.handle = bo->handle;
- int ret = vc5_ioctl(screen->fd, DRM_IOCTL_GEM_CLOSE, &c);
+ int ret = v3d_ioctl(screen->fd, DRM_IOCTL_GEM_CLOSE, &c);
if (ret != 0)
fprintf(stderr, "close object %d: %s\n", bo->handle, strerror(errno));
        if (dump_stats) {
                fprintf(stderr, "Freed %s%s%dkb:\n",
                        bo->name ? bo->name : "",
bo->name ? " " : "",
bo->size / 1024);
- vc5_bo_dump_stats(screen);
+ v3d_bo_dump_stats(screen);
}
free(bo);
}
static void
-free_stale_bos(struct vc5_screen *screen, time_t time)
+free_stale_bos(struct v3d_screen *screen, time_t time)
{
- struct vc5_bo_cache *cache = &screen->bo_cache;
+ struct v3d_bo_cache *cache = &screen->bo_cache;
bool freed_any = false;
- list_for_each_entry_safe(struct vc5_bo, bo, &cache->time_list,
+ list_for_each_entry_safe(struct v3d_bo, bo, &cache->time_list,
time_list) {
if (dump_stats && !freed_any) {
fprintf(stderr, "Freeing stale BOs:\n");
- vc5_bo_dump_stats(screen);
+ v3d_bo_dump_stats(screen);
freed_any = true;
}
/* If it's more than two seconds old, free it. */
if (time - bo->free_time > 2) {
- vc5_bo_remove_from_cache(cache, bo);
- vc5_bo_free(bo);
+ v3d_bo_remove_from_cache(cache, bo);
+ v3d_bo_free(bo);
} else {
break;
}
        }

        if (dump_stats && freed_any) {
fprintf(stderr, "Freed stale BOs:\n");
- vc5_bo_dump_stats(screen);
+ v3d_bo_dump_stats(screen);
}
}
static void
-vc5_bo_cache_free_all(struct vc5_bo_cache *cache)
+v3d_bo_cache_free_all(struct v3d_bo_cache *cache)
{
mtx_lock(&cache->lock);
- list_for_each_entry_safe(struct vc5_bo, bo, &cache->time_list,
+ list_for_each_entry_safe(struct v3d_bo, bo, &cache->time_list,
time_list) {
- vc5_bo_remove_from_cache(cache, bo);
- vc5_bo_free(bo);
+ v3d_bo_remove_from_cache(cache, bo);
+ v3d_bo_free(bo);
}
mtx_unlock(&cache->lock);
}
void
-vc5_bo_last_unreference_locked_timed(struct vc5_bo *bo, time_t time)
+v3d_bo_last_unreference_locked_timed(struct v3d_bo *bo, time_t time)
{
- struct vc5_screen *screen = bo->screen;
- struct vc5_bo_cache *cache = &screen->bo_cache;
+ struct v3d_screen *screen = bo->screen;
+ struct v3d_bo_cache *cache = &screen->bo_cache;
uint32_t page_index = bo->size / 4096 - 1;
if (!bo->private) {
- vc5_bo_free(bo);
+ v3d_bo_free(bo);
return;
}
if (dump_stats) {
fprintf(stderr, "Freed %s %dkb to cache:\n",
bo->name, bo->size / 1024);
- vc5_bo_dump_stats(screen);
+ v3d_bo_dump_stats(screen);
}
bo->name = NULL;
free_stale_bos(screen, time);
}
-static struct vc5_bo *
-vc5_bo_open_handle(struct vc5_screen *screen,
+static struct v3d_bo *
+v3d_bo_open_handle(struct v3d_screen *screen,
uint32_t winsys_stride,
uint32_t handle, uint32_t size)
{
- struct vc5_bo *bo;
+ struct v3d_bo *bo;
assert(size);
goto done;
}
- bo = CALLOC_STRUCT(vc5_bo);
+ bo = CALLOC_STRUCT(v3d_bo);
pipe_reference_init(&bo->reference, 1);
bo->screen = screen;
bo->handle = handle;
bo->private = false;
#ifdef USE_V3D_SIMULATOR
- vc5_simulator_open_from_handle(screen->fd, winsys_stride,
+ v3d_simulator_open_from_handle(screen->fd, winsys_stride,
bo->handle, bo->size);
bo->map = malloc(bo->size);
#endif
struct drm_v3d_get_bo_offset get = {
.handle = handle,
};
- int ret = vc5_ioctl(screen->fd, DRM_IOCTL_V3D_GET_BO_OFFSET, &get);
+ int ret = v3d_ioctl(screen->fd, DRM_IOCTL_V3D_GET_BO_OFFSET, &get);
if (ret) {
fprintf(stderr, "Failed to get BO offset: %s\n",
strerror(errno));
                free(bo);
                return NULL;
        }

        bo->offset = get.offset;

done:
        mtx_unlock(&screen->bo_handles_mutex);
        return bo;
}
-struct vc5_bo *
-vc5_bo_open_name(struct vc5_screen *screen, uint32_t name,
+struct v3d_bo *
+v3d_bo_open_name(struct v3d_screen *screen, uint32_t name,
uint32_t winsys_stride)
{
struct drm_gem_open o = {
.name = name
};
- int ret = vc5_ioctl(screen->fd, DRM_IOCTL_GEM_OPEN, &o);
+ int ret = v3d_ioctl(screen->fd, DRM_IOCTL_GEM_OPEN, &o);
if (ret) {
fprintf(stderr, "Failed to open bo %d: %s\n",
name, strerror(errno));
return NULL;
}
- return vc5_bo_open_handle(screen, winsys_stride, o.handle, o.size);
+ return v3d_bo_open_handle(screen, winsys_stride, o.handle, o.size);
}
-struct vc5_bo *
-vc5_bo_open_dmabuf(struct vc5_screen *screen, int fd, uint32_t winsys_stride)
+struct v3d_bo *
+v3d_bo_open_dmabuf(struct v3d_screen *screen, int fd, uint32_t winsys_stride)
{
uint32_t handle;
int ret = drmPrimeFDToHandle(screen->fd, fd, &handle);
int size;
if (ret) {
- fprintf(stderr, "Failed to get vc5 handle for dmabuf %d\n", fd);
+ fprintf(stderr, "Failed to get v3d handle for dmabuf %d\n", fd);
return NULL;
}
        /* Determine the size of the BO we were handed. */
        size = lseek(fd, 0, SEEK_END);
        if (size == -1) {
                fprintf(stderr, "Couldn't get size of dmabuf fd %d.\n", fd);
                return NULL;
        }
- return vc5_bo_open_handle(screen, winsys_stride, handle, size);
+ return v3d_bo_open_handle(screen, winsys_stride, handle, size);
}
int
-vc5_bo_get_dmabuf(struct vc5_bo *bo)
+v3d_bo_get_dmabuf(struct v3d_bo *bo)
{
int fd;
int ret = drmPrimeHandleToFD(bo->screen->fd, bo->handle,
}
bool
-vc5_bo_flink(struct vc5_bo *bo, uint32_t *name)
+v3d_bo_flink(struct v3d_bo *bo, uint32_t *name)
{
struct drm_gem_flink flink = {
.handle = bo->handle,
};
- int ret = vc5_ioctl(bo->screen->fd, DRM_IOCTL_GEM_FLINK, &flink);
+ int ret = v3d_ioctl(bo->screen->fd, DRM_IOCTL_GEM_FLINK, &flink);
if (ret) {
fprintf(stderr, "Failed to flink bo %d: %s\n",
bo->handle, strerror(errno));
                return false;
        }

        bo->private = false;
        *name = flink.name;

        return true;
}
-static int vc5_wait_bo_ioctl(int fd, uint32_t handle, uint64_t timeout_ns)
+static int v3d_wait_bo_ioctl(int fd, uint32_t handle, uint64_t timeout_ns)
{
struct drm_v3d_wait_bo wait = {
.handle = handle,
.timeout_ns = timeout_ns,
};
- int ret = vc5_ioctl(fd, DRM_IOCTL_V3D_WAIT_BO, &wait);
+ int ret = v3d_ioctl(fd, DRM_IOCTL_V3D_WAIT_BO, &wait);
if (ret == -1)
return -errno;
        else
                return 0;
}
bool
-vc5_bo_wait(struct vc5_bo *bo, uint64_t timeout_ns, const char *reason)
+v3d_bo_wait(struct v3d_bo *bo, uint64_t timeout_ns, const char *reason)
{
- struct vc5_screen *screen = bo->screen;
+ struct v3d_screen *screen = bo->screen;
if (unlikely(V3D_DEBUG & V3D_DEBUG_PERF) && timeout_ns && reason) {
- if (vc5_wait_bo_ioctl(screen->fd, bo->handle, 0) == -ETIME) {
+ if (v3d_wait_bo_ioctl(screen->fd, bo->handle, 0) == -ETIME) {
fprintf(stderr, "Blocking on %s BO for %s\n",
bo->name, reason);
}
}
- int ret = vc5_wait_bo_ioctl(screen->fd, bo->handle, timeout_ns);
+ int ret = v3d_wait_bo_ioctl(screen->fd, bo->handle, timeout_ns);
if (ret) {
if (ret != -ETIME) {
fprintf(stderr, "wait failed: %d\n", ret);
}
void *
-vc5_bo_map_unsynchronized(struct vc5_bo *bo)
+v3d_bo_map_unsynchronized(struct v3d_bo *bo)
{
uint64_t offset;
int ret;
struct drm_v3d_mmap_bo map;
memset(&map, 0, sizeof(map));
map.handle = bo->handle;
- ret = vc5_ioctl(bo->screen->fd, DRM_IOCTL_V3D_MMAP_BO, &map);
+ ret = v3d_ioctl(bo->screen->fd, DRM_IOCTL_V3D_MMAP_BO, &map);
offset = map.offset;
if (ret != 0) {
fprintf(stderr, "map ioctl failure\n");
}
void *
-vc5_bo_map(struct vc5_bo *bo)
+v3d_bo_map(struct v3d_bo *bo)
{
- void *map = vc5_bo_map_unsynchronized(bo);
+ void *map = v3d_bo_map_unsynchronized(bo);
- bool ok = vc5_bo_wait(bo, PIPE_TIMEOUT_INFINITE, "bo map");
+ bool ok = v3d_bo_wait(bo, PIPE_TIMEOUT_INFINITE, "bo map");
if (!ok) {
fprintf(stderr, "BO wait for map failed\n");
abort();
        }

        return map;
}
void
-vc5_bufmgr_destroy(struct pipe_screen *pscreen)
+v3d_bufmgr_destroy(struct pipe_screen *pscreen)
{
- struct vc5_screen *screen = vc5_screen(pscreen);
- struct vc5_bo_cache *cache = &screen->bo_cache;
+ struct v3d_screen *screen = v3d_screen(pscreen);
+ struct v3d_bo_cache *cache = &screen->bo_cache;
- vc5_bo_cache_free_all(cache);
+ v3d_bo_cache_free_all(cache);
if (dump_stats) {
fprintf(stderr, "BO stats after screen destroy:\n");
- vc5_bo_dump_stats(screen);
+ v3d_bo_dump_stats(screen);
}
}
#include "util/list.h"
#include "v3d_screen.h"
-struct vc5_context;
+struct v3d_context;
-struct vc5_bo {
+struct v3d_bo {
struct pipe_reference reference;
- struct vc5_screen *screen;
+ struct v3d_screen *screen;
void *map;
const char *name;
uint32_t handle;
bool private;
};
-struct vc5_bo *vc5_bo_alloc(struct vc5_screen *screen, uint32_t size,
+struct v3d_bo *v3d_bo_alloc(struct v3d_screen *screen, uint32_t size,
const char *name);
-void vc5_bo_last_unreference(struct vc5_bo *bo);
-void vc5_bo_last_unreference_locked_timed(struct vc5_bo *bo, time_t time);
-struct vc5_bo *vc5_bo_open_name(struct vc5_screen *screen, uint32_t name,
+void v3d_bo_last_unreference(struct v3d_bo *bo);
+void v3d_bo_last_unreference_locked_timed(struct v3d_bo *bo, time_t time);
+struct v3d_bo *v3d_bo_open_name(struct v3d_screen *screen, uint32_t name,
uint32_t winsys_stride);
-struct vc5_bo *vc5_bo_open_dmabuf(struct vc5_screen *screen, int fd,
+struct v3d_bo *v3d_bo_open_dmabuf(struct v3d_screen *screen, int fd,
uint32_t winsys_stride);
-bool vc5_bo_flink(struct vc5_bo *bo, uint32_t *name);
-int vc5_bo_get_dmabuf(struct vc5_bo *bo);
+bool v3d_bo_flink(struct v3d_bo *bo, uint32_t *name);
+int v3d_bo_get_dmabuf(struct v3d_bo *bo);
static inline void
-vc5_bo_set_reference(struct vc5_bo **old_bo, struct vc5_bo *new_bo)
+v3d_bo_set_reference(struct v3d_bo **old_bo, struct v3d_bo *new_bo)
{
if (pipe_reference(&(*old_bo)->reference, &new_bo->reference))
- vc5_bo_last_unreference(*old_bo);
+ v3d_bo_last_unreference(*old_bo);
*old_bo = new_bo;
}
-static inline struct vc5_bo *
-vc5_bo_reference(struct vc5_bo *bo)
+static inline struct v3d_bo *
+v3d_bo_reference(struct v3d_bo *bo)
{
pipe_reference(NULL, &bo->reference);
return bo;
}
static inline void
-vc5_bo_unreference(struct vc5_bo **bo)
+v3d_bo_unreference(struct v3d_bo **bo)
{
- struct vc5_screen *screen;
+ struct v3d_screen *screen;
if (!*bo)
return;
if ((*bo)->private) {
/* Avoid the mutex for private BOs */
if (pipe_reference(&(*bo)->reference, NULL))
- vc5_bo_last_unreference(*bo);
+ v3d_bo_last_unreference(*bo);
} else {
screen = (*bo)->screen;
mtx_lock(&screen->bo_handles_mutex);
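                /* Taking the handle-table lock here keeps a concurrent
                 * import of the same handle from picking up the BO while
                 * its last reference is being dropped.
                 */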
if (pipe_reference(&(*bo)->reference, NULL)) {
util_hash_table_remove(screen->bo_handles,
(void *)(uintptr_t)(*bo)->handle);
- vc5_bo_last_unreference(*bo);
+ v3d_bo_last_unreference(*bo);
}
mtx_unlock(&screen->bo_handles_mutex);
}
static inline void
-vc5_bo_unreference_locked_timed(struct vc5_bo **bo, time_t time)
+v3d_bo_unreference_locked_timed(struct v3d_bo **bo, time_t time)
{
if (!*bo)
return;
if (pipe_reference(&(*bo)->reference, NULL))
- vc5_bo_last_unreference_locked_timed(*bo, time);
+ v3d_bo_last_unreference_locked_timed(*bo, time);
*bo = NULL;
}
void *
-vc5_bo_map(struct vc5_bo *bo);
+v3d_bo_map(struct v3d_bo *bo);
void *
-vc5_bo_map_unsynchronized(struct vc5_bo *bo);
+v3d_bo_map_unsynchronized(struct v3d_bo *bo);
bool
-vc5_bo_wait(struct vc5_bo *bo, uint64_t timeout_ns, const char *reason);
+v3d_bo_wait(struct v3d_bo *bo, uint64_t timeout_ns, const char *reason);
bool
-vc5_wait_seqno(struct vc5_screen *screen, uint64_t seqno, uint64_t timeout_ns,
+v3d_wait_seqno(struct v3d_screen *screen, uint64_t seqno, uint64_t timeout_ns,
const char *reason);
void
-vc5_bufmgr_destroy(struct pipe_screen *pscreen);
+v3d_bufmgr_destroy(struct pipe_screen *pscreen);
#endif /* VC5_BUFMGR_H */
#include "broadcom/cle/v3dx_pack.h"
void
-vc5_init_cl(struct vc5_job *job, struct vc5_cl *cl)
+v3d_init_cl(struct v3d_job *job, struct v3d_cl *cl)
{
        cl->base = NULL;
        cl->next = cl->base;
        cl->size = 0;
        cl->job = job;
}
uint32_t
-vc5_cl_ensure_space(struct vc5_cl *cl, uint32_t space, uint32_t alignment)
+v3d_cl_ensure_space(struct v3d_cl *cl, uint32_t space, uint32_t alignment)
{
        uint32_t offset = align(cl_offset(cl), alignment);

        if (offset + space <= cl->size) {
                cl->next = (struct v3d_cl_out *)((char *)cl->base + offset);
                return offset;
        }
- vc5_bo_unreference(&cl->bo);
- cl->bo = vc5_bo_alloc(cl->job->vc5->screen, align(space, 4096), "CL");
- cl->base = vc5_bo_map(cl->bo);
+ v3d_bo_unreference(&cl->bo);
+ cl->bo = v3d_bo_alloc(cl->job->v3d->screen, align(space, 4096), "CL");
+ cl->base = v3d_bo_map(cl->bo);
cl->size = cl->bo->size;
cl->next = cl->base;
        return 0;
}
void
-vc5_cl_ensure_space_with_branch(struct vc5_cl *cl, uint32_t space)
+v3d_cl_ensure_space_with_branch(struct v3d_cl *cl, uint32_t space)
{
if (cl_offset(cl) + space + cl_packet_length(BRANCH) <= cl->size)
return;
- struct vc5_bo *new_bo = vc5_bo_alloc(cl->job->vc5->screen, 4096, "CL");
+ struct v3d_bo *new_bo = v3d_bo_alloc(cl->job->v3d->screen, 4096, "CL");
assert(space <= new_bo->size);
        if (cl->bo) {
                /* Chain to the new BO from the old one. */
cl_emit(cl, BRANCH, branch) {
branch.address = cl_address(new_bo, 0);
}
- vc5_bo_unreference(&cl->bo);
+ v3d_bo_unreference(&cl->bo);
} else {
/* Root the first RCL/BCL BO in the job. */
- vc5_job_add_bo(cl->job, cl->bo);
+ v3d_job_add_bo(cl->job, cl->bo);
}
cl->bo = new_bo;
- cl->base = vc5_bo_map(cl->bo);
+ cl->base = v3d_bo_map(cl->bo);
cl->size = cl->bo->size;
cl->next = cl->base;
}
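/* Design note: v3d_cl_ensure_space() above can simply swap in a new
 * backing BO, since everything already written to the indirect list is
 * referenced by address and kept alive through the job's BO set.  The
 * BCL, in contrast, is consumed sequentially by the hardware, so it has
 * to grow by chaining BOs together with BRANCH packets.
 */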
void
-vc5_destroy_cl(struct vc5_cl *cl)
+v3d_destroy_cl(struct v3d_cl *cl)
{
- vc5_bo_unreference(&cl->bo);
+ v3d_bo_unreference(&cl->bo);
}
#include "util/u_math.h"
#include "util/macros.h"
-struct vc5_bo;
-struct vc5_job;
-struct vc5_cl;
+struct v3d_bo;
+struct v3d_job;
+struct v3d_cl;
/**
* Undefined structure, used for typechecking that you're passing the pointers
* to these functions correctly.
*/
-struct vc5_cl_out;
+struct v3d_cl_out;
/** A reference to a BO used in the CL packing functions */
-struct vc5_cl_reloc {
- struct vc5_bo *bo;
+struct v3d_cl_reloc {
+ struct v3d_bo *bo;
uint32_t offset;
};
-static inline void cl_pack_emit_reloc(struct vc5_cl *cl, const struct vc5_cl_reloc *);
+static inline void cl_pack_emit_reloc(struct v3d_cl *cl, const struct v3d_cl_reloc *);
-#define __gen_user_data struct vc5_cl
-#define __gen_address_type struct vc5_cl_reloc
+#define __gen_user_data struct v3d_cl
+#define __gen_address_type struct v3d_cl_reloc
#define __gen_address_offset(reloc) (((reloc)->bo ? (reloc)->bo->offset : 0) + \
(reloc)->offset)
#define __gen_emit_reloc cl_pack_emit_reloc
-struct vc5_cl {
+struct v3d_cl {
void *base;
- struct vc5_job *job;
- struct vc5_cl_out *next;
- struct vc5_bo *bo;
+ struct v3d_job *job;
+ struct v3d_cl_out *next;
+ struct v3d_bo *bo;
uint32_t size;
};
-void vc5_init_cl(struct vc5_job *job, struct vc5_cl *cl);
-void vc5_destroy_cl(struct vc5_cl *cl);
-void vc5_dump_cl(void *cl, uint32_t size, bool is_render);
-uint32_t vc5_gem_hindex(struct vc5_job *job, struct vc5_bo *bo);
+void v3d_init_cl(struct v3d_job *job, struct v3d_cl *cl);
+void v3d_destroy_cl(struct v3d_cl *cl);
+void v3d_dump_cl(void *cl, uint32_t size, bool is_render);
+uint32_t v3d_gem_hindex(struct v3d_job *job, struct v3d_bo *bo);
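/* Helpers for unaligned stores into the CL.  Going through a packed
 * struct lets the compiler emit whatever access sequence is safe for
 * the target, since CL contents are byte-packed.
 */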
struct PACKED unaligned_16 { uint16_t x; };
struct PACKED unaligned_32 { uint32_t x; };
-static inline uint32_t cl_offset(struct vc5_cl *cl)
+static inline uint32_t cl_offset(struct v3d_cl *cl)
{
return (char *)cl->next - (char *)cl->base;
}
-static inline struct vc5_cl_reloc cl_get_address(struct vc5_cl *cl)
+static inline struct v3d_cl_reloc cl_get_address(struct v3d_cl *cl)
{
- return (struct vc5_cl_reloc){ .bo = cl->bo, .offset = cl_offset(cl) };
+ return (struct v3d_cl_reloc){ .bo = cl->bo, .offset = cl_offset(cl) };
}
static inline void
-cl_advance(struct vc5_cl_out **cl, uint32_t n)
+cl_advance(struct v3d_cl_out **cl, uint32_t n)
{
- (*cl) = (struct vc5_cl_out *)((char *)(*cl) + n);
+ (*cl) = (struct v3d_cl_out *)((char *)(*cl) + n);
}
-static inline struct vc5_cl_out *
-cl_start(struct vc5_cl *cl)
+static inline struct v3d_cl_out *
+cl_start(struct v3d_cl *cl)
{
return cl->next;
}
static inline void
-cl_end(struct vc5_cl *cl, struct vc5_cl_out *next)
+cl_end(struct v3d_cl *cl, struct v3d_cl_out *next)
{
cl->next = next;
assert(cl_offset(cl) <= cl->size);
static inline void
-put_unaligned_32(struct vc5_cl_out *ptr, uint32_t val)
+put_unaligned_32(struct v3d_cl_out *ptr, uint32_t val)
{
struct unaligned_32 *p = (void *)ptr;
p->x = val;
}
static inline void
-put_unaligned_16(struct vc5_cl_out *ptr, uint16_t val)
+put_unaligned_16(struct v3d_cl_out *ptr, uint16_t val)
{
struct unaligned_16 *p = (void *)ptr;
p->x = val;
}
static inline void
-cl_u8(struct vc5_cl_out **cl, uint8_t n)
+cl_u8(struct v3d_cl_out **cl, uint8_t n)
{
*(uint8_t *)(*cl) = n;
cl_advance(cl, 1);
}
static inline void
-cl_u16(struct vc5_cl_out **cl, uint16_t n)
+cl_u16(struct v3d_cl_out **cl, uint16_t n)
{
put_unaligned_16(*cl, n);
cl_advance(cl, 2);
}
static inline void
-cl_u32(struct vc5_cl_out **cl, uint32_t n)
+cl_u32(struct v3d_cl_out **cl, uint32_t n)
{
put_unaligned_32(*cl, n);
cl_advance(cl, 4);
}
static inline void
-cl_aligned_u32(struct vc5_cl_out **cl, uint32_t n)
+cl_aligned_u32(struct v3d_cl_out **cl, uint32_t n)
{
*(uint32_t *)(*cl) = n;
cl_advance(cl, 4);
}
static inline void
-cl_aligned_reloc(struct vc5_cl *cl,
- struct vc5_cl_out **cl_out,
- struct vc5_bo *bo, uint32_t offset)
+cl_aligned_reloc(struct v3d_cl *cl,
+ struct v3d_cl_out **cl_out,
+ struct v3d_bo *bo, uint32_t offset)
{
cl_aligned_u32(cl_out, bo->offset + offset);
- vc5_job_add_bo(cl->job, bo);
+ v3d_job_add_bo(cl->job, bo);
}
static inline void
-cl_ptr(struct vc5_cl_out **cl, void *ptr)
+cl_ptr(struct v3d_cl_out **cl, void *ptr)
{
- *(struct vc5_cl_out **)(*cl) = ptr;
+ *(struct v3d_cl_out **)(*cl) = ptr;
cl_advance(cl, sizeof(void *));
}
static inline void
-cl_f(struct vc5_cl_out **cl, float f)
+cl_f(struct v3d_cl_out **cl, float f)
{
cl_u32(cl, fui(f));
}
static inline void
-cl_aligned_f(struct vc5_cl_out **cl, float f)
+cl_aligned_f(struct v3d_cl_out **cl, float f)
{
cl_aligned_u32(cl, fui(f));
}
/**
* Reference to a BO with its associated offset, used in the pack process.
*/
-static inline struct vc5_cl_reloc
-cl_address(struct vc5_bo *bo, uint32_t offset)
+static inline struct v3d_cl_reloc
+cl_address(struct v3d_bo *bo, uint32_t offset)
{
- struct vc5_cl_reloc reloc = {
+ struct v3d_cl_reloc reloc = {
.bo = bo,
.offset = offset,
};
return reloc;
}
-uint32_t vc5_cl_ensure_space(struct vc5_cl *cl, uint32_t size, uint32_t align);
-void vc5_cl_ensure_space_with_branch(struct vc5_cl *cl, uint32_t size);
+uint32_t v3d_cl_ensure_space(struct v3d_cl *cl, uint32_t size, uint32_t align);
+void v3d_cl_ensure_space_with_branch(struct v3d_cl *cl, uint32_t size);
#define cl_packet_header(packet) V3DX(packet ## _header)
#define cl_packet_length(packet) V3DX(packet ## _length)
#define cl_packet_struct(packet) V3DX(packet)
static inline void *
-cl_get_emit_space(struct vc5_cl_out **cl, size_t size)
+cl_get_emit_space(struct v3d_cl_out **cl, size_t size)
{
void *addr = *cl;
cl_advance(cl, size);
*_loop_terminate = &name; \
__builtin_expect(_loop_terminate != NULL, 1); \
({ \
- struct vc5_cl_out *cl_out = cl_start(cl); \
+ struct v3d_cl_out *cl_out = cl_start(cl); \
cl_packet_pack(packet)(cl, (uint8_t *)cl_out, &name); \
cl_advance(&cl_out, cl_packet_length(packet)); \
cl_end(cl, cl_out); \
*_loop_terminate = &name; \
__builtin_expect(_loop_terminate != NULL, 1); \
({ \
- struct vc5_cl_out *cl_out = cl_start(cl); \
+ struct v3d_cl_out *cl_out = cl_start(cl); \
uint8_t packed[cl_packet_length(packet)]; \
cl_packet_pack(packet)(cl, packed, &name); \
for (int _i = 0; _i < cl_packet_length(packet); _i++) \
/* Since our BOs have fixed offsets for their whole lifetime in V3D's
 * private address space, all we need to do when packing an address is
 * add the BO to the job's set of referenced BOs for this exec.
 */
static inline void
-cl_pack_emit_reloc(struct vc5_cl *cl, const struct vc5_cl_reloc *reloc)
+cl_pack_emit_reloc(struct v3d_cl *cl, const struct v3d_cl_reloc *reloc)
{
if (reloc->bo)
- vc5_job_add_bo(cl->job, reloc->bo);
+ v3d_job_add_bo(cl->job, reloc->bo);
}
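/* Usage sketch (illustrative): any packet field assigned a cl_address()
 * is packed through __gen_emit_reloc, i.e. cl_pack_emit_reloc() above,
 * so the referenced BO lands in the job's BO set without extra work in
 * the caller:
 *
 *         cl_emit(cl, BRANCH, branch) {
 *                 branch.address = cl_address(new_bo, 0);
 *         }
 */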
#endif /* VC5_CL_H */
#include "v3d_resource.h"
void
-vc5_flush(struct pipe_context *pctx)
+v3d_flush(struct pipe_context *pctx)
{
- struct vc5_context *vc5 = vc5_context(pctx);
+ struct v3d_context *v3d = v3d_context(pctx);
struct hash_entry *entry;
- hash_table_foreach(vc5->jobs, entry) {
- struct vc5_job *job = entry->data;
- vc5_job_submit(vc5, job);
+ hash_table_foreach(v3d->jobs, entry) {
+ struct v3d_job *job = entry->data;
+ v3d_job_submit(v3d, job);
}
}
static void
-vc5_pipe_flush(struct pipe_context *pctx, struct pipe_fence_handle **fence,
+v3d_pipe_flush(struct pipe_context *pctx, struct pipe_fence_handle **fence,
unsigned flags)
{
- struct vc5_context *vc5 = vc5_context(pctx);
+ struct v3d_context *v3d = v3d_context(pctx);
- vc5_flush(pctx);
+ v3d_flush(pctx);
if (fence) {
struct pipe_screen *screen = pctx->screen;
- struct vc5_fence *f = vc5_fence_create(vc5);
+ struct v3d_fence *f = v3d_fence_create(v3d);
screen->fence_reference(screen, fence, NULL);
*fence = (struct pipe_fence_handle *)f;
}
}
static void
-vc5_invalidate_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
+v3d_invalidate_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
{
- struct vc5_context *vc5 = vc5_context(pctx);
- struct vc5_resource *rsc = vc5_resource(prsc);
+ struct v3d_context *v3d = v3d_context(pctx);
+ struct v3d_resource *rsc = v3d_resource(prsc);
rsc->initialized_buffers = 0;
- struct hash_entry *entry = _mesa_hash_table_search(vc5->write_jobs,
+ struct hash_entry *entry = _mesa_hash_table_search(v3d->write_jobs,
prsc);
if (!entry)
return;
- struct vc5_job *job = entry->data;
+ struct v3d_job *job = entry->data;
if (job->key.zsbuf && job->key.zsbuf->texture == prsc)
job->resolve &= ~(PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL);
}
static void
-vc5_context_destroy(struct pipe_context *pctx)
+v3d_context_destroy(struct pipe_context *pctx)
{
- struct vc5_context *vc5 = vc5_context(pctx);
+ struct v3d_context *v3d = v3d_context(pctx);
- vc5_flush(pctx);
+ v3d_flush(pctx);
- if (vc5->blitter)
- util_blitter_destroy(vc5->blitter);
+ if (v3d->blitter)
+ util_blitter_destroy(v3d->blitter);
- if (vc5->primconvert)
- util_primconvert_destroy(vc5->primconvert);
+ if (v3d->primconvert)
+ util_primconvert_destroy(v3d->primconvert);
- if (vc5->uploader)
- u_upload_destroy(vc5->uploader);
+ if (v3d->uploader)
+ u_upload_destroy(v3d->uploader);
- slab_destroy_child(&vc5->transfer_pool);
+ slab_destroy_child(&v3d->transfer_pool);
- pipe_surface_reference(&vc5->framebuffer.cbufs[0], NULL);
- pipe_surface_reference(&vc5->framebuffer.zsbuf, NULL);
+ pipe_surface_reference(&v3d->framebuffer.cbufs[0], NULL);
+ pipe_surface_reference(&v3d->framebuffer.zsbuf, NULL);
- vc5_program_fini(pctx);
+ v3d_program_fini(pctx);
- ralloc_free(vc5);
+ ralloc_free(v3d);
}
struct pipe_context *
-vc5_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
+v3d_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
{
- struct vc5_screen *screen = vc5_screen(pscreen);
- struct vc5_context *vc5;
+ struct v3d_screen *screen = v3d_screen(pscreen);
+ struct v3d_context *v3d;
/* Prevent dumping of the shaders built during context setup. */
uint32_t saved_shaderdb_flag = V3D_DEBUG & V3D_DEBUG_SHADERDB;
V3D_DEBUG &= ~V3D_DEBUG_SHADERDB;
- vc5 = rzalloc(NULL, struct vc5_context);
- if (!vc5)
+ v3d = rzalloc(NULL, struct v3d_context);
+ if (!v3d)
return NULL;
- struct pipe_context *pctx = &vc5->base;
+ struct pipe_context *pctx = &v3d->base;
- vc5->screen = screen;
+ v3d->screen = screen;
int ret = drmSyncobjCreate(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED,
- &vc5->out_sync);
+ &v3d->out_sync);
if (ret) {
- ralloc_free(vc5);
+ ralloc_free(v3d);
return NULL;
}
pctx->screen = pscreen;
pctx->priv = priv;
- pctx->destroy = vc5_context_destroy;
- pctx->flush = vc5_pipe_flush;
- pctx->invalidate_resource = vc5_invalidate_resource;
+ pctx->destroy = v3d_context_destroy;
+ pctx->flush = v3d_pipe_flush;
+ pctx->invalidate_resource = v3d_invalidate_resource;
        if (screen->devinfo.ver >= 41) {
                v3d41_draw_init(pctx);
                v3d41_state_init(pctx);
        } else {
                v3d33_draw_init(pctx);
                v3d33_state_init(pctx);
        }
- vc5_program_init(pctx);
- vc5_query_init(pctx);
- vc5_resource_context_init(pctx);
+ v3d_program_init(pctx);
+ v3d_query_init(pctx);
+ v3d_resource_context_init(pctx);
- vc5_job_init(vc5);
+ v3d_job_init(v3d);
- vc5->fd = screen->fd;
+ v3d->fd = screen->fd;
- slab_create_child(&vc5->transfer_pool, &screen->transfer_pool);
+ slab_create_child(&v3d->transfer_pool, &screen->transfer_pool);
- vc5->uploader = u_upload_create_default(&vc5->base);
- vc5->base.stream_uploader = vc5->uploader;
- vc5->base.const_uploader = vc5->uploader;
+ v3d->uploader = u_upload_create_default(&v3d->base);
+ v3d->base.stream_uploader = v3d->uploader;
+ v3d->base.const_uploader = v3d->uploader;
- vc5->blitter = util_blitter_create(pctx);
- if (!vc5->blitter)
+ v3d->blitter = util_blitter_create(pctx);
+ if (!v3d->blitter)
goto fail;
- vc5->primconvert = util_primconvert_create(pctx,
+ v3d->primconvert = util_primconvert_create(pctx,
(1 << PIPE_PRIM_QUADS) - 1);
- if (!vc5->primconvert)
+ if (!v3d->primconvert)
goto fail;
V3D_DEBUG |= saved_shaderdb_flag;
- vc5->sample_mask = (1 << VC5_MAX_SAMPLES) - 1;
- vc5->active_queries = true;
+ v3d->sample_mask = (1 << VC5_MAX_SAMPLES) - 1;
+ v3d->active_queries = true;
- return &vc5->base;
+ return &v3d->base;
fail:
        pctx->destroy(pctx);
        return NULL;
}
#include "v3d_drm.h"
#include "v3d_screen.h"
-struct vc5_job;
-struct vc5_bo;
-void vc5_job_add_bo(struct vc5_job *job, struct vc5_bo *bo);
+struct v3d_job;
+struct v3d_bo;
+void v3d_job_add_bo(struct v3d_job *job, struct v3d_bo *bo);
#include "v3d_bufmgr.h"
#include "v3d_resource.h"
#include "v3d_cl.h"
#ifdef USE_V3D_SIMULATOR
-#define using_vc5_simulator true
+#define using_v3d_simulator true
#else
-#define using_vc5_simulator false
+#define using_v3d_simulator false
#endif
#define VC5_DIRTY_BLEND (1 << 0)
#define VC5_MAX_FS_INPUTS 64
-struct vc5_sampler_view {
+struct v3d_sampler_view {
struct pipe_sampler_view base;
uint32_t p0;
uint32_t p1;
uint8_t texture_shader_state[32];
/* V3D 4.x: Texture state struct. */
- struct vc5_bo *bo;
+ struct v3d_bo *bo;
};
-struct vc5_sampler_state {
+struct v3d_sampler_state {
struct pipe_sampler_state base;
uint32_t p0;
uint32_t p1;
/* V3D 3.x: Packed texture state. */
uint8_t texture_shader_state[32];
/* V3D 4.x: Sampler state struct. */
- struct vc5_bo *bo;
+ struct v3d_bo *bo;
};
-struct vc5_texture_stateobj {
+struct v3d_texture_stateobj {
struct pipe_sampler_view *textures[PIPE_MAX_SAMPLERS];
unsigned num_textures;
struct pipe_sampler_state *samplers[PIPE_MAX_SAMPLERS];
unsigned num_samplers;
- struct vc5_cl_reloc texture_state[PIPE_MAX_SAMPLERS];
+ struct v3d_cl_reloc texture_state[PIPE_MAX_SAMPLERS];
};
-struct vc5_shader_uniform_info {
+struct v3d_shader_uniform_info {
enum quniform_contents *contents;
uint32_t *data;
uint32_t count;
};
-struct vc5_uncompiled_shader {
+struct v3d_uncompiled_shader {
/** A name for this program, so you can track it in shader-db output. */
uint32_t program_id;
/** How many variants of this program were compiled, for shader-db. */
bool was_tgsi;
};
-struct vc5_compiled_shader {
- struct vc5_bo *bo;
+struct v3d_compiled_shader {
+ struct v3d_bo *bo;
union {
struct v3d_prog_data *base;
} prog_data;
/**
- * VC5_DIRTY_* flags that, when set in vc5->dirty, mean that the
+ * VC5_DIRTY_* flags that, when set in v3d->dirty, mean that the
* uniforms have to be rewritten (and therefore the shader state
* reemitted).
*/
uint32_t uniform_dirty_bits;
};
-struct vc5_program_stateobj {
- struct vc5_uncompiled_shader *bind_vs, *bind_fs;
- struct vc5_compiled_shader *cs, *vs, *fs;
+struct v3d_program_stateobj {
+ struct v3d_uncompiled_shader *bind_vs, *bind_fs;
+ struct v3d_compiled_shader *cs, *vs, *fs;
- struct vc5_bo *spill_bo;
+ struct v3d_bo *spill_bo;
int spill_size_per_thread;
};
-struct vc5_constbuf_stateobj {
+struct v3d_constbuf_stateobj {
struct pipe_constant_buffer cb[PIPE_MAX_CONSTANT_BUFFERS];
uint32_t enabled_mask;
uint32_t dirty_mask;
};
-struct vc5_vertexbuf_stateobj {
+struct v3d_vertexbuf_stateobj {
struct pipe_vertex_buffer vb[PIPE_MAX_ATTRIBS];
unsigned count;
uint32_t enabled_mask;
uint32_t dirty_mask;
};
-struct vc5_vertex_stateobj {
+struct v3d_vertex_stateobj {
struct pipe_vertex_element pipe[VC5_MAX_ATTRIBUTES];
unsigned num_elements;
uint8_t attrs[12 * VC5_MAX_ATTRIBUTES];
- struct vc5_bo *default_attribute_values;
+ struct v3d_bo *default_attribute_values;
};
-struct vc5_streamout_stateobj {
+struct v3d_streamout_stateobj {
struct pipe_stream_output_target *targets[PIPE_MAX_SO_BUFFERS];
unsigned num_targets;
};
-/* Hash table key for vc5->jobs */
-struct vc5_job_key {
+/* Hash table key for v3d->jobs */
+struct v3d_job_key {
struct pipe_surface *cbufs[4];
struct pipe_surface *zsbuf;
};
-enum vc5_ez_state {
+enum v3d_ez_state {
VC5_EZ_UNDECIDED = 0,
VC5_EZ_GT_GE,
VC5_EZ_LT_LE,
/* We want to be able to have multiple jobs in progress at a time, so that
 * we don't need to flush an existing CL just to switch to rendering to a
 * new render target (which would mean reading back from the old render
 * target when starting to render to it again).
*/
-struct vc5_job {
- struct vc5_context *vc5;
- struct vc5_cl bcl;
- struct vc5_cl rcl;
- struct vc5_cl indirect;
- struct vc5_bo *tile_alloc;
- struct vc5_bo *tile_state;
+struct v3d_job {
+ struct v3d_context *v3d;
+ struct v3d_cl bcl;
+ struct v3d_cl rcl;
+ struct v3d_cl indirect;
+ struct v3d_bo *tile_alloc;
+ struct v3d_bo *tile_state;
uint32_t shader_rec_count;
struct drm_v3d_submit_cl submit;
* Current EZ state for drawing. Updated at the start of draw after
* we've decided on the shader being rendered.
*/
- enum vc5_ez_state ez_state;
+ enum v3d_ez_state ez_state;
/**
* The first EZ state that was used for drawing with a decided EZ
* direction (so either UNDECIDED, GT, or LT).
*/
- enum vc5_ez_state first_ez_state;
+ enum v3d_ez_state first_ez_state;
/**
* Number of draw calls (not counting full buffer clears) queued in
*/
uint32_t draw_calls_queued;
- struct vc5_job_key key;
+ struct v3d_job_key key;
};
-struct vc5_context {
+struct v3d_context {
struct pipe_context base;
int fd;
- struct vc5_screen *screen;
+ struct v3d_screen *screen;
/** The 3D rendering job for the currently bound FBO. */
- struct vc5_job *job;
+ struct v3d_job *job;
- /* Map from struct vc5_job_key to the job for that FBO.
+ /* Map from struct v3d_job_key to the job for that FBO.
*/
struct hash_table *jobs;
/**
- * Map from vc5_resource to a job writing to that resource.
+ * Map from v3d_resource to a job writing to that resource.
*
* Primarily for flushing jobs rendering to textures that are now
* being read from.
uint32_t next_uncompiled_program_id;
uint64_t next_compiled_program_id;
- struct vc5_compiler_state *compiler_state;
+ struct v3d_compiler_state *compiler_state;
uint8_t prim_mode;
/** @{ Current pipeline state objects */
struct pipe_scissor_state scissor;
struct pipe_blend_state *blend;
- struct vc5_rasterizer_state *rasterizer;
- struct vc5_depth_stencil_alpha_state *zsa;
+ struct v3d_rasterizer_state *rasterizer;
+ struct v3d_depth_stencil_alpha_state *zsa;
- struct vc5_texture_stateobj verttex, fragtex;
+ struct v3d_texture_stateobj verttex, fragtex;
- struct vc5_program_stateobj prog;
+ struct v3d_program_stateobj prog;
- struct vc5_vertex_stateobj *vtx;
+ struct v3d_vertex_stateobj *vtx;
struct {
struct pipe_blend_color f;
struct pipe_poly_stipple stipple;
struct pipe_clip_state clip;
struct pipe_viewport_state viewport;
- struct vc5_constbuf_stateobj constbuf[PIPE_SHADER_TYPES];
- struct vc5_vertexbuf_stateobj vertexbuf;
- struct vc5_streamout_stateobj streamout;
- struct vc5_bo *current_oq;
+ struct v3d_constbuf_stateobj constbuf[PIPE_SHADER_TYPES];
+ struct v3d_vertexbuf_stateobj vertexbuf;
+ struct v3d_streamout_stateobj streamout;
+ struct v3d_bo *current_oq;
/** @} */
};
-struct vc5_rasterizer_state {
+struct v3d_rasterizer_state {
struct pipe_rasterizer_state base;
/* VC5_CONFIGURATION_BITS */
uint16_t offset_factor;
};
-struct vc5_depth_stencil_alpha_state {
+struct v3d_depth_stencil_alpha_state {
struct pipe_depth_stencil_alpha_state base;
- enum vc5_ez_state ez_state;
+ enum v3d_ez_state ez_state;
/** Uniforms for stencil state.
*
fprintf(stderr, __VA_ARGS__); \
} while (0)
-static inline struct vc5_context *
-vc5_context(struct pipe_context *pcontext)
+static inline struct v3d_context *
+v3d_context(struct pipe_context *pcontext)
{
- return (struct vc5_context *)pcontext;
+ return (struct v3d_context *)pcontext;
}
-static inline struct vc5_sampler_view *
-vc5_sampler_view(struct pipe_sampler_view *psview)
+static inline struct v3d_sampler_view *
+v3d_sampler_view(struct pipe_sampler_view *psview)
{
- return (struct vc5_sampler_view *)psview;
+ return (struct v3d_sampler_view *)psview;
}
-static inline struct vc5_sampler_state *
-vc5_sampler_state(struct pipe_sampler_state *psampler)
+static inline struct v3d_sampler_state *
+v3d_sampler_state(struct pipe_sampler_state *psampler)
{
- return (struct vc5_sampler_state *)psampler;
+ return (struct v3d_sampler_state *)psampler;
}
-struct pipe_context *vc5_context_create(struct pipe_screen *pscreen,
+struct pipe_context *v3d_context_create(struct pipe_screen *pscreen,
void *priv, unsigned flags);
-void vc5_program_init(struct pipe_context *pctx);
-void vc5_program_fini(struct pipe_context *pctx);
-void vc5_query_init(struct pipe_context *pctx);
+void v3d_program_init(struct pipe_context *pctx);
+void v3d_program_fini(struct pipe_context *pctx);
+void v3d_query_init(struct pipe_context *pctx);
-void vc5_simulator_init(struct vc5_screen *screen);
-void vc5_simulator_destroy(struct vc5_screen *screen);
-int vc5_simulator_flush(struct vc5_context *vc5,
+void v3d_simulator_init(struct v3d_screen *screen);
+void v3d_simulator_destroy(struct v3d_screen *screen);
+int v3d_simulator_flush(struct v3d_context *v3d,
struct drm_v3d_submit_cl *args,
- struct vc5_job *job);
-int vc5_simulator_ioctl(int fd, unsigned long request, void *arg);
-void vc5_simulator_open_from_handle(int fd, uint32_t winsys_stride,
+ struct v3d_job *job);
+int v3d_simulator_ioctl(int fd, unsigned long request, void *arg);
+void v3d_simulator_open_from_handle(int fd, uint32_t winsys_stride,
int handle, uint32_t size);
static inline int
-vc5_ioctl(int fd, unsigned long request, void *arg)
+v3d_ioctl(int fd, unsigned long request, void *arg)
{
- if (using_vc5_simulator)
- return vc5_simulator_ioctl(fd, request, arg);
+ if (using_v3d_simulator)
+ return v3d_simulator_ioctl(fd, request, arg);
else
return drmIoctl(fd, request, arg);
}
-void vc5_set_shader_uniform_dirty_flags(struct vc5_compiled_shader *shader);
-struct vc5_cl_reloc vc5_write_uniforms(struct vc5_context *vc5,
- struct vc5_compiled_shader *shader,
- struct vc5_constbuf_stateobj *cb,
- struct vc5_texture_stateobj *texstate);
+void v3d_set_shader_uniform_dirty_flags(struct v3d_compiled_shader *shader);
+struct v3d_cl_reloc v3d_write_uniforms(struct v3d_context *v3d,
+ struct v3d_compiled_shader *shader,
+ struct v3d_constbuf_stateobj *cb,
+ struct v3d_texture_stateobj *texstate);
-void vc5_flush(struct pipe_context *pctx);
-void vc5_job_init(struct vc5_context *vc5);
-struct vc5_job *vc5_get_job(struct vc5_context *vc5,
+void v3d_flush(struct pipe_context *pctx);
+void v3d_job_init(struct v3d_context *v3d);
+struct v3d_job *v3d_get_job(struct v3d_context *v3d,
struct pipe_surface **cbufs,
struct pipe_surface *zsbuf);
-struct vc5_job *vc5_get_job_for_fbo(struct vc5_context *vc5);
-void vc5_job_add_bo(struct vc5_job *job, struct vc5_bo *bo);
-void vc5_job_add_write_resource(struct vc5_job *job, struct pipe_resource *prsc);
-void vc5_job_submit(struct vc5_context *vc5, struct vc5_job *job);
-void vc5_flush_jobs_writing_resource(struct vc5_context *vc5,
+struct v3d_job *v3d_get_job_for_fbo(struct v3d_context *v3d);
+void v3d_job_add_bo(struct v3d_job *job, struct v3d_bo *bo);
+void v3d_job_add_write_resource(struct v3d_job *job, struct pipe_resource *prsc);
+void v3d_job_submit(struct v3d_context *v3d, struct v3d_job *job);
+void v3d_flush_jobs_writing_resource(struct v3d_context *v3d,
struct pipe_resource *prsc);
-void vc5_flush_jobs_reading_resource(struct vc5_context *vc5,
+void v3d_flush_jobs_reading_resource(struct v3d_context *v3d,
struct pipe_resource *prsc);
-void vc5_update_compiled_shaders(struct vc5_context *vc5, uint8_t prim_mode);
+void v3d_update_compiled_shaders(struct v3d_context *v3d, uint8_t prim_mode);
-bool vc5_rt_format_supported(const struct v3d_device_info *devinfo,
+bool v3d_rt_format_supported(const struct v3d_device_info *devinfo,
enum pipe_format f);
-bool vc5_tex_format_supported(const struct v3d_device_info *devinfo,
+bool v3d_tex_format_supported(const struct v3d_device_info *devinfo,
enum pipe_format f);
-uint8_t vc5_get_rt_format(const struct v3d_device_info *devinfo, enum pipe_format f);
-uint8_t vc5_get_tex_format(const struct v3d_device_info *devinfo, enum pipe_format f);
-uint8_t vc5_get_tex_return_size(const struct v3d_device_info *devinfo,
+uint8_t v3d_get_rt_format(const struct v3d_device_info *devinfo, enum pipe_format f);
+uint8_t v3d_get_tex_format(const struct v3d_device_info *devinfo, enum pipe_format f);
+uint8_t v3d_get_tex_return_size(const struct v3d_device_info *devinfo,
enum pipe_format f,
enum pipe_tex_compare compare);
-uint8_t vc5_get_tex_return_channels(const struct v3d_device_info *devinfo,
+uint8_t v3d_get_tex_return_channels(const struct v3d_device_info *devinfo,
enum pipe_format f);
-const uint8_t *vc5_get_format_swizzle(const struct v3d_device_info *devinfo,
+const uint8_t *v3d_get_format_swizzle(const struct v3d_device_info *devinfo,
enum pipe_format f);
-void vc5_get_internal_type_bpp_for_output_format(const struct v3d_device_info *devinfo,
+void v3d_get_internal_type_bpp_for_output_format(const struct v3d_device_info *devinfo,
uint32_t format,
uint32_t *type,
uint32_t *bpp);
-void vc5_init_query_functions(struct vc5_context *vc5);
-void vc5_blit(struct pipe_context *pctx, const struct pipe_blit_info *blit_info);
-void vc5_blitter_save(struct vc5_context *vc5);
+void v3d_init_query_functions(struct v3d_context *v3d);
+void v3d_blit(struct pipe_context *pctx, const struct pipe_blit_info *blit_info);
+void v3d_blitter_save(struct v3d_context *v3d);
-struct vc5_fence *vc5_fence_create(struct vc5_context *vc5);
+struct v3d_fence *v3d_fence_create(struct v3d_context *v3d);
#ifdef v3dX
# include "v3dx_context.h"
* IN THE SOFTWARE.
*/
-/** @file vc5_fence.c
+/** @file v3d_fence.c
*
 * Syncobj-based fence management.
*
#include "v3d_context.h"
#include "v3d_bufmgr.h"
-struct vc5_fence {
+struct v3d_fence {
struct pipe_reference reference;
uint32_t sync;
};
static void
-vc5_fence_reference(struct pipe_screen *pscreen,
+v3d_fence_reference(struct pipe_screen *pscreen,
struct pipe_fence_handle **pp,
struct pipe_fence_handle *pf)
{
- struct vc5_screen *screen = vc5_screen(pscreen);
- struct vc5_fence **p = (struct vc5_fence **)pp;
- struct vc5_fence *f = (struct vc5_fence *)pf;
- struct vc5_fence *old = *p;
+ struct v3d_screen *screen = v3d_screen(pscreen);
+ struct v3d_fence **p = (struct v3d_fence **)pp;
+ struct v3d_fence *f = (struct v3d_fence *)pf;
+ struct v3d_fence *old = *p;
if (pipe_reference(&(*p)->reference, &f->reference)) {
                drmSyncobjDestroy(screen->fd, old->sync);
                free(old);
        }

        *p = f;
}
static boolean
-vc5_fence_finish(struct pipe_screen *pscreen,
+v3d_fence_finish(struct pipe_screen *pscreen,
struct pipe_context *ctx,
struct pipe_fence_handle *pf,
uint64_t timeout_ns)
{
- struct vc5_screen *screen = vc5_screen(pscreen);
- struct vc5_fence *f = (struct vc5_fence *)pf;
+ struct v3d_screen *screen = v3d_screen(pscreen);
+ struct v3d_fence *f = (struct v3d_fence *)pf;
return drmSyncobjWait(screen->fd, &f->sync, 1, timeout_ns, 0, NULL);
}
-struct vc5_fence *
-vc5_fence_create(struct vc5_context *vc5)
+struct v3d_fence *
+v3d_fence_create(struct v3d_context *v3d)
{
- struct vc5_fence *f = calloc(1, sizeof(*f));
+ struct v3d_fence *f = calloc(1, sizeof(*f));
if (!f)
return NULL;
uint32_t new_sync;
/* Make a new sync object for the context. */
- int ret = drmSyncobjCreate(vc5->fd, DRM_SYNCOBJ_CREATE_SIGNALED,
+ int ret = drmSyncobjCreate(v3d->fd, DRM_SYNCOBJ_CREATE_SIGNALED,
&new_sync);
if (ret) {
                free(f);
                return NULL;
        }
pipe_reference_init(&f->reference, 1);
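        /* The fence takes over the context's current syncobj, which gets
         * signaled when the last submitted job completes, and the context
         * moves on to the freshly created one.
         */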
- f->sync = vc5->out_sync;
- vc5->out_sync = new_sync;
+ f->sync = v3d->out_sync;
+ v3d->out_sync = new_sync;
return f;
}
void
-vc5_fence_init(struct vc5_screen *screen)
+v3d_fence_init(struct v3d_screen *screen)
{
- screen->base.fence_reference = vc5_fence_reference;
- screen->base.fence_finish = vc5_fence_finish;
+ screen->base.fence_reference = v3d_fence_reference;
+ screen->base.fence_finish = v3d_fence_finish;
}
#include <stdbool.h>
#include <stdint.h>
-struct vc5_format {
+struct v3d_format {
/** Set if the pipe format is defined in the table. */
bool present;
*/
/**
- * @file vc5_formats.c
+ * @file v3d_formats.c
*
* Contains the table and accessors for VC5 texture and render target format
* support.
#include "v3d_context.h"
#include "v3d_format_table.h"
-static const struct vc5_format *
+static const struct v3d_format *
get_format(const struct v3d_device_info *devinfo, enum pipe_format f)
{
if (devinfo->ver >= 41)
}
bool
-vc5_rt_format_supported(const struct v3d_device_info *devinfo,
+v3d_rt_format_supported(const struct v3d_device_info *devinfo,
enum pipe_format f)
{
- const struct vc5_format *vf = get_format(devinfo, f);
+ const struct v3d_format *vf = get_format(devinfo, f);
if (!vf)
return false;
}
uint8_t
-vc5_get_rt_format(const struct v3d_device_info *devinfo, enum pipe_format f)
+v3d_get_rt_format(const struct v3d_device_info *devinfo, enum pipe_format f)
{
- const struct vc5_format *vf = get_format(devinfo, f);
+ const struct v3d_format *vf = get_format(devinfo, f);
if (!vf)
return 0;
}
bool
-vc5_tex_format_supported(const struct v3d_device_info *devinfo,
+v3d_tex_format_supported(const struct v3d_device_info *devinfo,
enum pipe_format f)
{
- const struct vc5_format *vf = get_format(devinfo, f);
+ const struct v3d_format *vf = get_format(devinfo, f);
return vf != NULL;
}
uint8_t
-vc5_get_tex_format(const struct v3d_device_info *devinfo, enum pipe_format f)
+v3d_get_tex_format(const struct v3d_device_info *devinfo, enum pipe_format f)
{
- const struct vc5_format *vf = get_format(devinfo, f);
+ const struct v3d_format *vf = get_format(devinfo, f);
if (!vf)
return 0;
}
uint8_t
-vc5_get_tex_return_size(const struct v3d_device_info *devinfo,
+v3d_get_tex_return_size(const struct v3d_device_info *devinfo,
enum pipe_format f, enum pipe_tex_compare compare)
{
- const struct vc5_format *vf = get_format(devinfo, f);
+ const struct v3d_format *vf = get_format(devinfo, f);
if (!vf)
return 0;
}
uint8_t
-vc5_get_tex_return_channels(const struct v3d_device_info *devinfo,
+v3d_get_tex_return_channels(const struct v3d_device_info *devinfo,
enum pipe_format f)
{
- const struct vc5_format *vf = get_format(devinfo, f);
+ const struct v3d_format *vf = get_format(devinfo, f);
if (!vf)
return 0;
}
const uint8_t *
-vc5_get_format_swizzle(const struct v3d_device_info *devinfo, enum pipe_format f)
+v3d_get_format_swizzle(const struct v3d_device_info *devinfo, enum pipe_format f)
{
- const struct vc5_format *vf = get_format(devinfo, f);
+ const struct v3d_format *vf = get_format(devinfo, f);
static const uint8_t fallback[] = {0, 1, 2, 3};
if (!vf)
}
void
-vc5_get_internal_type_bpp_for_output_format(const struct v3d_device_info *devinfo,
+v3d_get_internal_type_bpp_for_output_format(const struct v3d_device_info *devinfo,
uint32_t format,
uint32_t *type,
uint32_t *bpp)
* IN THE SOFTWARE.
*/
-/** @file vc5_job.c
+/** @file v3d_job.c
*
* Functions for submitting VC5 render jobs to the kernel.
*/
}
static void
-vc5_job_free(struct vc5_context *vc5, struct vc5_job *job)
+v3d_job_free(struct v3d_context *v3d, struct v3d_job *job)
{
struct set_entry *entry;
set_foreach(job->bos, entry) {
- struct vc5_bo *bo = (struct vc5_bo *)entry->key;
- vc5_bo_unreference(&bo);
+ struct v3d_bo *bo = (struct v3d_bo *)entry->key;
+ v3d_bo_unreference(&bo);
}
- remove_from_ht(vc5->jobs, &job->key);
+ remove_from_ht(v3d->jobs, &job->key);
if (job->write_prscs) {
struct set_entry *entry;
set_foreach(job->write_prscs, entry) {
const struct pipe_resource *prsc = entry->key;
- remove_from_ht(vc5->write_jobs, (void *)prsc);
+ remove_from_ht(v3d->write_jobs, (void *)prsc);
}
}
for (int i = 0; i < VC5_MAX_DRAW_BUFFERS; i++) {
if (job->cbufs[i]) {
- remove_from_ht(vc5->write_jobs, job->cbufs[i]->texture);
+ remove_from_ht(v3d->write_jobs, job->cbufs[i]->texture);
pipe_surface_reference(&job->cbufs[i], NULL);
}
}
if (job->zsbuf) {
- remove_from_ht(vc5->write_jobs, job->zsbuf->texture);
+ remove_from_ht(v3d->write_jobs, job->zsbuf->texture);
pipe_surface_reference(&job->zsbuf, NULL);
}
- if (vc5->job == job)
- vc5->job = NULL;
+ if (v3d->job == job)
+ v3d->job = NULL;
- vc5_destroy_cl(&job->bcl);
- vc5_destroy_cl(&job->rcl);
- vc5_destroy_cl(&job->indirect);
- vc5_bo_unreference(&job->tile_alloc);
- vc5_bo_unreference(&job->tile_state);
+ v3d_destroy_cl(&job->bcl);
+ v3d_destroy_cl(&job->rcl);
+ v3d_destroy_cl(&job->indirect);
+ v3d_bo_unreference(&job->tile_alloc);
+ v3d_bo_unreference(&job->tile_state);
ralloc_free(job);
}
-static struct vc5_job *
-vc5_job_create(struct vc5_context *vc5)
+static struct v3d_job *
+v3d_job_create(struct v3d_context *v3d)
{
- struct vc5_job *job = rzalloc(vc5, struct vc5_job);
+ struct v3d_job *job = rzalloc(v3d, struct v3d_job);
- job->vc5 = vc5;
+ job->v3d = v3d;
- vc5_init_cl(job, &job->bcl);
- vc5_init_cl(job, &job->rcl);
- vc5_init_cl(job, &job->indirect);
+ v3d_init_cl(job, &job->bcl);
+ v3d_init_cl(job, &job->rcl);
+ v3d_init_cl(job, &job->indirect);
job->draw_min_x = ~0;
job->draw_min_y = ~0;
        return job;
}
void
-vc5_job_add_bo(struct vc5_job *job, struct vc5_bo *bo)
+v3d_job_add_bo(struct v3d_job *job, struct v3d_bo *bo)
{
if (!bo)
return;
if (_mesa_set_search(job->bos, bo))
return;
- vc5_bo_reference(bo);
+ v3d_bo_reference(bo);
_mesa_set_add(job->bos, bo);
job->referenced_size += bo->size;
}
void
-vc5_job_add_write_resource(struct vc5_job *job, struct pipe_resource *prsc)
+v3d_job_add_write_resource(struct v3d_job *job, struct pipe_resource *prsc)
{
- struct vc5_context *vc5 = job->vc5;
+ struct v3d_context *v3d = job->v3d;
if (!job->write_prscs) {
job->write_prscs = _mesa_set_create(job,
}
_mesa_set_add(job->write_prscs, prsc);
- _mesa_hash_table_insert(vc5->write_jobs, prsc, job);
+ _mesa_hash_table_insert(v3d->write_jobs, prsc, job);
}
void
-vc5_flush_jobs_writing_resource(struct vc5_context *vc5,
+v3d_flush_jobs_writing_resource(struct v3d_context *v3d,
struct pipe_resource *prsc)
{
- struct hash_entry *entry = _mesa_hash_table_search(vc5->write_jobs,
+ struct hash_entry *entry = _mesa_hash_table_search(v3d->write_jobs,
prsc);
if (entry) {
- struct vc5_job *job = entry->data;
- vc5_job_submit(vc5, job);
+ struct v3d_job *job = entry->data;
+ v3d_job_submit(v3d, job);
}
}
void
-vc5_flush_jobs_reading_resource(struct vc5_context *vc5,
+v3d_flush_jobs_reading_resource(struct v3d_context *v3d,
struct pipe_resource *prsc)
{
- struct vc5_resource *rsc = vc5_resource(prsc);
+ struct v3d_resource *rsc = v3d_resource(prsc);
- vc5_flush_jobs_writing_resource(vc5, prsc);
+ v3d_flush_jobs_writing_resource(v3d, prsc);
struct hash_entry *entry;
- hash_table_foreach(vc5->jobs, entry) {
- struct vc5_job *job = entry->data;
+ hash_table_foreach(v3d->jobs, entry) {
+ struct v3d_job *job = entry->data;
if (_mesa_set_search(job->bos, rsc->bo)) {
- vc5_job_submit(vc5, job);
- /* Reminder: vc5->jobs is safe to keep iterating even
+ v3d_job_submit(v3d, job);
+ /* Reminder: v3d->jobs is safe to keep iterating even
* after deletion of an entry.
*/
continue;
}
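The reminder above holds because Mesa's hash table removes entries by tombstoning them rather than rehashing, so the walk stays valid after _mesa_hash_table_remove(). The same pattern in isolation (should_submit is a placeholder):
struct hash_entry *entry;
hash_table_foreach(v3d->jobs, entry) {
        struct v3d_job *job = entry->data;
        if (should_submit(job))
                v3d_job_submit(v3d, job); /* frees the job, removes its entry */
}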
static void
-vc5_job_set_tile_buffer_size(struct vc5_job *job)
+v3d_job_set_tile_buffer_size(struct v3d_job *job)
{
static const uint8_t tile_sizes[] = {
64, 64,
int max_bpp = RENDER_TARGET_MAXIMUM_32BPP;
for (int i = 0; i < VC5_MAX_DRAW_BUFFERS; i++) {
if (job->cbufs[i]) {
- struct vc5_surface *surf = vc5_surface(job->cbufs[i]);
+ struct v3d_surface *surf = v3d_surface(job->cbufs[i]);
max_bpp = MAX2(max_bpp, surf->internal_bpp);
}
}
}
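The truncated tile_sizes[] table holds (width, height) pairs that halve as per-pixel tile-buffer storage grows; a sketch of the indexing this function presumably performs (reconstructed, not verbatim):
int tile_size_index = 0;
if (job->msaa)
        tile_size_index += 2;       /* 4x MSAA quadruples per-pixel storage */
tile_size_index += max_bpp;         /* RENDER_TARGET_MAXIMUM_{32,64,128}BPP */
job->tile_width = tile_sizes[tile_size_index * 2 + 0];
job->tile_height = tile_sizes[tile_size_index * 2 + 1];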
/**
- * Returns a vc5_job struture for tracking V3D rendering to a particular FBO.
+ * Returns a v3d_job structure for tracking V3D rendering to a particular FBO.
*
* If we've already started rendering to this FBO, then return the same job,
* otherwise make a new one. If we're beginning rendering to an FBO, make
* sure that any previous reads of the FBO (or writes to its color/Z surfaces)
* have been flushed.
*/
-struct vc5_job *
-vc5_get_job(struct vc5_context *vc5,
+struct v3d_job *
+v3d_get_job(struct v3d_context *v3d,
struct pipe_surface **cbufs, struct pipe_surface *zsbuf)
{
/* Return the existing job for this FBO if we have one */
- struct vc5_job_key local_key = {
+ struct v3d_job_key local_key = {
.cbufs = {
cbufs[0],
cbufs[1],
},
.zsbuf = zsbuf,
};
- struct hash_entry *entry = _mesa_hash_table_search(vc5->jobs,
+ struct hash_entry *entry = _mesa_hash_table_search(v3d->jobs,
&local_key);
if (entry)
return entry->data;
/* Creating a new job. Make sure that any previous jobs reading or
* writing these buffers are flushed.
*/
- struct vc5_job *job = vc5_job_create(vc5);
+ struct v3d_job *job = v3d_job_create(v3d);
for (int i = 0; i < VC5_MAX_DRAW_BUFFERS; i++) {
if (cbufs[i]) {
- vc5_flush_jobs_reading_resource(vc5, cbufs[i]->texture);
+ v3d_flush_jobs_reading_resource(v3d, cbufs[i]->texture);
pipe_surface_reference(&job->cbufs[i], cbufs[i]);
if (cbufs[i]->texture->nr_samples > 1)
}
}
if (zsbuf) {
- vc5_flush_jobs_reading_resource(vc5, zsbuf->texture);
+ v3d_flush_jobs_reading_resource(v3d, zsbuf->texture);
pipe_surface_reference(&job->zsbuf, zsbuf);
if (zsbuf->texture->nr_samples > 1)
job->msaa = true;
}
- vc5_job_set_tile_buffer_size(job);
+ v3d_job_set_tile_buffer_size(job);
for (int i = 0; i < VC5_MAX_DRAW_BUFFERS; i++) {
if (cbufs[i])
- _mesa_hash_table_insert(vc5->write_jobs,
+ _mesa_hash_table_insert(v3d->write_jobs,
cbufs[i]->texture, job);
}
if (zsbuf)
- _mesa_hash_table_insert(vc5->write_jobs, zsbuf->texture, job);
+ _mesa_hash_table_insert(v3d->write_jobs, zsbuf->texture, job);
memcpy(&job->key, &local_key, sizeof(local_key));
- _mesa_hash_table_insert(vc5->jobs, &job->key, job);
+ _mesa_hash_table_insert(v3d->jobs, &job->key, job);
return job;
}
-struct vc5_job *
-vc5_get_job_for_fbo(struct vc5_context *vc5)
+struct v3d_job *
+v3d_get_job_for_fbo(struct v3d_context *v3d)
{
- if (vc5->job)
- return vc5->job;
+ if (v3d->job)
+ return v3d->job;
- struct pipe_surface **cbufs = vc5->framebuffer.cbufs;
- struct pipe_surface *zsbuf = vc5->framebuffer.zsbuf;
- struct vc5_job *job = vc5_get_job(vc5, cbufs, zsbuf);
+ struct pipe_surface **cbufs = v3d->framebuffer.cbufs;
+ struct pipe_surface *zsbuf = v3d->framebuffer.zsbuf;
+ struct v3d_job *job = v3d_get_job(v3d, cbufs, zsbuf);
- /* The dirty flags are tracking what's been updated while vc5->job has
+ /* The dirty flags are tracking what's been updated while v3d->job has
* been bound, so set them all to ~0 when switching between jobs. We
* also need to reset all state at the start of rendering.
*/
- vc5->dirty = ~0;
+ v3d->dirty = ~0;
/* If we're binding to uninitialized buffers, no need to load their
* contents before drawing.
*/
for (int i = 0; i < VC5_MAX_DRAW_BUFFERS; i++) {
if (cbufs[i]) {
- struct vc5_resource *rsc = vc5_resource(cbufs[i]->texture);
+ struct v3d_resource *rsc = v3d_resource(cbufs[i]->texture);
if (!rsc->writes)
job->cleared |= PIPE_CLEAR_COLOR0 << i;
}
}
if (zsbuf) {
- struct vc5_resource *rsc = vc5_resource(zsbuf->texture);
+ struct v3d_resource *rsc = v3d_resource(zsbuf->texture);
if (!rsc->writes)
job->cleared |= PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL;
}
- job->draw_tiles_x = DIV_ROUND_UP(vc5->framebuffer.width,
+ job->draw_tiles_x = DIV_ROUND_UP(v3d->framebuffer.width,
job->tile_width);
- job->draw_tiles_y = DIV_ROUND_UP(vc5->framebuffer.height,
+ job->draw_tiles_y = DIV_ROUND_UP(v3d->framebuffer.height,
job->tile_height);
- vc5->job = job;
+ v3d->job = job;
return job;
}
static bool
-vc5_clif_dump_lookup(void *data, uint32_t addr, void **vaddr)
+v3d_clif_dump_lookup(void *data, uint32_t addr, void **vaddr)
{
- struct vc5_job *job = data;
+ struct v3d_job *job = data;
struct set_entry *entry;
set_foreach(job->bos, entry) {
- struct vc5_bo *bo = (void *)entry->key;
+ struct v3d_bo *bo = (void *)entry->key;
if (addr >= bo->offset &&
addr < bo->offset + bo->size) {
- vc5_bo_map(bo);
+ v3d_bo_map(bo);
*vaddr = bo->map + addr - bo->offset;
return true;
}
}
static void
-vc5_clif_dump(struct vc5_context *vc5, struct vc5_job *job)
+v3d_clif_dump(struct v3d_context *v3d, struct v3d_job *job)
{
if (!(V3D_DEBUG & V3D_DEBUG_CL))
return;
- struct clif_dump *clif = clif_dump_init(&vc5->screen->devinfo,
- stderr, vc5_clif_dump_lookup,
+ struct clif_dump *clif = clif_dump_init(&v3d->screen->devinfo,
+ stderr, v3d_clif_dump_lookup,
job);
fprintf(stderr, "BCL: 0x%08x..0x%08x\n",
* Submits the job to the kernel and then reinitializes it.
*/
void
-vc5_job_submit(struct vc5_context *vc5, struct vc5_job *job)
+v3d_job_submit(struct v3d_context *v3d, struct v3d_job *job)
{
- MAYBE_UNUSED struct vc5_screen *screen = vc5->screen;
+ MAYBE_UNUSED struct v3d_screen *screen = v3d->screen;
if (!job->needs_flush)
goto done;
- if (vc5->screen->devinfo.ver >= 41)
+ if (v3d->screen->devinfo.ver >= 41)
v3d41_emit_rcl(job);
else
v3d33_emit_rcl(job);
if (cl_offset(&job->bcl) > 0) {
if (screen->devinfo.ver >= 41)
- v3d41_bcl_epilogue(vc5, job);
+ v3d41_bcl_epilogue(v3d, job);
else
- v3d33_bcl_epilogue(vc5, job);
+ v3d33_bcl_epilogue(v3d, job);
}
- job->submit.out_sync = vc5->out_sync;
+ job->submit.out_sync = v3d->out_sync;
job->submit.bcl_end = job->bcl.bo->offset + cl_offset(&job->bcl);
job->submit.rcl_end = job->rcl.bo->offset + cl_offset(&job->rcl);
* instead of binner packets.
*/
if (screen->devinfo.ver >= 41) {
- vc5_job_add_bo(job, job->tile_alloc);
+ v3d_job_add_bo(job, job->tile_alloc);
job->submit.qma = job->tile_alloc->offset;
job->submit.qms = job->tile_alloc->size;
- vc5_job_add_bo(job, job->tile_state);
+ v3d_job_add_bo(job, job->tile_state);
job->submit.qts = job->tile_state->offset;
}
- vc5_clif_dump(vc5, job);
+ v3d_clif_dump(v3d, job);
if (!(V3D_DEBUG & V3D_DEBUG_NORAST)) {
int ret;
#ifndef USE_V3D_SIMULATOR
- ret = drmIoctl(vc5->fd, DRM_IOCTL_V3D_SUBMIT_CL, &job->submit);
+ ret = drmIoctl(v3d->fd, DRM_IOCTL_V3D_SUBMIT_CL, &job->submit);
#else
- ret = vc5_simulator_flush(vc5, &job->submit, job);
+ ret = v3d_simulator_flush(v3d, &job->submit, job);
#endif
static bool warned = false;
if (ret && !warned) {
}
done:
- vc5_job_free(vc5, job);
+ v3d_job_free(v3d, job);
}
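For reference, the submit fields filled in above belong to struct drm_v3d_submit_cl in the DRM uapi; a sketch of the layout (field order approximate; include/uapi/drm/v3d_drm.h is authoritative):
struct drm_v3d_submit_cl {
        __u32 bcl_start, bcl_end;     /* binner CL address range */
        __u32 rcl_start, rcl_end;     /* render CL address range */
        __u32 in_sync_bcl, in_sync_rcl;
        __u32 out_sync;               /* syncobj signaled on completion */
        __u32 qma, qms;               /* tile alloc BO offset and size */
        __u32 qts;                    /* tile state BO offset */
        __u64 bo_handles;             /* pointer to array of GEM handles */
        __u32 bo_handle_count;
        __u32 pad;
};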
static bool
-vc5_job_compare(const void *a, const void *b)
+v3d_job_compare(const void *a, const void *b)
{
- return memcmp(a, b, sizeof(struct vc5_job_key)) == 0;
+ return memcmp(a, b, sizeof(struct v3d_job_key)) == 0;
}
static uint32_t
-vc5_job_hash(const void *key)
+v3d_job_hash(const void *key)
{
- return _mesa_hash_data(key, sizeof(struct vc5_job_key));
+ return _mesa_hash_data(key, sizeof(struct v3d_job_key));
}
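Since both the hash and the compare operate on the raw bytes of struct v3d_job_key, lookups are only stable if every byte is deterministic; the designated initializer in v3d_get_job zeroes the unnamed members, and an explicit memset additionally covers compiler-inserted padding. The defensive form of the pattern:
struct v3d_job_key key;
memset(&key, 0, sizeof(key));   /* zeroes padding bytes as well as members */
key.cbufs[0] = cbuf;
key.zsbuf = zsbuf;
uint32_t hash = v3d_job_hash(&key);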
void
-vc5_job_init(struct vc5_context *vc5)
+v3d_job_init(struct v3d_context *v3d)
{
- vc5->jobs = _mesa_hash_table_create(vc5,
- vc5_job_hash,
- vc5_job_compare);
- vc5->write_jobs = _mesa_hash_table_create(vc5,
+ v3d->jobs = _mesa_hash_table_create(v3d,
+ v3d_job_hash,
+ v3d_job_compare);
+ v3d->write_jobs = _mesa_hash_table_create(v3d,
_mesa_hash_pointer,
_mesa_key_pointer_equal);
}
#include "mesa/state_tracker/st_glsl_types.h"
static gl_varying_slot
-vc5_get_slot_for_driver_location(nir_shader *s, uint32_t driver_location)
+v3d_get_slot_for_driver_location(nir_shader *s, uint32_t driver_location)
{
nir_foreach_variable(var, &s->outputs) {
if (var->data.driver_location == driver_location) {
* varyings together in a single data spec.
*/
static void
-vc5_set_transform_feedback_outputs(struct vc5_uncompiled_shader *so,
+v3d_set_transform_feedback_outputs(struct v3d_uncompiled_shader *so,
const struct pipe_stream_output_info *stream_output)
{
if (!stream_output->num_outputs)
*/
for (int j = 0; j < output->num_components; j++) {
gl_varying_slot slot =
- vc5_get_slot_for_driver_location(so->base.ir.nir, output->register_index);
+ v3d_get_slot_for_driver_location(so->base.ir.nir, output->register_index);
slots[slot_count] =
v3d_slot_from_slot_and_component(slot,
}
static void *
-vc5_shader_state_create(struct pipe_context *pctx,
+v3d_shader_state_create(struct pipe_context *pctx,
const struct pipe_shader_state *cso)
{
- struct vc5_context *vc5 = vc5_context(pctx);
- struct vc5_uncompiled_shader *so = CALLOC_STRUCT(vc5_uncompiled_shader);
+ struct v3d_context *v3d = v3d_context(pctx);
+ struct v3d_uncompiled_shader *so = CALLOC_STRUCT(v3d_uncompiled_shader);
if (!so)
return NULL;
- so->program_id = vc5->next_uncompiled_program_id++;
+ so->program_id = v3d->next_uncompiled_program_id++;
nir_shader *s;
so->base.type = PIPE_SHADER_IR_NIR;
so->base.ir.nir = s;
- vc5_set_transform_feedback_outputs(so, &cso->stream_output);
+ v3d_set_transform_feedback_outputs(so, &cso->stream_output);
if (V3D_DEBUG & (V3D_DEBUG_NIR |
v3d_debug_flag_for_shader_stage(s->info.stage))) {
return so;
}
-static struct vc5_compiled_shader *
-vc5_get_compiled_shader(struct vc5_context *vc5, struct v3d_key *key)
+static struct v3d_compiled_shader *
+v3d_get_compiled_shader(struct v3d_context *v3d, struct v3d_key *key)
{
- struct vc5_uncompiled_shader *shader_state = key->shader_state;
+ struct v3d_uncompiled_shader *shader_state = key->shader_state;
nir_shader *s = shader_state->base.ir.nir;
struct hash_table *ht;
uint32_t key_size;
if (s->info.stage == MESA_SHADER_FRAGMENT) {
- ht = vc5->fs_cache;
+ ht = v3d->fs_cache;
key_size = sizeof(struct v3d_fs_key);
} else {
- ht = vc5->vs_cache;
+ ht = v3d->vs_cache;
key_size = sizeof(struct v3d_vs_key);
}
if (entry)
return entry->data;
- struct vc5_compiled_shader *shader =
- rzalloc(NULL, struct vc5_compiled_shader);
+ struct v3d_compiled_shader *shader =
+ rzalloc(NULL, struct v3d_compiled_shader);
int program_id = shader_state->program_id;
int variant_id =
case MESA_SHADER_VERTEX:
shader->prog_data.vs = rzalloc(shader, struct v3d_vs_prog_data);
- qpu_insts = v3d_compile_vs(vc5->screen->compiler,
+ qpu_insts = v3d_compile_vs(v3d->screen->compiler,
(struct v3d_vs_key *)key,
shader->prog_data.vs, s,
program_id, variant_id,
case MESA_SHADER_FRAGMENT:
shader->prog_data.fs = rzalloc(shader, struct v3d_fs_prog_data);
- qpu_insts = v3d_compile_fs(vc5->screen->compiler,
+ qpu_insts = v3d_compile_fs(v3d->screen->compiler,
(struct v3d_fs_key *)key,
shader->prog_data.fs, s,
program_id, variant_id,
unreachable("bad stage");
}
- vc5_set_shader_uniform_dirty_flags(shader);
+ v3d_set_shader_uniform_dirty_flags(shader);
- shader->bo = vc5_bo_alloc(vc5->screen, shader_size, "shader");
- vc5_bo_map(shader->bo);
+ shader->bo = v3d_bo_alloc(v3d->screen, shader_size, "shader");
+ v3d_bo_map(shader->bo);
memcpy(shader->bo->map, qpu_insts, shader_size);
free(qpu_insts);
- struct vc5_key *dup_key;
+ struct v3d_key *dup_key;
dup_key = ralloc_size(shader, key_size);
memcpy(dup_key, key, key_size);
_mesa_hash_table_insert(ht, dup_key, shader);
if (shader->prog_data.base->spill_size >
- vc5->prog.spill_size_per_thread) {
+ v3d->prog.spill_size_per_thread) {
/* Max 4 QPUs per slice, 3 slices per core. We only do single
* core so far. This overallocates memory on smaller cores.
*/
int total_spill_size =
4 * 3 * shader->prog_data.base->spill_size;
- vc5_bo_unreference(&vc5->prog.spill_bo);
- vc5->prog.spill_bo = vc5_bo_alloc(vc5->screen,
+ v3d_bo_unreference(&v3d->prog.spill_bo);
+ v3d->prog.spill_bo = v3d_bo_alloc(v3d->screen,
total_spill_size, "spill");
- vc5->prog.spill_size_per_thread =
+ v3d->prog.spill_size_per_thread =
shader->prog_data.base->spill_size;
}
}
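To make the sizing concrete, a worked example with a hypothetical per-thread spill size:
/* spill_size = 512 bytes per thread:
 *   4 QPUs/slice * 3 slices/core = 12 QPUs on the single core,
 *   so total_spill_size = 4 * 3 * 512 = 6144 bytes.
 */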
static void
-vc5_setup_shared_key(struct vc5_context *vc5, struct v3d_key *key,
- struct vc5_texture_stateobj *texstate)
+v3d_setup_shared_key(struct v3d_context *v3d, struct v3d_key *key,
+ struct v3d_texture_stateobj *texstate)
{
- const struct v3d_device_info *devinfo = &vc5->screen->devinfo;
+ const struct v3d_device_info *devinfo = &v3d->screen->devinfo;
for (int i = 0; i < texstate->num_textures; i++) {
struct pipe_sampler_view *sampler = texstate->textures[i];
- struct vc5_sampler_view *vc5_sampler = vc5_sampler_view(sampler);
+ struct v3d_sampler_view *v3d_sampler = v3d_sampler_view(sampler);
struct pipe_sampler_state *sampler_state =
texstate->samplers[i];
continue;
key->tex[i].return_size =
- vc5_get_tex_return_size(devinfo,
+ v3d_get_tex_return_size(devinfo,
sampler->format,
sampler_state->compare_mode);
key->tex[i].return_channels = 4;
} else {
key->tex[i].return_channels =
- vc5_get_tex_return_channels(devinfo,
+ v3d_get_tex_return_channels(devinfo,
sampler->format);
}
if (key->tex[i].return_size == 32 && devinfo->ver < 40) {
memcpy(key->tex[i].swizzle,
- vc5_sampler->swizzle,
- sizeof(vc5_sampler->swizzle));
+ v3d_sampler->swizzle,
+ sizeof(v3d_sampler->swizzle));
} else {
/* For 16-bit returns, we let the sampler state handle
* the swizzle.
}
}
- key->ucp_enables = vc5->rasterizer->base.clip_plane_enable;
+ key->ucp_enables = v3d->rasterizer->base.clip_plane_enable;
}
static void
-vc5_update_compiled_fs(struct vc5_context *vc5, uint8_t prim_mode)
+v3d_update_compiled_fs(struct v3d_context *v3d, uint8_t prim_mode)
{
- struct vc5_job *job = vc5->job;
+ struct v3d_job *job = v3d->job;
struct v3d_fs_key local_key;
struct v3d_fs_key *key = &local_key;
- if (!(vc5->dirty & (VC5_DIRTY_PRIM_MODE |
+ if (!(v3d->dirty & (VC5_DIRTY_PRIM_MODE |
VC5_DIRTY_BLEND |
VC5_DIRTY_FRAMEBUFFER |
VC5_DIRTY_ZSA |
}
memset(key, 0, sizeof(*key));
- vc5_setup_shared_key(vc5, &key->base, &vc5->fragtex);
- key->base.shader_state = vc5->prog.bind_fs;
+ v3d_setup_shared_key(v3d, &key->base, &v3d->fragtex);
+ key->base.shader_state = v3d->prog.bind_fs;
key->is_points = (prim_mode == PIPE_PRIM_POINTS);
key->is_lines = (prim_mode >= PIPE_PRIM_LINES &&
prim_mode <= PIPE_PRIM_LINE_STRIP);
- key->clamp_color = vc5->rasterizer->base.clamp_fragment_color;
- if (vc5->blend->logicop_enable) {
- key->logicop_func = vc5->blend->logicop_func;
+ key->clamp_color = v3d->rasterizer->base.clamp_fragment_color;
+ if (v3d->blend->logicop_enable) {
+ key->logicop_func = v3d->blend->logicop_func;
} else {
key->logicop_func = PIPE_LOGICOP_COPY;
}
if (job->msaa) {
- key->msaa = vc5->rasterizer->base.multisample;
- key->sample_coverage = (vc5->rasterizer->base.multisample &&
- vc5->sample_mask != (1 << VC5_MAX_SAMPLES) - 1);
- key->sample_alpha_to_coverage = vc5->blend->alpha_to_coverage;
- key->sample_alpha_to_one = vc5->blend->alpha_to_one;
+ key->msaa = v3d->rasterizer->base.multisample;
+ key->sample_coverage = (v3d->rasterizer->base.multisample &&
+ v3d->sample_mask != (1 << VC5_MAX_SAMPLES) - 1);
+ key->sample_alpha_to_coverage = v3d->blend->alpha_to_coverage;
+ key->sample_alpha_to_one = v3d->blend->alpha_to_one;
}
- key->depth_enabled = (vc5->zsa->base.depth.enabled ||
- vc5->zsa->base.stencil[0].enabled);
- if (vc5->zsa->base.alpha.enabled) {
+ key->depth_enabled = (v3d->zsa->base.depth.enabled ||
+ v3d->zsa->base.stencil[0].enabled);
+ if (v3d->zsa->base.alpha.enabled) {
key->alpha_test = true;
- key->alpha_test_func = vc5->zsa->base.alpha.func;
+ key->alpha_test_func = v3d->zsa->base.alpha.func;
}
/* gl_FragColor's propagation to however many bound color buffers
* there are means that the buffer count needs to be in the key.
*/
- key->nr_cbufs = vc5->framebuffer.nr_cbufs;
- key->swap_color_rb = vc5->swap_color_rb;
+ key->nr_cbufs = v3d->framebuffer.nr_cbufs;
+ key->swap_color_rb = v3d->swap_color_rb;
for (int i = 0; i < key->nr_cbufs; i++) {
- struct pipe_surface *cbuf = vc5->framebuffer.cbufs[i];
+ struct pipe_surface *cbuf = v3d->framebuffer.cbufs[i];
if (!cbuf)
continue;
key->f32_color_rb |= 1 << i;
}
- if (vc5->prog.bind_fs->was_tgsi) {
+ if (v3d->prog.bind_fs->was_tgsi) {
if (util_format_is_pure_uint(cbuf->format))
key->uint_color_rb |= 1 << i;
else if (util_format_is_pure_sint(cbuf->format))
if (key->is_points) {
key->point_sprite_mask =
- vc5->rasterizer->base.sprite_coord_enable;
+ v3d->rasterizer->base.sprite_coord_enable;
key->point_coord_upper_left =
- (vc5->rasterizer->base.sprite_coord_mode ==
+ (v3d->rasterizer->base.sprite_coord_mode ==
PIPE_SPRITE_COORD_UPPER_LEFT);
}
- key->light_twoside = vc5->rasterizer->base.light_twoside;
- key->shade_model_flat = vc5->rasterizer->base.flatshade;
+ key->light_twoside = v3d->rasterizer->base.light_twoside;
+ key->shade_model_flat = v3d->rasterizer->base.flatshade;
- struct vc5_compiled_shader *old_fs = vc5->prog.fs;
- vc5->prog.fs = vc5_get_compiled_shader(vc5, &key->base);
- if (vc5->prog.fs == old_fs)
+ struct v3d_compiled_shader *old_fs = v3d->prog.fs;
+ v3d->prog.fs = v3d_get_compiled_shader(v3d, &key->base);
+ if (v3d->prog.fs == old_fs)
return;
- vc5->dirty |= VC5_DIRTY_COMPILED_FS;
+ v3d->dirty |= VC5_DIRTY_COMPILED_FS;
if (old_fs) {
- if (vc5->prog.fs->prog_data.fs->flat_shade_flags !=
+ if (v3d->prog.fs->prog_data.fs->flat_shade_flags !=
old_fs->prog_data.fs->flat_shade_flags) {
- vc5->dirty |= VC5_DIRTY_FLAT_SHADE_FLAGS;
+ v3d->dirty |= VC5_DIRTY_FLAT_SHADE_FLAGS;
}
- if (vc5->prog.fs->prog_data.fs->centroid_flags !=
+ if (v3d->prog.fs->prog_data.fs->centroid_flags !=
old_fs->prog_data.fs->centroid_flags) {
- vc5->dirty |= VC5_DIRTY_CENTROID_FLAGS;
+ v3d->dirty |= VC5_DIRTY_CENTROID_FLAGS;
}
}
- if (old_fs && memcmp(vc5->prog.fs->prog_data.fs->input_slots,
+ if (old_fs && memcmp(v3d->prog.fs->prog_data.fs->input_slots,
old_fs->prog_data.fs->input_slots,
- sizeof(vc5->prog.fs->prog_data.fs->input_slots))) {
- vc5->dirty |= VC5_DIRTY_FS_INPUTS;
+ sizeof(v3d->prog.fs->prog_data.fs->input_slots))) {
+ v3d->dirty |= VC5_DIRTY_FS_INPUTS;
}
}
static void
-vc5_update_compiled_vs(struct vc5_context *vc5, uint8_t prim_mode)
+v3d_update_compiled_vs(struct v3d_context *v3d, uint8_t prim_mode)
{
struct v3d_vs_key local_key;
struct v3d_vs_key *key = &local_key;
- if (!(vc5->dirty & (VC5_DIRTY_PRIM_MODE |
+ if (!(v3d->dirty & (VC5_DIRTY_PRIM_MODE |
VC5_DIRTY_RASTERIZER |
VC5_DIRTY_VERTTEX |
VC5_DIRTY_VTXSTATE |
}
memset(key, 0, sizeof(*key));
- vc5_setup_shared_key(vc5, &key->base, &vc5->verttex);
- key->base.shader_state = vc5->prog.bind_vs;
- key->num_fs_inputs = vc5->prog.fs->prog_data.fs->base.num_inputs;
+ v3d_setup_shared_key(v3d, &key->base, &v3d->verttex);
+ key->base.shader_state = v3d->prog.bind_vs;
+ key->num_fs_inputs = v3d->prog.fs->prog_data.fs->base.num_inputs;
STATIC_ASSERT(sizeof(key->fs_inputs) ==
- sizeof(vc5->prog.fs->prog_data.fs->input_slots));
- memcpy(key->fs_inputs, vc5->prog.fs->prog_data.fs->input_slots,
+ sizeof(v3d->prog.fs->prog_data.fs->input_slots));
+ memcpy(key->fs_inputs, v3d->prog.fs->prog_data.fs->input_slots,
sizeof(key->fs_inputs));
- key->clamp_color = vc5->rasterizer->base.clamp_vertex_color;
+ key->clamp_color = v3d->rasterizer->base.clamp_vertex_color;
key->per_vertex_point_size =
(prim_mode == PIPE_PRIM_POINTS &&
- vc5->rasterizer->base.point_size_per_vertex);
+ v3d->rasterizer->base.point_size_per_vertex);
- struct vc5_compiled_shader *vs =
- vc5_get_compiled_shader(vc5, &key->base);
- if (vs != vc5->prog.vs) {
- vc5->prog.vs = vs;
- vc5->dirty |= VC5_DIRTY_COMPILED_VS;
+ struct v3d_compiled_shader *vs =
+ v3d_get_compiled_shader(v3d, &key->base);
+ if (vs != v3d->prog.vs) {
+ v3d->prog.vs = vs;
+ v3d->dirty |= VC5_DIRTY_COMPILED_VS;
}
key->is_coord = true;
/* Coord shaders only output varyings used by transform feedback. */
- struct vc5_uncompiled_shader *shader_state = key->base.shader_state;
+ struct v3d_uncompiled_shader *shader_state = key->base.shader_state;
memcpy(key->fs_inputs, shader_state->tf_outputs,
sizeof(*key->fs_inputs) * shader_state->num_tf_outputs);
if (shader_state->num_tf_outputs < key->num_fs_inputs) {
}
key->num_fs_inputs = shader_state->num_tf_outputs;
- struct vc5_compiled_shader *cs =
- vc5_get_compiled_shader(vc5, &key->base);
- if (cs != vc5->prog.cs) {
- vc5->prog.cs = cs;
- vc5->dirty |= VC5_DIRTY_COMPILED_CS;
+ struct v3d_compiled_shader *cs =
+ v3d_get_compiled_shader(v3d, &key->base);
+ if (cs != v3d->prog.cs) {
+ v3d->prog.cs = cs;
+ v3d->dirty |= VC5_DIRTY_COMPILED_CS;
}
}
void
-vc5_update_compiled_shaders(struct vc5_context *vc5, uint8_t prim_mode)
+v3d_update_compiled_shaders(struct v3d_context *v3d, uint8_t prim_mode)
{
- vc5_update_compiled_fs(vc5, prim_mode);
- vc5_update_compiled_vs(vc5, prim_mode);
+ v3d_update_compiled_fs(v3d, prim_mode);
+ v3d_update_compiled_vs(v3d, prim_mode);
}
static uint32_t
static void
delete_from_cache_if_matches(struct hash_table *ht,
- struct vc5_compiled_shader **last_compile,
+ struct v3d_compiled_shader **last_compile,
struct hash_entry *entry,
- struct vc5_uncompiled_shader *so)
+ struct v3d_uncompiled_shader *so)
{
const struct v3d_key *key = entry->key;
if (key->shader_state == so) {
- struct vc5_compiled_shader *shader = entry->data;
+ struct v3d_compiled_shader *shader = entry->data;
_mesa_hash_table_remove(ht, entry);
- vc5_bo_unreference(&shader->bo);
+ v3d_bo_unreference(&shader->bo);
if (shader == *last_compile)
*last_compile = NULL;
}
static void
-vc5_shader_state_delete(struct pipe_context *pctx, void *hwcso)
+v3d_shader_state_delete(struct pipe_context *pctx, void *hwcso)
{
- struct vc5_context *vc5 = vc5_context(pctx);
- struct vc5_uncompiled_shader *so = hwcso;
+ struct v3d_context *v3d = v3d_context(pctx);
+ struct v3d_uncompiled_shader *so = hwcso;
struct hash_entry *entry;
- hash_table_foreach(vc5->fs_cache, entry) {
- delete_from_cache_if_matches(vc5->fs_cache, &vc5->prog.fs,
+ hash_table_foreach(v3d->fs_cache, entry) {
+ delete_from_cache_if_matches(v3d->fs_cache, &v3d->prog.fs,
entry, so);
}
- hash_table_foreach(vc5->vs_cache, entry) {
- delete_from_cache_if_matches(vc5->vs_cache, &vc5->prog.vs,
+ hash_table_foreach(v3d->vs_cache, entry) {
+ delete_from_cache_if_matches(v3d->vs_cache, &v3d->prog.vs,
entry, so);
}
}
static void
-vc5_fp_state_bind(struct pipe_context *pctx, void *hwcso)
+v3d_fp_state_bind(struct pipe_context *pctx, void *hwcso)
{
- struct vc5_context *vc5 = vc5_context(pctx);
- vc5->prog.bind_fs = hwcso;
- vc5->dirty |= VC5_DIRTY_UNCOMPILED_FS;
+ struct v3d_context *v3d = v3d_context(pctx);
+ v3d->prog.bind_fs = hwcso;
+ v3d->dirty |= VC5_DIRTY_UNCOMPILED_FS;
}
static void
-vc5_vp_state_bind(struct pipe_context *pctx, void *hwcso)
+v3d_vp_state_bind(struct pipe_context *pctx, void *hwcso)
{
- struct vc5_context *vc5 = vc5_context(pctx);
- vc5->prog.bind_vs = hwcso;
- vc5->dirty |= VC5_DIRTY_UNCOMPILED_VS;
+ struct v3d_context *v3d = v3d_context(pctx);
+ v3d->prog.bind_vs = hwcso;
+ v3d->dirty |= VC5_DIRTY_UNCOMPILED_VS;
}
void
-vc5_program_init(struct pipe_context *pctx)
+v3d_program_init(struct pipe_context *pctx)
{
- struct vc5_context *vc5 = vc5_context(pctx);
+ struct v3d_context *v3d = v3d_context(pctx);
- pctx->create_vs_state = vc5_shader_state_create;
- pctx->delete_vs_state = vc5_shader_state_delete;
+ pctx->create_vs_state = v3d_shader_state_create;
+ pctx->delete_vs_state = v3d_shader_state_delete;
- pctx->create_fs_state = vc5_shader_state_create;
- pctx->delete_fs_state = vc5_shader_state_delete;
+ pctx->create_fs_state = v3d_shader_state_create;
+ pctx->delete_fs_state = v3d_shader_state_delete;
- pctx->bind_fs_state = vc5_fp_state_bind;
- pctx->bind_vs_state = vc5_vp_state_bind;
+ pctx->bind_fs_state = v3d_fp_state_bind;
+ pctx->bind_vs_state = v3d_vp_state_bind;
- vc5->fs_cache = _mesa_hash_table_create(pctx, fs_cache_hash,
+ v3d->fs_cache = _mesa_hash_table_create(pctx, fs_cache_hash,
fs_cache_compare);
- vc5->vs_cache = _mesa_hash_table_create(pctx, vs_cache_hash,
+ v3d->vs_cache = _mesa_hash_table_create(pctx, vs_cache_hash,
vs_cache_compare);
}
void
-vc5_program_fini(struct pipe_context *pctx)
+v3d_program_fini(struct pipe_context *pctx)
{
- struct vc5_context *vc5 = vc5_context(pctx);
+ struct v3d_context *v3d = v3d_context(pctx);
struct hash_entry *entry;
- hash_table_foreach(vc5->fs_cache, entry) {
- struct vc5_compiled_shader *shader = entry->data;
- vc5_bo_unreference(&shader->bo);
+ hash_table_foreach(v3d->fs_cache, entry) {
+ struct v3d_compiled_shader *shader = entry->data;
+ v3d_bo_unreference(&shader->bo);
ralloc_free(shader);
- _mesa_hash_table_remove(vc5->fs_cache, entry);
+ _mesa_hash_table_remove(v3d->fs_cache, entry);
}
- hash_table_foreach(vc5->vs_cache, entry) {
- struct vc5_compiled_shader *shader = entry->data;
- vc5_bo_unreference(&shader->bo);
+ hash_table_foreach(v3d->vs_cache, entry) {
+ struct v3d_compiled_shader *shader = entry->data;
+ v3d_bo_unreference(&shader->bo);
ralloc_free(shader);
- _mesa_hash_table_remove(vc5->vs_cache, entry);
+ _mesa_hash_table_remove(v3d->vs_cache, entry);
}
}
#include "v3d_context.h"
#include "broadcom/cle/v3d_packet_v33_pack.h"
-struct vc5_query
+struct v3d_query
{
enum pipe_query_type type;
- struct vc5_bo *bo;
+ struct v3d_bo *bo;
uint32_t start, end;
};
static struct pipe_query *
-vc5_create_query(struct pipe_context *pctx, unsigned query_type, unsigned index)
+v3d_create_query(struct pipe_context *pctx, unsigned query_type, unsigned index)
{
- struct vc5_query *q = calloc(1, sizeof(*q));
+ struct v3d_query *q = calloc(1, sizeof(*q));
q->type = query_type;
}
static void
-vc5_destroy_query(struct pipe_context *pctx, struct pipe_query *query)
+v3d_destroy_query(struct pipe_context *pctx, struct pipe_query *query)
{
- struct vc5_query *q = (struct vc5_query *)query;
+ struct v3d_query *q = (struct v3d_query *)query;
- vc5_bo_unreference(&q->bo);
+ v3d_bo_unreference(&q->bo);
free(q);
}
static boolean
-vc5_begin_query(struct pipe_context *pctx, struct pipe_query *query)
+v3d_begin_query(struct pipe_context *pctx, struct pipe_query *query)
{
- struct vc5_context *vc5 = vc5_context(pctx);
- struct vc5_query *q = (struct vc5_query *)query;
+ struct v3d_context *v3d = v3d_context(pctx);
+ struct v3d_query *q = (struct v3d_query *)query;
switch (q->type) {
case PIPE_QUERY_PRIMITIVES_GENERATED:
- q->start = vc5->prims_generated;
+ q->start = v3d->prims_generated;
break;
case PIPE_QUERY_PRIMITIVES_EMITTED:
- q->start = vc5->tf_prims_generated;
+ q->start = v3d->tf_prims_generated;
break;
default:
- q->bo = vc5_bo_alloc(vc5->screen, 4096, "query");
+ q->bo = v3d_bo_alloc(v3d->screen, 4096, "query");
- uint32_t *map = vc5_bo_map(q->bo);
+ uint32_t *map = v3d_bo_map(q->bo);
*map = 0;
- vc5->current_oq = q->bo;
- vc5->dirty |= VC5_DIRTY_OQ;
+ v3d->current_oq = q->bo;
+ v3d->dirty |= VC5_DIRTY_OQ;
break;
}
}
static bool
-vc5_end_query(struct pipe_context *pctx, struct pipe_query *query)
+v3d_end_query(struct pipe_context *pctx, struct pipe_query *query)
{
- struct vc5_context *vc5 = vc5_context(pctx);
- struct vc5_query *q = (struct vc5_query *)query;
+ struct v3d_context *v3d = v3d_context(pctx);
+ struct v3d_query *q = (struct v3d_query *)query;
switch (q->type) {
case PIPE_QUERY_PRIMITIVES_GENERATED:
- q->end = vc5->prims_generated;
+ q->end = v3d->prims_generated;
break;
case PIPE_QUERY_PRIMITIVES_EMITTED:
- q->end = vc5->tf_prims_generated;
+ q->end = v3d->tf_prims_generated;
break;
default:
- vc5->current_oq = NULL;
- vc5->dirty |= VC5_DIRTY_OQ;
+ v3d->current_oq = NULL;
+ v3d->dirty |= VC5_DIRTY_OQ;
break;
}
}
static boolean
-vc5_get_query_result(struct pipe_context *pctx, struct pipe_query *query,
+v3d_get_query_result(struct pipe_context *pctx, struct pipe_query *query,
boolean wait, union pipe_query_result *vresult)
{
- struct vc5_query *q = (struct vc5_query *)query;
+ struct v3d_query *q = (struct v3d_query *)query;
uint32_t result = 0;
if (q->bo) {
/* XXX: Only flush the jobs using this BO. */
- vc5_flush(pctx);
+ v3d_flush(pctx);
if (wait) {
- if (!vc5_bo_wait(q->bo, ~0ull, "query"))
+ if (!v3d_bo_wait(q->bo, ~0ull, "query"))
return false;
} else {
- if (!vc5_bo_wait(q->bo, 0, "query"))
+ if (!v3d_bo_wait(q->bo, 0, "query"))
return false;
}
/* XXX: Sum up per-core values. */
- uint32_t *map = vc5_bo_map(q->bo);
+ uint32_t *map = v3d_bo_map(q->bo);
result = *map;
- vc5_bo_unreference(&q->bo);
+ v3d_bo_unreference(&q->bo);
}
switch (q->type) {
}
static void
-vc5_set_active_query_state(struct pipe_context *pctx, boolean enable)
+v3d_set_active_query_state(struct pipe_context *pctx, boolean enable)
{
- struct vc5_context *vc5 = vc5_context(pctx);
+ struct v3d_context *v3d = v3d_context(pctx);
- vc5->active_queries = enable;
- vc5->dirty |= VC5_DIRTY_OQ;
- vc5->dirty |= VC5_DIRTY_STREAMOUT;
+ v3d->active_queries = enable;
+ v3d->dirty |= VC5_DIRTY_OQ;
+ v3d->dirty |= VC5_DIRTY_STREAMOUT;
}
void
-vc5_query_init(struct pipe_context *pctx)
+v3d_query_init(struct pipe_context *pctx)
{
- pctx->create_query = vc5_create_query;
- pctx->destroy_query = vc5_destroy_query;
- pctx->begin_query = vc5_begin_query;
- pctx->end_query = vc5_end_query;
- pctx->get_query_result = vc5_get_query_result;
- pctx->set_active_query_state = vc5_set_active_query_state;
+ pctx->create_query = v3d_create_query;
+ pctx->destroy_query = v3d_destroy_query;
+ pctx->begin_query = v3d_begin_query;
+ pctx->end_query = v3d_end_query;
+ pctx->get_query_result = v3d_get_query_result;
+ pctx->set_active_query_state = v3d_set_active_query_state;
}
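A frontend exercises these hooks through the standard Gallium sequence; a minimal sketch of a primitives-generated query against this implementation:
struct pipe_query *q =
        pctx->create_query(pctx, PIPE_QUERY_PRIMITIVES_GENERATED, 0);
pctx->begin_query(pctx, q);
/* ... draw calls ... */
pctx->end_query(pctx, q);
union pipe_query_result result;
pctx->get_query_result(pctx, q, true /* wait */, &result);
pctx->destroy_query(pctx, q);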
#include "broadcom/cle/v3d_packet_v33_pack.h"
static void
-vc5_debug_resource_layout(struct vc5_resource *rsc, const char *caller)
+v3d_debug_resource_layout(struct v3d_resource *rsc, const char *caller)
{
if (!(V3D_DEBUG & V3D_DEBUG_SURFACE))
return;
};
for (int i = 0; i <= prsc->last_level; i++) {
- struct vc5_resource_slice *slice = &rsc->slices[i];
+ struct v3d_resource_slice *slice = &rsc->slices[i];
int level_width = slice->stride / rsc->cpp;
int level_height = slice->padded_height;
}
static bool
-vc5_resource_bo_alloc(struct vc5_resource *rsc)
+v3d_resource_bo_alloc(struct v3d_resource *rsc)
{
struct pipe_resource *prsc = &rsc->base;
struct pipe_screen *pscreen = prsc->screen;
- struct vc5_bo *bo;
+ struct v3d_bo *bo;
- bo = vc5_bo_alloc(vc5_screen(pscreen), rsc->size, "resource");
+ bo = v3d_bo_alloc(v3d_screen(pscreen), rsc->size, "resource");
if (bo) {
- vc5_bo_unreference(&rsc->bo);
+ v3d_bo_unreference(&rsc->bo);
rsc->bo = bo;
- vc5_debug_resource_layout(rsc, "alloc");
+ v3d_debug_resource_layout(rsc, "alloc");
return true;
} else {
return false;
}
static void
-vc5_resource_transfer_unmap(struct pipe_context *pctx,
+v3d_resource_transfer_unmap(struct pipe_context *pctx,
struct pipe_transfer *ptrans)
{
- struct vc5_context *vc5 = vc5_context(pctx);
- struct vc5_transfer *trans = vc5_transfer(ptrans);
+ struct v3d_context *v3d = v3d_context(pctx);
+ struct v3d_transfer *trans = v3d_transfer(ptrans);
if (trans->map) {
- struct vc5_resource *rsc = vc5_resource(ptrans->resource);
- struct vc5_resource_slice *slice = &rsc->slices[ptrans->level];
+ struct v3d_resource *rsc = v3d_resource(ptrans->resource);
+ struct v3d_resource_slice *slice = &rsc->slices[ptrans->level];
if (ptrans->usage & PIPE_TRANSFER_WRITE) {
for (int z = 0; z < ptrans->box.depth; z++) {
void *dst = rsc->bo->map +
- vc5_layer_offset(&rsc->base,
+ v3d_layer_offset(&rsc->base,
ptrans->level,
ptrans->box.z + z);
- vc5_store_tiled_image(dst,
+ v3d_store_tiled_image(dst,
slice->stride,
(trans->map +
ptrans->stride *
}
pipe_resource_reference(&ptrans->resource, NULL);
- slab_free(&vc5->transfer_pool, ptrans);
+ slab_free(&v3d->transfer_pool, ptrans);
}
static void *
-vc5_resource_transfer_map(struct pipe_context *pctx,
+v3d_resource_transfer_map(struct pipe_context *pctx,
struct pipe_resource *prsc,
unsigned level, unsigned usage,
const struct pipe_box *box,
struct pipe_transfer **pptrans)
{
- struct vc5_context *vc5 = vc5_context(pctx);
- struct vc5_resource *rsc = vc5_resource(prsc);
- struct vc5_transfer *trans;
+ struct v3d_context *v3d = v3d_context(pctx);
+ struct v3d_resource *rsc = v3d_resource(prsc);
+ struct v3d_transfer *trans;
struct pipe_transfer *ptrans;
enum pipe_format format = prsc->format;
char *buf;
}
if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
- if (vc5_resource_bo_alloc(rsc)) {
+ if (v3d_resource_bo_alloc(rsc)) {
/* If it might be bound as one of our vertex buffers
* or UBOs, make sure we re-emit vertex buffer state
* or uniforms.
*/
if (prsc->bind & PIPE_BIND_VERTEX_BUFFER)
- vc5->dirty |= VC5_DIRTY_VTXBUF;
+ v3d->dirty |= VC5_DIRTY_VTXBUF;
if (prsc->bind & PIPE_BIND_CONSTANT_BUFFER)
- vc5->dirty |= VC5_DIRTY_CONSTBUF;
+ v3d->dirty |= VC5_DIRTY_CONSTBUF;
} else {
/* If we failed to reallocate, flush users so that we
* don't violate any syncing requirements.
*/
- vc5_flush_jobs_reading_resource(vc5, prsc);
+ v3d_flush_jobs_reading_resource(v3d, prsc);
}
} else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
/* If we're writing and the buffer is being used by the CL, we
* have to flush the CL first. If we're only reading, we need
* to flush if the CL has written our buffer.
*/
if (usage & PIPE_TRANSFER_WRITE)
- vc5_flush_jobs_reading_resource(vc5, prsc);
+ v3d_flush_jobs_reading_resource(v3d, prsc);
else
- vc5_flush_jobs_writing_resource(vc5, prsc);
+ v3d_flush_jobs_writing_resource(v3d, prsc);
}
if (usage & PIPE_TRANSFER_WRITE) {
rsc->initialized_buffers = ~0;
}
- trans = slab_alloc(&vc5->transfer_pool);
+ trans = slab_alloc(&v3d->transfer_pool);
if (!trans)
return NULL;
*/
if (usage & PIPE_TRANSFER_UNSYNCHRONIZED)
- buf = vc5_bo_map_unsynchronized(rsc->bo);
+ buf = v3d_bo_map_unsynchronized(rsc->bo);
else
- buf = vc5_bo_map(rsc->bo);
+ buf = v3d_bo_map(rsc->bo);
if (!buf) {
fprintf(stderr, "Failed to map bo\n");
goto fail;
ptrans->box.height = DIV_ROUND_UP(ptrans->box.height,
util_format_get_blockheight(format));
- struct vc5_resource_slice *slice = &rsc->slices[level];
+ struct v3d_resource_slice *slice = &rsc->slices[level];
if (rsc->tiled) {
/* No direct mappings of tiled, since we need to manually
* tile/untile.
if (usage & PIPE_TRANSFER_READ) {
for (int z = 0; z < ptrans->box.depth; z++) {
void *src = rsc->bo->map +
- vc5_layer_offset(&rsc->base,
+ v3d_layer_offset(&rsc->base,
ptrans->level,
ptrans->box.z + z);
- vc5_load_tiled_image((trans->map +
+ v3d_load_tiled_image((trans->map +
ptrans->stride *
ptrans->box.height * z),
ptrans->stride,
fail:
- vc5_resource_transfer_unmap(pctx, ptrans);
+ v3d_resource_transfer_unmap(pctx, ptrans);
return NULL;
}
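For orientation, this map/unmap pair is reached through the transfer hooks installed below via u_transfer_helper; a typical texture upload looks roughly like this (src, src_stride, and the 64x64 size are placeholders):
int width = 64, height = 64;
struct pipe_transfer *xfer;
struct pipe_box box;
u_box_2d(0, 0, width, height, &box);
uint8_t *map = pctx->transfer_map(pctx, prsc, 0 /* level */,
                                  PIPE_TRANSFER_WRITE, &box, &xfer);
for (int y = 0; y < height; y++)
        memcpy(map + y * xfer->stride, src + y * src_stride, width * 4 /* cpp */);
pctx->transfer_unmap(pctx, xfer);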
static void
-vc5_resource_destroy(struct pipe_screen *pscreen,
+v3d_resource_destroy(struct pipe_screen *pscreen,
struct pipe_resource *prsc)
{
- struct vc5_resource *rsc = vc5_resource(prsc);
+ struct v3d_resource *rsc = v3d_resource(prsc);
- vc5_bo_unreference(&rsc->bo);
+ v3d_bo_unreference(&rsc->bo);
free(rsc);
}
static boolean
-vc5_resource_get_handle(struct pipe_screen *pscreen,
+v3d_resource_get_handle(struct pipe_screen *pscreen,
struct pipe_context *pctx,
struct pipe_resource *prsc,
struct winsys_handle *whandle,
unsigned usage)
{
- struct vc5_resource *rsc = vc5_resource(prsc);
- struct vc5_bo *bo = rsc->bo;
+ struct v3d_resource *rsc = v3d_resource(prsc);
+ struct v3d_bo *bo = rsc->bo;
whandle->stride = rsc->slices[0].stride;
switch (whandle->type) {
case DRM_API_HANDLE_TYPE_SHARED:
- return vc5_bo_flink(bo, &whandle->handle);
+ return v3d_bo_flink(bo, &whandle->handle);
case DRM_API_HANDLE_TYPE_KMS:
whandle->handle = bo->handle;
return TRUE;
case DRM_API_HANDLE_TYPE_FD:
- whandle->handle = vc5_bo_get_dmabuf(bo);
+ whandle->handle = v3d_bo_get_dmabuf(bo);
return whandle->handle != -1;
}
* between columns of UIF blocks.
*/
static uint32_t
-vc5_get_ub_pad(struct vc5_resource *rsc, uint32_t height)
+v3d_get_ub_pad(struct v3d_resource *rsc, uint32_t height)
{
- uint32_t utile_h = vc5_utile_height(rsc->cpp);
+ uint32_t utile_h = v3d_utile_height(rsc->cpp);
uint32_t uif_block_h = utile_h * 2;
uint32_t height_ub = height / uif_block_h;
}
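A concrete reading of the unit math above, for a hypothetical 32bpp texture (where a 64-byte microtile is 4x4 pixels):
/* cpp = 4: utile is 4x4 pixels (64 B), UIF block is 8x8 pixels (256 B).
 * A level 96 pixels tall spans height_ub = 96 / 8 = 12 rows of UIF
 * blocks, which v3d_get_ub_pad may pad further to avoid page-cache
 * aliasing between columns of UIF blocks.
 */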
static void
-vc5_setup_slices(struct vc5_resource *rsc)
+v3d_setup_slices(struct v3d_resource *rsc)
{
struct pipe_resource *prsc = &rsc->base;
uint32_t width = prsc->width0;
uint32_t pot_height = 2 * util_next_power_of_two(u_minify(height, 1));
uint32_t pot_depth = 2 * util_next_power_of_two(u_minify(depth, 1));
uint32_t offset = 0;
- uint32_t utile_w = vc5_utile_width(rsc->cpp);
- uint32_t utile_h = vc5_utile_height(rsc->cpp);
+ uint32_t utile_w = v3d_utile_width(rsc->cpp);
+ uint32_t utile_h = v3d_utile_height(rsc->cpp);
uint32_t uif_block_w = utile_w * 2;
uint32_t uif_block_h = utile_h * 2;
uint32_t block_width = util_format_get_blockwidth(prsc->format);
bool uif_top = msaa;
for (int i = prsc->last_level; i >= 0; i--) {
- struct vc5_resource_slice *slice = &rsc->slices[i];
+ struct v3d_resource_slice *slice = &rsc->slices[i];
uint32_t level_width, level_height, level_depth;
if (i < 2) {
level_height = align(level_height,
uif_block_h);
- slice->ub_pad = vc5_get_ub_pad(rsc,
+ slice->ub_pad = v3d_get_ub_pad(rsc,
level_height);
level_height += slice->ub_pad * uif_block_h;
}
uint32_t
-vc5_layer_offset(struct pipe_resource *prsc, uint32_t level, uint32_t layer)
+v3d_layer_offset(struct pipe_resource *prsc, uint32_t level, uint32_t layer)
{
- struct vc5_resource *rsc = vc5_resource(prsc);
- struct vc5_resource_slice *slice = &rsc->slices[level];
+ struct v3d_resource *rsc = v3d_resource(prsc);
+ struct v3d_resource_slice *slice = &rsc->slices[level];
if (prsc->target == PIPE_TEXTURE_3D)
return slice->offset + layer * slice->size;
return slice->offset + layer * rsc->cube_map_stride;
}
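Usage is straightforward; for example (values hypothetical):
/* Byte offset of mip level 2 on cube face 3: */
uint32_t offset = v3d_layer_offset(prsc, 2 /* level */, 3 /* layer */);
/* equals rsc->slices[2].offset + 3 * rsc->cube_map_stride for non-3D targets */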
-static struct vc5_resource *
-vc5_resource_setup(struct pipe_screen *pscreen,
+static struct v3d_resource *
+v3d_resource_setup(struct pipe_screen *pscreen,
const struct pipe_resource *tmpl)
{
- struct vc5_screen *screen = vc5_screen(pscreen);
- struct vc5_resource *rsc = CALLOC_STRUCT(vc5_resource);
+ struct v3d_screen *screen = v3d_screen(pscreen);
+ struct v3d_resource *rsc = CALLOC_STRUCT(v3d_resource);
if (!rsc)
return NULL;
struct pipe_resource *prsc = &rsc->base;
if (screen->devinfo.ver < 40 && prsc->nr_samples > 1)
rsc->cpp *= prsc->nr_samples;
} else {
- assert(vc5_rt_format_supported(&screen->devinfo, prsc->format));
+ assert(v3d_rt_format_supported(&screen->devinfo, prsc->format));
uint32_t output_image_format =
- vc5_get_rt_format(&screen->devinfo, prsc->format);
+ v3d_get_rt_format(&screen->devinfo, prsc->format);
uint32_t internal_type;
uint32_t internal_bpp;
- vc5_get_internal_type_bpp_for_output_format(&screen->devinfo,
+ v3d_get_internal_type_bpp_for_output_format(&screen->devinfo,
output_image_format,
&internal_type,
&internal_bpp);
}
static struct pipe_resource *
-vc5_resource_create_with_modifiers(struct pipe_screen *pscreen,
+v3d_resource_create_with_modifiers(struct pipe_screen *pscreen,
const struct pipe_resource *tmpl,
const uint64_t *modifiers,
int count)
{
bool linear_ok = find_modifier(DRM_FORMAT_MOD_LINEAR, modifiers, count);
- struct vc5_resource *rsc = vc5_resource_setup(pscreen, tmpl);
+ struct v3d_resource *rsc = v3d_resource_setup(pscreen, tmpl);
struct pipe_resource *prsc = &rsc->base;
/* Use a tiled layout if we can, for better 3D performance. */
bool should_tile = true;
/* Scanout BOs for simulator need to be linear for interaction with
* i965.
*/
- if (using_vc5_simulator &&
+ if (using_v3d_simulator &&
tmpl->bind & (PIPE_BIND_SHARED | PIPE_BIND_SCANOUT))
should_tile = false;
rsc->internal_format = prsc->format;
- vc5_setup_slices(rsc);
- if (!vc5_resource_bo_alloc(rsc))
+ v3d_setup_slices(rsc);
+ if (!v3d_resource_bo_alloc(rsc))
goto fail;
return prsc;
fail:
- vc5_resource_destroy(pscreen, prsc);
+ v3d_resource_destroy(pscreen, prsc);
return NULL;
}
struct pipe_resource *
-vc5_resource_create(struct pipe_screen *pscreen,
+v3d_resource_create(struct pipe_screen *pscreen,
const struct pipe_resource *tmpl)
{
const uint64_t mod = DRM_FORMAT_MOD_INVALID;
- return vc5_resource_create_with_modifiers(pscreen, tmpl, &mod, 1);
+ return v3d_resource_create_with_modifiers(pscreen, tmpl, &mod, 1);
}
static struct pipe_resource *
-vc5_resource_from_handle(struct pipe_screen *pscreen,
+v3d_resource_from_handle(struct pipe_screen *pscreen,
const struct pipe_resource *tmpl,
struct winsys_handle *whandle,
unsigned usage)
{
- struct vc5_screen *screen = vc5_screen(pscreen);
- struct vc5_resource *rsc = vc5_resource_setup(pscreen, tmpl);
+ struct v3d_screen *screen = v3d_screen(pscreen);
+ struct v3d_resource *rsc = v3d_resource_setup(pscreen, tmpl);
struct pipe_resource *prsc = &rsc->base;
- struct vc5_resource_slice *slice = &rsc->slices[0];
+ struct v3d_resource_slice *slice = &rsc->slices[0];
if (!rsc)
return NULL;
switch (whandle->type) {
case DRM_API_HANDLE_TYPE_SHARED:
- rsc->bo = vc5_bo_open_name(screen,
+ rsc->bo = v3d_bo_open_name(screen,
whandle->handle, whandle->stride);
break;
case DRM_API_HANDLE_TYPE_FD:
- rsc->bo = vc5_bo_open_dmabuf(screen,
+ rsc->bo = v3d_bo_open_dmabuf(screen,
whandle->handle, whandle->stride);
break;
default:
rsc->internal_format = prsc->format;
- vc5_setup_slices(rsc);
- vc5_debug_resource_layout(rsc, "import");
+ v3d_setup_slices(rsc);
+ v3d_debug_resource_layout(rsc, "import");
if (whandle->stride != slice->stride) {
static bool warned = false;
return prsc;
fail:
- vc5_resource_destroy(pscreen, prsc);
+ v3d_resource_destroy(pscreen, prsc);
return NULL;
}
static struct pipe_surface *
-vc5_create_surface(struct pipe_context *pctx,
+v3d_create_surface(struct pipe_context *pctx,
struct pipe_resource *ptex,
const struct pipe_surface *surf_tmpl)
{
- struct vc5_context *vc5 = vc5_context(pctx);
- struct vc5_screen *screen = vc5->screen;
- struct vc5_surface *surface = CALLOC_STRUCT(vc5_surface);
- struct vc5_resource *rsc = vc5_resource(ptex);
+ struct v3d_context *v3d = v3d_context(pctx);
+ struct v3d_screen *screen = v3d->screen;
+ struct v3d_surface *surface = CALLOC_STRUCT(v3d_surface);
+ struct v3d_resource *rsc = v3d_resource(ptex);
if (!surface)
return NULL;
struct pipe_surface *psurf = &surface->base;
unsigned level = surf_tmpl->u.tex.level;
- struct vc5_resource_slice *slice = &rsc->slices[level];
+ struct v3d_resource_slice *slice = &rsc->slices[level];
pipe_reference_init(&psurf->reference, 1);
pipe_resource_reference(&psurf->texture, ptex);
psurf->u.tex.first_layer = surf_tmpl->u.tex.first_layer;
psurf->u.tex.last_layer = surf_tmpl->u.tex.last_layer;
- surface->offset = vc5_layer_offset(ptex, level,
+ surface->offset = v3d_layer_offset(ptex, level,
psurf->u.tex.first_layer);
surface->tiling = slice->tiling;
- surface->format = vc5_get_rt_format(&screen->devinfo, psurf->format);
+ surface->format = v3d_get_rt_format(&screen->devinfo, psurf->format);
if (util_format_is_depth_or_stencil(psurf->format)) {
switch (psurf->format) {
}
} else {
uint32_t bpp, type;
- vc5_get_internal_type_bpp_for_output_format(&screen->devinfo,
+ v3d_get_internal_type_bpp_for_output_format(&screen->devinfo,
surface->format,
&type, &bpp);
surface->internal_type = type;
surface->tiling == VC5_TILING_UIF_XOR) {
surface->padded_height_of_output_image_in_uif_blocks =
(slice->padded_height /
- (2 * vc5_utile_height(rsc->cpp)));
+ (2 * v3d_utile_height(rsc->cpp)));
}
if (rsc->separate_stencil) {
surface->separate_stencil =
- vc5_create_surface(pctx, &rsc->separate_stencil->base,
+ v3d_create_surface(pctx, &rsc->separate_stencil->base,
surf_tmpl);
}
}
static void
-vc5_surface_destroy(struct pipe_context *pctx, struct pipe_surface *psurf)
+v3d_surface_destroy(struct pipe_context *pctx, struct pipe_surface *psurf)
{
- struct vc5_surface *surf = vc5_surface(psurf);
+ struct v3d_surface *surf = v3d_surface(psurf);
if (surf->separate_stencil)
pipe_surface_reference(&surf->separate_stencil, NULL);
}
static void
-vc5_flush_resource(struct pipe_context *pctx, struct pipe_resource *resource)
+v3d_flush_resource(struct pipe_context *pctx, struct pipe_resource *resource)
{
/* All calls to flush_resource are followed by a flush of the context,
* so there's nothing to do.
}
static enum pipe_format
-vc5_resource_get_internal_format(struct pipe_resource *prsc)
+v3d_resource_get_internal_format(struct pipe_resource *prsc)
{
- return vc5_resource(prsc)->internal_format;
+ return v3d_resource(prsc)->internal_format;
}
static void
-vc5_resource_set_stencil(struct pipe_resource *prsc,
+v3d_resource_set_stencil(struct pipe_resource *prsc,
struct pipe_resource *stencil)
{
- vc5_resource(prsc)->separate_stencil = vc5_resource(stencil);
+ v3d_resource(prsc)->separate_stencil = v3d_resource(stencil);
}
static struct pipe_resource *
-vc5_resource_get_stencil(struct pipe_resource *prsc)
+v3d_resource_get_stencil(struct pipe_resource *prsc)
{
- struct vc5_resource *rsc = vc5_resource(prsc);
+ struct v3d_resource *rsc = v3d_resource(prsc);
return &rsc->separate_stencil->base;
}
static const struct u_transfer_vtbl transfer_vtbl = {
- .resource_create = vc5_resource_create,
- .resource_destroy = vc5_resource_destroy,
- .transfer_map = vc5_resource_transfer_map,
- .transfer_unmap = vc5_resource_transfer_unmap,
+ .resource_create = v3d_resource_create,
+ .resource_destroy = v3d_resource_destroy,
+ .transfer_map = v3d_resource_transfer_map,
+ .transfer_unmap = v3d_resource_transfer_unmap,
.transfer_flush_region = u_default_transfer_flush_region,
- .get_internal_format = vc5_resource_get_internal_format,
- .set_stencil = vc5_resource_set_stencil,
- .get_stencil = vc5_resource_get_stencil,
+ .get_internal_format = v3d_resource_get_internal_format,
+ .set_stencil = v3d_resource_set_stencil,
+ .get_stencil = v3d_resource_get_stencil,
};
void
-vc5_resource_screen_init(struct pipe_screen *pscreen)
+v3d_resource_screen_init(struct pipe_screen *pscreen)
{
pscreen->resource_create_with_modifiers =
- vc5_resource_create_with_modifiers;
+ v3d_resource_create_with_modifiers;
pscreen->resource_create = u_transfer_helper_resource_create;
- pscreen->resource_from_handle = vc5_resource_from_handle;
- pscreen->resource_get_handle = vc5_resource_get_handle;
+ pscreen->resource_from_handle = v3d_resource_from_handle;
+ pscreen->resource_get_handle = v3d_resource_get_handle;
pscreen->resource_destroy = u_transfer_helper_resource_destroy;
pscreen->transfer_helper = u_transfer_helper_create(&transfer_vtbl,
true, true, true);
}
void
-vc5_resource_context_init(struct pipe_context *pctx)
+v3d_resource_context_init(struct pipe_context *pctx)
{
pctx->transfer_map = u_transfer_helper_transfer_map;
pctx->transfer_flush_region = u_transfer_helper_transfer_flush_region;
pctx->transfer_unmap = u_transfer_helper_transfer_unmap;
pctx->buffer_subdata = u_default_buffer_subdata;
pctx->texture_subdata = u_default_texture_subdata;
- pctx->create_surface = vc5_create_surface;
- pctx->surface_destroy = vc5_surface_destroy;
+ pctx->create_surface = v3d_create_surface;
+ pctx->surface_destroy = v3d_surface_destroy;
pctx->resource_copy_region = util_resource_copy_region;
- pctx->blit = vc5_blit;
- pctx->flush_resource = vc5_flush_resource;
+ pctx->blit = v3d_blit;
+ pctx->flush_resource = v3d_flush_resource;
}
*/
/**
- * Tiling mode enum used for vc5_resource.c, which maps directly to the Memory
+ * Tiling mode enum used for v3d_resource.c, which maps directly to the Memory
* Format field of render target and Z/Stencil config.
*/
-enum vc5_tiling_mode {
+enum v3d_tiling_mode {
/* Untiled resources. Not valid as texture inputs. */
VC5_TILING_RASTER,
VC5_TILING_UIF_XOR,
};
-struct vc5_transfer {
+struct v3d_transfer {
struct pipe_transfer base;
void *map;
};
-struct vc5_resource_slice {
+struct v3d_resource_slice {
uint32_t offset;
uint32_t stride;
uint32_t padded_height;
*/
uint32_t size;
uint8_t ub_pad;
- enum vc5_tiling_mode tiling;
+ enum v3d_tiling_mode tiling;
};
-struct vc5_surface {
+struct v3d_surface {
struct pipe_surface base;
uint32_t offset;
- enum vc5_tiling_mode tiling;
+ enum v3d_tiling_mode tiling;
/**
* Output image format for TILE_RENDERING_MODE_CONFIGURATION
*/
struct pipe_surface *separate_stencil;
};
-struct vc5_resource {
+struct v3d_resource {
struct pipe_resource base;
- struct vc5_bo *bo;
- struct vc5_resource_slice slices[VC5_MAX_MIP_LEVELS];
+ struct v3d_bo *bo;
+ struct v3d_resource_slice slices[VC5_MAX_MIP_LEVELS];
uint32_t cube_map_stride;
uint32_t size;
int cpp;
enum pipe_format internal_format;
/* Resource storing the S8 part of a Z32F_S8 resource, or NULL. */
- struct vc5_resource *separate_stencil;
+ struct v3d_resource *separate_stencil;
};
-static inline struct vc5_resource *
-vc5_resource(struct pipe_resource *prsc)
+static inline struct v3d_resource *
+v3d_resource(struct pipe_resource *prsc)
{
- return (struct vc5_resource *)prsc;
+ return (struct v3d_resource *)prsc;
}
-static inline struct vc5_surface *
-vc5_surface(struct pipe_surface *psurf)
+static inline struct v3d_surface *
+v3d_surface(struct pipe_surface *psurf)
{
- return (struct vc5_surface *)psurf;
+ return (struct v3d_surface *)psurf;
}
-static inline struct vc5_transfer *
-vc5_transfer(struct pipe_transfer *ptrans)
+static inline struct v3d_transfer *
+v3d_transfer(struct pipe_transfer *ptrans)
{
- return (struct vc5_transfer *)ptrans;
+ return (struct v3d_transfer *)ptrans;
}
-void vc5_resource_screen_init(struct pipe_screen *pscreen);
-void vc5_resource_context_init(struct pipe_context *pctx);
-struct pipe_resource *vc5_resource_create(struct pipe_screen *pscreen,
+void v3d_resource_screen_init(struct pipe_screen *pscreen);
+void v3d_resource_context_init(struct pipe_context *pctx);
+struct pipe_resource *v3d_resource_create(struct pipe_screen *pscreen,
const struct pipe_resource *tmpl);
-uint32_t vc5_layer_offset(struct pipe_resource *prsc, uint32_t level,
+uint32_t v3d_layer_offset(struct pipe_resource *prsc, uint32_t level,
uint32_t layer);
#include "compiler/v3d_compiler.h"
static const char *
-vc5_screen_get_name(struct pipe_screen *pscreen)
+v3d_screen_get_name(struct pipe_screen *pscreen)
{
- struct vc5_screen *screen = vc5_screen(pscreen);
+ struct v3d_screen *screen = v3d_screen(pscreen);
if (!screen->name) {
screen->name = ralloc_asprintf(screen,
}
static const char *
-vc5_screen_get_vendor(struct pipe_screen *pscreen)
+v3d_screen_get_vendor(struct pipe_screen *pscreen)
{
return "Broadcom";
}
static void
-vc5_screen_destroy(struct pipe_screen *pscreen)
+v3d_screen_destroy(struct pipe_screen *pscreen)
{
- struct vc5_screen *screen = vc5_screen(pscreen);
+ struct v3d_screen *screen = v3d_screen(pscreen);
util_hash_table_destroy(screen->bo_handles);
- vc5_bufmgr_destroy(pscreen);
+ v3d_bufmgr_destroy(pscreen);
slab_destroy_parent(&screen->transfer_pool);
- if (using_vc5_simulator)
- vc5_simulator_destroy(screen);
+ if (using_v3d_simulator)
+ v3d_simulator_destroy(screen);
v3d_compiler_free(screen->compiler);
}
static int
-vc5_screen_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
+v3d_screen_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
{
- struct vc5_screen *screen = vc5_screen(pscreen);
+ struct v3d_screen *screen = v3d_screen(pscreen);
switch (param) {
/* Supported features (boolean caps). */
}
static float
-vc5_screen_get_paramf(struct pipe_screen *pscreen, enum pipe_capf param)
+v3d_screen_get_paramf(struct pipe_screen *pscreen, enum pipe_capf param)
{
switch (param) {
case PIPE_CAPF_MAX_LINE_WIDTH:
}
static int
-vc5_screen_get_shader_param(struct pipe_screen *pscreen, unsigned shader,
+v3d_screen_get_shader_param(struct pipe_screen *pscreen, unsigned shader,
enum pipe_shader_cap param)
{
if (shader != PIPE_SHADER_VERTEX &&
}
static boolean
-vc5_screen_is_format_supported(struct pipe_screen *pscreen,
+v3d_screen_is_format_supported(struct pipe_screen *pscreen,
enum pipe_format format,
enum pipe_texture_target target,
unsigned sample_count,
unsigned usage)
{
- struct vc5_screen *screen = vc5_screen(pscreen);
+ struct v3d_screen *screen = v3d_screen(pscreen);
if (sample_count > 1 && sample_count != VC5_MAX_SAMPLES)
return FALSE;
}
if ((usage & PIPE_BIND_RENDER_TARGET) &&
- !vc5_rt_format_supported(&screen->devinfo, format)) {
+ !v3d_rt_format_supported(&screen->devinfo, format)) {
return FALSE;
}
if ((usage & PIPE_BIND_SAMPLER_VIEW) &&
- !vc5_tex_format_supported(&screen->devinfo, format)) {
+ !v3d_tex_format_supported(&screen->devinfo, format)) {
return FALSE;
}
}
static bool
-vc5_get_device_info(struct vc5_screen *screen)
+v3d_get_device_info(struct v3d_screen *screen)
{
struct drm_v3d_get_param ident0 = {
.param = DRM_V3D_PARAM_V3D_CORE0_IDENT0,
};
int ret;
- ret = vc5_ioctl(screen->fd, DRM_IOCTL_V3D_GET_PARAM, &ident0);
+ ret = v3d_ioctl(screen->fd, DRM_IOCTL_V3D_GET_PARAM, &ident0);
if (ret != 0) {
fprintf(stderr, "Couldn't get V3D core IDENT0: %s\n",
strerror(errno));
return false;
}
- ret = vc5_ioctl(screen->fd, DRM_IOCTL_V3D_GET_PARAM, &ident1);
+ ret = v3d_ioctl(screen->fd, DRM_IOCTL_V3D_GET_PARAM, &ident1);
if (ret != 0) {
fprintf(stderr, "Couldn't get V3D core IDENT1: %s\n",
strerror(errno));
}
static const void *
-vc5_screen_get_compiler_options(struct pipe_screen *pscreen,
+v3d_screen_get_compiler_options(struct pipe_screen *pscreen,
enum pipe_shader_ir ir, unsigned shader)
{
return &v3d_nir_options;
struct pipe_screen *
v3d_screen_create(int fd)
{
- struct vc5_screen *screen = rzalloc(NULL, struct vc5_screen);
+ struct v3d_screen *screen = rzalloc(NULL, struct v3d_screen);
struct pipe_screen *pscreen;
pscreen = &screen->base;
- pscreen->destroy = vc5_screen_destroy;
- pscreen->get_param = vc5_screen_get_param;
- pscreen->get_paramf = vc5_screen_get_paramf;
- pscreen->get_shader_param = vc5_screen_get_shader_param;
- pscreen->context_create = vc5_context_create;
- pscreen->is_format_supported = vc5_screen_is_format_supported;
+ pscreen->destroy = v3d_screen_destroy;
+ pscreen->get_param = v3d_screen_get_param;
+ pscreen->get_paramf = v3d_screen_get_paramf;
+ pscreen->get_shader_param = v3d_screen_get_shader_param;
+ pscreen->context_create = v3d_context_create;
+ pscreen->is_format_supported = v3d_screen_is_format_supported;
screen->fd = fd;
list_inithead(&screen->bo_cache.time_list);
screen->bo_handles = util_hash_table_create(handle_hash, handle_compare);
#if defined(USE_V3D_SIMULATOR)
- vc5_simulator_init(screen);
+ v3d_simulator_init(screen);
#endif
- if (!vc5_get_device_info(screen))
+ if (!v3d_get_device_info(screen))
goto fail;
- slab_create_parent(&screen->transfer_pool, sizeof(struct vc5_transfer), 16);
+ slab_create_parent(&screen->transfer_pool, sizeof(struct v3d_transfer), 16);
- vc5_fence_init(screen);
+ v3d_fence_init(screen);
v3d_process_debug_variable();
- vc5_resource_screen_init(pscreen);
+ v3d_resource_screen_init(pscreen);
screen->compiler = v3d_compiler_init(&screen->devinfo);
- pscreen->get_name = vc5_screen_get_name;
- pscreen->get_vendor = vc5_screen_get_vendor;
- pscreen->get_device_vendor = vc5_screen_get_vendor;
- pscreen->get_compiler_options = vc5_screen_get_compiler_options;
+ pscreen->get_name = v3d_screen_get_name;
+ pscreen->get_vendor = v3d_screen_get_vendor;
+ pscreen->get_device_vendor = v3d_screen_get_vendor;
+ pscreen->get_compiler_options = v3d_screen_get_compiler_options;
return pscreen;
#include "broadcom/common/v3d_debug.h"
#include "broadcom/common/v3d_device_info.h"
-struct vc5_bo;
+struct v3d_bo;
#define VC5_MAX_MIP_LEVELS 12
#define VC5_MAX_TEXTURE_SAMPLERS 32
#define VC5_UIFBLOCK_SIZE (4 * VC5_UBLOCK_SIZE)
#define VC5_UIFBLOCK_ROW_SIZE (4 * VC5_UIFBLOCK_SIZE)
-struct vc5_simulator_file;
+struct v3d_simulator_file;
-struct vc5_screen {
+struct v3d_screen {
struct pipe_screen base;
int fd;
struct slab_parent_pool transfer_pool;
- struct vc5_bo_cache {
- /** List of struct vc5_bo freed, by age. */
+ struct v3d_bo_cache {
+ /** List of struct v3d_bo freed, by age. */
struct list_head time_list;
- /** List of struct vc5_bo freed, per size, by age. */
+ /** List of struct v3d_bo freed, per size, by age. */
struct list_head *size_list;
uint32_t size_list_size;
uint32_t bo_size;
uint32_t bo_count;
- struct vc5_simulator_file *sim_file;
+ struct v3d_simulator_file *sim_file;
};
-static inline struct vc5_screen *
-vc5_screen(struct pipe_screen *screen)
+static inline struct v3d_screen *
+v3d_screen(struct pipe_screen *screen)
{
- return (struct vc5_screen *)screen;
+ return (struct v3d_screen *)screen;
}
struct pipe_screen *v3d_screen_create(int fd);
void
-vc5_fence_init(struct vc5_screen *screen);
+v3d_fence_init(struct v3d_screen *screen);
#endif /* VC5_SCREEN_H */
*/
/**
- * @file vc5_simulator.c
+ * @file v3d_simulator.c
*
* Implements VC5 simulation on top of a non-VC5 GEM fd.
*
#include "v3d_context.h"
/** Global (across GEM fds) state for the simulator */
-static struct vc5_simulator_state {
+static struct v3d_simulator_state {
mtx_t mutex;
struct v3d_hw *v3d;
struct mem_block *heap;
struct mem_block *overflow;
- /** Mapping from GEM handle to struct vc5_simulator_bo * */
+ /** Mapping from GEM handle to struct v3d_simulator_bo * */
struct hash_table *fd_map;
int refcount;
};
/** Per-GEM-fd state for the simulator. */
-struct vc5_simulator_file {
+struct v3d_simulator_file {
int fd;
- /** Mapping from GEM handle to struct vc5_simulator_bo * */
+ /** Mapping from GEM handle to struct v3d_simulator_bo * */
struct hash_table *bo_map;
struct mem_block *gmp;
void *gmp_vaddr;
};
-/** Wrapper for drm_vc5_bo tracking the simulator-specific state. */
-struct vc5_simulator_bo {
- struct vc5_simulator_file *file;
+/** Wrapper for drm_v3d_bo tracking the simulator-specific state. */
+struct v3d_simulator_bo {
+ struct v3d_simulator_file *file;
/** Area for this BO within sim_state->mem */
struct mem_block *block;
return (void *)(uintptr_t)key;
}
-static struct vc5_simulator_file *
-vc5_get_simulator_file_for_fd(int fd)
+static struct v3d_simulator_file *
+v3d_get_simulator_file_for_fd(int fd)
{
struct hash_entry *entry = _mesa_hash_table_search(sim_state.fd_map,
int_to_key(fd + 1));
* permissions (bit 0 = read, bit 1 = write, write-only forbidden).
*/
static void
-set_gmp_flags(struct vc5_simulator_file *file,
+set_gmp_flags(struct v3d_simulator_file *file,
uint32_t offset, uint32_t size, uint32_t flag)
{
assert((offset & ((1 << GMP_ALIGN2) - 1)) == 0);
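The loop body is elided here; a minimal sketch of the GMP table update this implies, assuming two permission bits per (1 << GMP_ALIGN2)-byte granule packed sixteen to a 32-bit word (helper name and exact bounds hypothetical):

static void
set_gmp_flags_sketch(uint32_t *gmp, uint32_t offset, uint32_t size,
                     uint32_t flag)
{
        /* Two permission bits per granule, 16 granules per 32-bit word. */
        uint32_t first = offset >> GMP_ALIGN2;
        uint32_t count = (size + (1 << GMP_ALIGN2) - 1) >> GMP_ALIGN2;

        /* flag is bit 0 = read, bit 1 = write; 0x2 alone (write-only)
         * is forbidden, per the comment above.
         */
        assert(flag != 0x2 && flag <= 0x3);

        for (uint32_t i = first; i < first + count; i++) {
                uint32_t shift = (i % 16) * 2;
                gmp[i / 16] = (gmp[i / 16] & ~(0x3u << shift)) |
                              (flag << shift);
        }
}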
* Allocates space in simulator memory and returns a tracking struct for it
* that also contains the drm_gem_cma_object struct.
*/
-static struct vc5_simulator_bo *
-vc5_create_simulator_bo(int fd, int handle, unsigned size)
+static struct v3d_simulator_bo *
+v3d_create_simulator_bo(int fd, int handle, unsigned size)
{
- struct vc5_simulator_file *file = vc5_get_simulator_file_for_fd(fd);
- struct vc5_simulator_bo *sim_bo = rzalloc(file,
- struct vc5_simulator_bo);
+ struct v3d_simulator_file *file = v3d_get_simulator_file_for_fd(fd);
+ struct v3d_simulator_bo *sim_bo = rzalloc(file,
+ struct v3d_simulator_bo);
size = align(size, 4096);
sim_bo->file = file;
*(uint32_t *)(sim_bo->vaddr + sim_bo->size) = BO_SENTINEL;
- /* A handle of 0 is used for vc5_gem.c internal allocations that
+ /* A handle of 0 is used for v3d_gem.c internal allocations that
* don't need to go in the lookup table.
*/
if (handle != 0) {
}
static void
-vc5_free_simulator_bo(struct vc5_simulator_bo *sim_bo)
+v3d_free_simulator_bo(struct v3d_simulator_bo *sim_bo)
{
- struct vc5_simulator_file *sim_file = sim_bo->file;
+ struct v3d_simulator_file *sim_file = sim_bo->file;
if (sim_bo->winsys_map)
munmap(sim_bo->winsys_map, sim_bo->size);
ralloc_free(sim_bo);
}
-static struct vc5_simulator_bo *
-vc5_get_simulator_bo(struct vc5_simulator_file *file, int gem_handle)
+static struct v3d_simulator_bo *
+v3d_get_simulator_bo(struct v3d_simulator_file *file, int gem_handle)
{
mtx_lock(&sim_state.mutex);
struct hash_entry *entry =
}
static int
-vc5_simulator_pin_bos(int fd, struct vc5_job *job)
+v3d_simulator_pin_bos(int fd, struct v3d_job *job)
{
- struct vc5_simulator_file *file = vc5_get_simulator_file_for_fd(fd);
+ struct v3d_simulator_file *file = v3d_get_simulator_file_for_fd(fd);
struct set_entry *entry;
set_foreach(job->bos, entry) {
- struct vc5_bo *bo = (struct vc5_bo *)entry->key;
- struct vc5_simulator_bo *sim_bo =
- vc5_get_simulator_bo(file, bo->handle);
+ struct v3d_bo *bo = (struct v3d_bo *)entry->key;
+ struct v3d_simulator_bo *sim_bo =
+ v3d_get_simulator_bo(file, bo->handle);
- vc5_bo_map(bo);
+ v3d_bo_map(bo);
memcpy(sim_bo->vaddr, bo->map, bo->size);
}
}
static int
-vc5_simulator_unpin_bos(int fd, struct vc5_job *job)
+v3d_simulator_unpin_bos(int fd, struct v3d_job *job)
{
- struct vc5_simulator_file *file = vc5_get_simulator_file_for_fd(fd);
+ struct v3d_simulator_file *file = v3d_get_simulator_file_for_fd(fd);
struct set_entry *entry;
set_foreach(job->bos, entry) {
- struct vc5_bo *bo = (struct vc5_bo *)entry->key;
- struct vc5_simulator_bo *sim_bo =
- vc5_get_simulator_bo(file, bo->handle);
+ struct v3d_bo *bo = (struct v3d_bo *)entry->key;
+ struct v3d_simulator_bo *sim_bo =
+ v3d_get_simulator_bo(file, bo->handle);
if (*(uint32_t *)(sim_bo->vaddr +
sim_bo->size) != BO_SENTINEL) {
fprintf(stderr, "Buffer overflow in %s\n", bo->name);
}
- vc5_bo_map(bo);
+ v3d_bo_map(bo);
memcpy(bo->map, sim_bo->vaddr, bo->size);
}
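Taken together with the sentinel written at allocation time, the check above is a cheap overflow detector for simulator jobs; the pattern in isolation:

/* At allocation: reserve four extra bytes and mark the end of the BO. */
*(uint32_t *)(sim_bo->vaddr + sim_bo->size) = BO_SENTINEL;

/* After the simulator has executed the job: any write past the end of
 * the buffer clobbers the sentinel.
 */
if (*(uint32_t *)(sim_bo->vaddr + sim_bo->size) != BO_SENTINEL)
        fprintf(stderr, "Buffer overflow in %s\n", bo->name);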
#if 0
static void
-vc5_dump_to_file(struct vc5_exec_info *exec)
+v3d_dump_to_file(struct v3d_exec_info *exec)
{
static int dumpno = 0;
- struct drm_vc5_get_hang_state *state;
- struct drm_vc5_get_hang_state_bo *bo_state;
+ struct drm_v3d_get_hang_state *state;
+ struct drm_v3d_get_hang_state_bo *bo_state;
unsigned int dump_version = 0;
- if (!(vc5_debug & VC5_DEBUG_DUMP))
+ if (!(v3d_debug & VC5_DEBUG_DUMP))
return;
state = calloc(1, sizeof(*state));
int unref_count = 0;
- list_for_each_entry_safe(struct drm_vc5_bo, bo, &exec->unref_list,
+ list_for_each_entry_safe(struct drm_v3d_bo, bo, &exec->unref_list,
unref_head) {
unref_count++;
}
bo_state = calloc(state->bo_count, sizeof(*bo_state));
char *filename = NULL;
- asprintf(&filename, "vc5-dri-%d.dump", dumpno++);
+ asprintf(&filename, "v3d-dri-%d.dump", dumpno++);
FILE *f = fopen(filename, "w+");
if (!f) {
fprintf(stderr, "Couldn't open %s: %s", filename,
bo_state[i].size = cma_bo->base.size;
}
- list_for_each_entry_safe(struct drm_vc5_bo, bo, &exec->unref_list,
+ list_for_each_entry_safe(struct drm_v3d_bo, bo, &exec->unref_list,
unref_head) {
struct drm_gem_cma_object *cma_bo = &bo->base;
bo_state[i].handle = 0;
fwrite(cma_bo->vaddr, cma_bo->base.size, 1, f);
}
- list_for_each_entry_safe(struct drm_vc5_bo, bo, &exec->unref_list,
+ list_for_each_entry_safe(struct drm_v3d_bo, bo, &exec->unref_list,
unref_head) {
struct drm_gem_cma_object *cma_bo = &bo->base;
fwrite(cma_bo->vaddr, cma_bo->base.size, 1, f);
#endif
int
-vc5_simulator_flush(struct vc5_context *vc5,
- struct drm_v3d_submit_cl *submit, struct vc5_job *job)
+v3d_simulator_flush(struct v3d_context *v3d,
+ struct drm_v3d_submit_cl *submit, struct v3d_job *job)
{
- struct vc5_screen *screen = vc5->screen;
+ struct v3d_screen *screen = v3d->screen;
int fd = screen->fd;
- struct vc5_simulator_file *file = vc5_get_simulator_file_for_fd(fd);
- struct vc5_surface *csurf = vc5_surface(vc5->framebuffer.cbufs[0]);
- struct vc5_resource *ctex = csurf ? vc5_resource(csurf->base.texture) : NULL;
- struct vc5_simulator_bo *csim_bo = ctex ? vc5_get_simulator_bo(file, ctex->bo->handle) : NULL;
+ struct v3d_simulator_file *file = v3d_get_simulator_file_for_fd(fd);
+ struct v3d_surface *csurf = v3d_surface(v3d->framebuffer.cbufs[0]);
+ struct v3d_resource *ctex = csurf ? v3d_resource(csurf->base.texture) : NULL;
+ struct v3d_simulator_bo *csim_bo = ctex ? v3d_get_simulator_bo(file, ctex->bo->handle) : NULL;
uint32_t winsys_stride = ctex ? csim_bo->winsys_stride : 0;
uint32_t sim_stride = ctex ? ctex->slices[0].stride : 0;
uint32_t row_len = MIN2(sim_stride, winsys_stride);
}
}
- ret = vc5_simulator_pin_bos(fd, job);
+ ret = v3d_simulator_pin_bos(fd, job);
if (ret)
return ret;
- //vc5_dump_to_file(&exec);
+ //v3d_dump_to_file(&exec);
if (sim_state.ver >= 41)
v3d41_simulator_flush(sim_state.v3d, submit, file->gmp->ofs);
else
v3d33_simulator_flush(sim_state.v3d, submit, file->gmp->ofs);
- ret = vc5_simulator_unpin_bos(fd, job);
+ ret = v3d_simulator_unpin_bos(fd, job);
if (ret)
return ret;
* Map the underlying GEM object from the real hardware GEM handle.
*/
static void *
-vc5_simulator_map_winsys_bo(int fd, struct vc5_simulator_bo *sim_bo)
+v3d_simulator_map_winsys_bo(int fd, struct v3d_simulator_bo *sim_bo)
{
int ret;
void *map;
* time, but we're still using drmPrimeFDToHandle() so we have this helper to
* be called afterward instead.
*/
-void vc5_simulator_open_from_handle(int fd, uint32_t winsys_stride,
+void v3d_simulator_open_from_handle(int fd, uint32_t winsys_stride,
int handle, uint32_t size)
{
- struct vc5_simulator_bo *sim_bo =
- vc5_create_simulator_bo(fd, handle, size);
+ struct v3d_simulator_bo *sim_bo =
+ v3d_create_simulator_bo(fd, handle, size);
sim_bo->winsys_stride = winsys_stride;
- sim_bo->winsys_map = vc5_simulator_map_winsys_bo(fd, sim_bo);
+ sim_bo->winsys_map = v3d_simulator_map_winsys_bo(fd, sim_bo);
}
/**
* Making a VC5 BO is just a matter of making a corresponding BO on the host.
*/
static int
-vc5_simulator_create_bo_ioctl(int fd, struct drm_v3d_create_bo *args)
+v3d_simulator_create_bo_ioctl(int fd, struct drm_v3d_create_bo *args)
{
int ret;
struct drm_mode_create_dumb create = {
args->handle = create.handle;
- struct vc5_simulator_bo *sim_bo =
- vc5_create_simulator_bo(fd, create.handle, args->size);
+ struct v3d_simulator_bo *sim_bo =
+ v3d_create_simulator_bo(fd, create.handle, args->size);
args->offset = sim_bo->block->ofs;
* We just pass this straight through to dumb mmap.
*/
static int
-vc5_simulator_mmap_bo_ioctl(int fd, struct drm_v3d_mmap_bo *args)
+v3d_simulator_mmap_bo_ioctl(int fd, struct drm_v3d_mmap_bo *args)
{
int ret;
struct drm_mode_map_dumb map = {
}
static int
-vc5_simulator_get_bo_offset_ioctl(int fd, struct drm_v3d_get_bo_offset *args)
+v3d_simulator_get_bo_offset_ioctl(int fd, struct drm_v3d_get_bo_offset *args)
{
- struct vc5_simulator_file *file = vc5_get_simulator_file_for_fd(fd);
- struct vc5_simulator_bo *sim_bo = vc5_get_simulator_bo(file,
+ struct v3d_simulator_file *file = v3d_get_simulator_file_for_fd(fd);
+ struct v3d_simulator_bo *sim_bo = v3d_get_simulator_bo(file,
args->handle);
args->offset = sim_bo->block->ofs;
}
static int
-vc5_simulator_gem_close_ioctl(int fd, struct drm_gem_close *args)
+v3d_simulator_gem_close_ioctl(int fd, struct drm_gem_close *args)
{
/* Free the simulator's internal tracking. */
- struct vc5_simulator_file *file = vc5_get_simulator_file_for_fd(fd);
- struct vc5_simulator_bo *sim_bo = vc5_get_simulator_bo(file,
+ struct v3d_simulator_file *file = v3d_get_simulator_file_for_fd(fd);
+ struct v3d_simulator_bo *sim_bo = v3d_get_simulator_bo(file,
args->handle);
- vc5_free_simulator_bo(sim_bo);
+ v3d_free_simulator_bo(sim_bo);
/* Pass the call on down. */
return drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, args);
}
static int
-vc5_simulator_get_param_ioctl(int fd, struct drm_v3d_get_param *args)
+v3d_simulator_get_param_ioctl(int fd, struct drm_v3d_get_param *args)
{
if (sim_state.ver >= 41)
return v3d41_simulator_get_param_ioctl(sim_state.v3d, args);
}
int
-vc5_simulator_ioctl(int fd, unsigned long request, void *args)
+v3d_simulator_ioctl(int fd, unsigned long request, void *args)
{
switch (request) {
case DRM_IOCTL_V3D_CREATE_BO:
- return vc5_simulator_create_bo_ioctl(fd, args);
+ return v3d_simulator_create_bo_ioctl(fd, args);
case DRM_IOCTL_V3D_MMAP_BO:
- return vc5_simulator_mmap_bo_ioctl(fd, args);
+ return v3d_simulator_mmap_bo_ioctl(fd, args);
case DRM_IOCTL_V3D_GET_BO_OFFSET:
- return vc5_simulator_get_bo_offset_ioctl(fd, args);
+ return v3d_simulator_get_bo_offset_ioctl(fd, args);
case DRM_IOCTL_V3D_WAIT_BO:
- /* We do all of the vc5 rendering synchronously, so we just
+ /* We do all of the v3d rendering synchronously, so we just
* return immediately on the wait ioctls. This ignores any
* native rendering to the host BO, so it does mean we race on
* front buffer rendering.
return 0;
case DRM_IOCTL_V3D_GET_PARAM:
- return vc5_simulator_get_param_ioctl(fd, args);
+ return v3d_simulator_get_param_ioctl(fd, args);
case DRM_IOCTL_GEM_CLOSE:
- return vc5_simulator_gem_close_ioctl(fd, args);
+ return v3d_simulator_gem_close_ioctl(fd, args);
case DRM_IOCTL_GEM_OPEN:
case DRM_IOCTL_GEM_FLINK:
}
static void
-vc5_simulator_init_global(const struct v3d_device_info *devinfo)
+v3d_simulator_init_global(const struct v3d_device_info *devinfo)
{
mtx_lock(&sim_state.mutex);
if (sim_state.refcount++) {
}
void
-vc5_simulator_init(struct vc5_screen *screen)
+v3d_simulator_init(struct v3d_screen *screen)
{
- vc5_simulator_init_global(&screen->devinfo);
+ v3d_simulator_init_global(&screen->devinfo);
- screen->sim_file = rzalloc(screen, struct vc5_simulator_file);
- struct vc5_simulator_file *sim_file = screen->sim_file;
+ screen->sim_file = rzalloc(screen, struct v3d_simulator_file);
+ struct v3d_simulator_file *sim_file = screen->sim_file;
screen->sim_file->bo_map =
_mesa_hash_table_create(screen->sim_file,
}
void
-vc5_simulator_destroy(struct vc5_screen *screen)
+v3d_simulator_destroy(struct v3d_screen *screen)
{
mtx_lock(&sim_state.mutex);
if (!--sim_state.refcount) {
* IN THE SOFTWARE.
*/
-/** @file vc5_tiling.c
+/** @file v3d_tiling.c
*
* Handles information about the VC5 tiling formats, and loading data from
* them and storing data to them.
/** Return the width in pixels of a 64-byte microtile. */
uint32_t
-vc5_utile_width(int cpp)
+v3d_utile_width(int cpp)
{
switch (cpp) {
case 1:
/** Return the height in pixels of a 64-byte microtile. */
uint32_t
-vc5_utile_height(int cpp)
+v3d_utile_height(int cpp)
{
switch (cpp) {
case 1:
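Both switch bodies are elided above; since a microtile is always 64 bytes, width * height * cpp == 64 must hold. The mapping this implies (values assumed from that invariant):

/* cpp | utile width x height | bytes
 *   1 |        8 x 8         |  64
 *   2 |        8 x 4         |  64
 *   4 |        4 x 4         |  64
 *   8 |        4 x 2         |  64
 *  16 |        4 x 1         |  64
 */
assert(v3d_utile_width(cpp) * v3d_utile_height(cpp) * cpp == 64);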
* arrangement.
*/
static inline uint32_t
-vc5_get_utile_pixel_offset(uint32_t cpp, uint32_t x, uint32_t y)
+v3d_get_utile_pixel_offset(uint32_t cpp, uint32_t x, uint32_t y)
{
- uint32_t utile_w = vc5_utile_width(cpp);
- uint32_t utile_h = vc5_utile_height(cpp);
+ uint32_t utile_w = v3d_utile_width(cpp);
+ uint32_t utile_h = v3d_utile_height(cpp);
assert(x < utile_w && y < utile_h);
* LINEARTILE is a single line of utiles in either the X or Y direction.
*/
static inline uint32_t
-vc5_get_lt_pixel_offset(uint32_t cpp, uint32_t image_h, uint32_t x, uint32_t y)
+v3d_get_lt_pixel_offset(uint32_t cpp, uint32_t image_h, uint32_t x, uint32_t y)
{
- uint32_t utile_w = vc5_utile_width(cpp);
- uint32_t utile_h = vc5_utile_height(cpp);
+ uint32_t utile_w = v3d_utile_width(cpp);
+ uint32_t utile_h = v3d_utile_height(cpp);
uint32_t utile_index_x = x / utile_w;
uint32_t utile_index_y = y / utile_h;
assert(utile_index_x == 0 || utile_index_y == 0);
return (64 * (utile_index_x + utile_index_y) +
- vc5_get_utile_pixel_offset(cpp,
+ v3d_get_utile_pixel_offset(cpp,
x & (utile_w - 1),
y & (utile_h - 1)));
}
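A worked example of the function above, assuming the 4x4 utile for cpp == 4: a pixel at (x, y) = (5, 2) gives utile_index_x == 1 and utile_index_y == 0 (satisfying the assert), so the result is 64 * 1 plus the offset of (1, 2) within that utile.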
* utiles), and the UIF blocks are in 1 or 2 columns in raster order.
*/
static inline uint32_t
-vc5_get_ublinear_pixel_offset(uint32_t cpp, uint32_t x, uint32_t y,
+v3d_get_ublinear_pixel_offset(uint32_t cpp, uint32_t x, uint32_t y,
int ublinear_number)
{
- uint32_t utile_w = vc5_utile_width(cpp);
- uint32_t utile_h = vc5_utile_height(cpp);
+ uint32_t utile_w = v3d_utile_width(cpp);
+ uint32_t utile_h = v3d_utile_height(cpp);
uint32_t ub_w = utile_w * 2;
uint32_t ub_h = utile_h * 2;
uint32_t ub_x = x / ub_w;
ub_x) +
((x & utile_w) ? 64 : 0) +
((y & utile_h) ? 128 : 0) +
- + vc5_get_utile_pixel_offset(cpp,
+ + v3d_get_utile_pixel_offset(cpp,
x & (utile_w - 1),
y & (utile_h - 1)));
}
static inline uint32_t
-vc5_get_ublinear_2_column_pixel_offset(uint32_t cpp, uint32_t image_h,
+v3d_get_ublinear_2_column_pixel_offset(uint32_t cpp, uint32_t image_h,
uint32_t x, uint32_t y)
{
- return vc5_get_ublinear_pixel_offset(cpp, x, y, 2);
+ return v3d_get_ublinear_pixel_offset(cpp, x, y, 2);
}
static inline uint32_t
-vc5_get_ublinear_1_column_pixel_offset(uint32_t cpp, uint32_t image_h,
+v3d_get_ublinear_1_column_pixel_offset(uint32_t cpp, uint32_t image_h,
uint32_t x, uint32_t y)
{
- return vc5_get_ublinear_pixel_offset(cpp, x, y, 1);
+ return v3d_get_ublinear_pixel_offset(cpp, x, y, 1);
}
/**
* 4x4 groups, and those 4x4 groups are then stored in raster order.
*/
static inline uint32_t
-vc5_get_uif_pixel_offset(uint32_t cpp, uint32_t image_h, uint32_t x, uint32_t y,
+v3d_get_uif_pixel_offset(uint32_t cpp, uint32_t image_h, uint32_t x, uint32_t y,
bool do_xor)
{
- uint32_t utile_w = vc5_utile_width(cpp);
- uint32_t utile_h = vc5_utile_height(cpp);
+ uint32_t utile_w = v3d_utile_width(cpp);
+ uint32_t utile_h = v3d_utile_height(cpp);
uint32_t mb_width = utile_w * 2;
uint32_t mb_height = utile_h * 2;
uint32_t log2_mb_width = ffs(mb_width) - 1;
uint32_t mb_pixel_address = (mb_base_addr +
mb_tile_offset +
- vc5_get_utile_pixel_offset(cpp,
+ v3d_get_utile_pixel_offset(cpp,
utile_x,
utile_y));
}
static inline uint32_t
-vc5_get_uif_xor_pixel_offset(uint32_t cpp, uint32_t image_h,
+v3d_get_uif_xor_pixel_offset(uint32_t cpp, uint32_t image_h,
uint32_t x, uint32_t y)
{
- return vc5_get_uif_pixel_offset(cpp, image_h, x, y, true);
+ return v3d_get_uif_pixel_offset(cpp, image_h, x, y, true);
}
static inline uint32_t
-vc5_get_uif_no_xor_pixel_offset(uint32_t cpp, uint32_t image_h,
+v3d_get_uif_no_xor_pixel_offset(uint32_t cpp, uint32_t image_h,
uint32_t x, uint32_t y)
{
- return vc5_get_uif_pixel_offset(cpp, image_h, x, y, false);
+ return v3d_get_uif_pixel_offset(cpp, image_h, x, y, false);
}
static inline void
-vc5_move_pixels_general_percpp(void *gpu, uint32_t gpu_stride,
+v3d_move_pixels_general_percpp(void *gpu, uint32_t gpu_stride,
void *cpu, uint32_t cpu_stride,
int cpp, uint32_t image_h,
const struct pipe_box *box,
}
static inline void
-vc5_move_pixels_general(void *gpu, uint32_t gpu_stride,
+v3d_move_pixels_general(void *gpu, uint32_t gpu_stride,
void *cpu, uint32_t cpu_stride,
int cpp, uint32_t image_h,
const struct pipe_box *box,
{
switch (cpp) {
case 1:
- vc5_move_pixels_general_percpp(gpu, gpu_stride,
+ v3d_move_pixels_general_percpp(gpu, gpu_stride,
cpu, cpu_stride,
1, image_h, box,
get_pixel_offset,
is_load);
break;
case 2:
- vc5_move_pixels_general_percpp(gpu, gpu_stride,
+ v3d_move_pixels_general_percpp(gpu, gpu_stride,
cpu, cpu_stride,
2, image_h, box,
get_pixel_offset,
is_load);
break;
case 4:
- vc5_move_pixels_general_percpp(gpu, gpu_stride,
+ v3d_move_pixels_general_percpp(gpu, gpu_stride,
cpu, cpu_stride,
4, image_h, box,
get_pixel_offset,
is_load);
break;
case 8:
- vc5_move_pixels_general_percpp(gpu, gpu_stride,
+ v3d_move_pixels_general_percpp(gpu, gpu_stride,
cpu, cpu_stride,
8, image_h, box,
get_pixel_offset,
is_load);
break;
case 16:
- vc5_move_pixels_general_percpp(gpu, gpu_stride,
+ v3d_move_pixels_general_percpp(gpu, gpu_stride,
cpu, cpu_stride,
16, image_h, box,
get_pixel_offset,
}
static inline void
-vc5_move_tiled_image(void *gpu, uint32_t gpu_stride,
+v3d_move_tiled_image(void *gpu, uint32_t gpu_stride,
void *cpu, uint32_t cpu_stride,
- enum vc5_tiling_mode tiling_format,
+ enum v3d_tiling_mode tiling_format,
int cpp,
uint32_t image_h,
const struct pipe_box *box,
{
switch (tiling_format) {
case VC5_TILING_UIF_XOR:
- vc5_move_pixels_general(gpu, gpu_stride,
+ v3d_move_pixels_general(gpu, gpu_stride,
cpu, cpu_stride,
cpp, image_h, box,
- vc5_get_uif_xor_pixel_offset,
+ v3d_get_uif_xor_pixel_offset,
is_load);
break;
case VC5_TILING_UIF_NO_XOR:
- vc5_move_pixels_general(gpu, gpu_stride,
+ v3d_move_pixels_general(gpu, gpu_stride,
cpu, cpu_stride,
cpp, image_h, box,
- vc5_get_uif_no_xor_pixel_offset,
+ v3d_get_uif_no_xor_pixel_offset,
is_load);
break;
case VC5_TILING_UBLINEAR_2_COLUMN:
- vc5_move_pixels_general(gpu, gpu_stride,
+ v3d_move_pixels_general(gpu, gpu_stride,
cpu, cpu_stride,
cpp, image_h, box,
- vc5_get_ublinear_2_column_pixel_offset,
+ v3d_get_ublinear_2_column_pixel_offset,
is_load);
break;
case VC5_TILING_UBLINEAR_1_COLUMN:
- vc5_move_pixels_general(gpu, gpu_stride,
+ v3d_move_pixels_general(gpu, gpu_stride,
cpu, cpu_stride,
cpp, image_h, box,
- vc5_get_ublinear_1_column_pixel_offset,
+ v3d_get_ublinear_1_column_pixel_offset,
is_load);
break;
case VC5_TILING_LINEARTILE:
- vc5_move_pixels_general(gpu, gpu_stride,
+ v3d_move_pixels_general(gpu, gpu_stride,
cpu, cpu_stride,
cpp, image_h, box,
- vc5_get_lt_pixel_offset,
+ v3d_get_lt_pixel_offset,
is_load);
break;
default:
* start of \p dst according to the given tiling format.
*/
void
-vc5_load_tiled_image(void *dst, uint32_t dst_stride,
+v3d_load_tiled_image(void *dst, uint32_t dst_stride,
void *src, uint32_t src_stride,
- enum vc5_tiling_mode tiling_format, int cpp,
+ enum v3d_tiling_mode tiling_format, int cpp,
uint32_t image_h,
const struct pipe_box *box)
{
- vc5_move_tiled_image(src, src_stride,
+ v3d_move_tiled_image(src, src_stride,
dst, dst_stride,
tiling_format,
cpp,
* \p dst according to the given tiling format.
*/
void
-vc5_store_tiled_image(void *dst, uint32_t dst_stride,
+v3d_store_tiled_image(void *dst, uint32_t dst_stride,
void *src, uint32_t src_stride,
- enum vc5_tiling_mode tiling_format, int cpp,
+ enum v3d_tiling_mode tiling_format, int cpp,
uint32_t image_h,
const struct pipe_box *box)
{
- vc5_move_tiled_image(dst, dst_stride,
+ v3d_move_tiled_image(dst, dst_stride,
src, src_stride,
tiling_format,
cpp,
#ifndef VC5_TILING_H
#define VC5_TILING_H
-uint32_t vc5_utile_width(int cpp) ATTRIBUTE_CONST;
-uint32_t vc5_utile_height(int cpp) ATTRIBUTE_CONST;
-bool vc5_size_is_lt(uint32_t width, uint32_t height, int cpp) ATTRIBUTE_CONST;
-void vc5_load_utile(void *dst, void *src, uint32_t dst_stride, uint32_t cpp);
-void vc5_store_utile(void *dst, void *src, uint32_t src_stride, uint32_t cpp);
-void vc5_load_tiled_image(void *dst, uint32_t dst_stride,
+uint32_t v3d_utile_width(int cpp) ATTRIBUTE_CONST;
+uint32_t v3d_utile_height(int cpp) ATTRIBUTE_CONST;
+bool v3d_size_is_lt(uint32_t width, uint32_t height, int cpp) ATTRIBUTE_CONST;
+void v3d_load_utile(void *dst, void *src, uint32_t dst_stride, uint32_t cpp);
+void v3d_store_utile(void *dst, void *src, uint32_t src_stride, uint32_t cpp);
+void v3d_load_tiled_image(void *dst, uint32_t dst_stride,
void *src, uint32_t src_stride,
- enum vc5_tiling_mode tiling_format, int cpp,
+ enum v3d_tiling_mode tiling_format, int cpp,
uint32_t image_h,
const struct pipe_box *box);
-void vc5_store_tiled_image(void *dst, uint32_t dst_stride,
+void v3d_store_tiled_image(void *dst, uint32_t dst_stride,
void *src, uint32_t src_stride,
- enum vc5_tiling_mode tiling_format, int cpp,
+ enum v3d_tiling_mode tiling_format, int cpp,
uint32_t image_h,
const struct pipe_box *box);
}
static void
-write_texture_border_color(struct vc5_job *job,
- struct vc5_cl_out **uniforms,
- struct vc5_texture_stateobj *texstate,
+write_texture_border_color(struct v3d_job *job,
+ struct v3d_cl_out **uniforms,
+ struct v3d_texture_stateobj *texstate,
uint32_t unit)
{
struct pipe_sampler_state *sampler = texstate->samplers[unit];
struct pipe_sampler_view *texture = texstate->textures[unit];
- struct vc5_resource *rsc = vc5_resource(texture->texture);
+ struct v3d_resource *rsc = v3d_resource(texture->texture);
union util_color uc;
const struct util_format_description *tex_format_desc =
border_color,
tex_format_desc->swizzle);
- /* Now, pack so that when the vc5_format-sampled texture contents are
- * replaced with our border color, the vc5_get_format_swizzle()
+ /* Now, pack so that when the v3d_format-sampled texture contents are
+ * replaced with our border color, the v3d_get_format_swizzle()
* swizzling will get the right channels.
*/
if (util_format_is_depth_or_stencil(texture->format)) {
uc.ui[0] = util_pack_z(PIPE_FORMAT_Z24X8_UNORM,
sampler->border_color.f[0]) << 8;
} else {
- switch (rsc->vc5_format) {
+ switch (rsc->v3d_format) {
default:
case VC5_TEXTURE_TYPE_RGBA8888:
util_pack_color(storage_color,
#endif
static uint32_t
-get_texrect_scale(struct vc5_texture_stateobj *texstate,
+get_texrect_scale(struct v3d_texture_stateobj *texstate,
enum quniform_contents contents,
uint32_t data)
{
}
static uint32_t
-get_texture_size(struct vc5_texture_stateobj *texstate,
+get_texture_size(struct v3d_texture_stateobj *texstate,
enum quniform_contents contents,
uint32_t data)
{
}
}
-static struct vc5_bo *
-vc5_upload_ubo(struct vc5_context *vc5,
- struct vc5_compiled_shader *shader,
+static struct v3d_bo *
+v3d_upload_ubo(struct v3d_context *v3d,
+ struct v3d_compiled_shader *shader,
const uint32_t *gallium_uniforms)
{
if (!shader->prog_data.base->ubo_size)
return NULL;
- struct vc5_bo *ubo = vc5_bo_alloc(vc5->screen,
+ struct v3d_bo *ubo = v3d_bo_alloc(v3d->screen,
shader->prog_data.base->ubo_size,
"ubo");
- void *data = vc5_bo_map(ubo);
+ void *data = v3d_bo_map(ubo);
for (uint32_t i = 0; i < shader->prog_data.base->num_ubo_ranges; i++) {
memcpy(data + shader->prog_data.base->ubo_ranges[i].dst_offset,
((const void *)gallium_uniforms +
* two together here.
*/
static void
-write_texture_p0(struct vc5_job *job,
- struct vc5_cl_out **uniforms,
- struct vc5_texture_stateobj *texstate,
+write_texture_p0(struct v3d_job *job,
+ struct v3d_cl_out **uniforms,
+ struct v3d_texture_stateobj *texstate,
uint32_t unit,
uint32_t shader_data)
{
struct pipe_sampler_state *psampler = texstate->samplers[unit];
- struct vc5_sampler_state *sampler = vc5_sampler_state(psampler);
+ struct v3d_sampler_state *sampler = v3d_sampler_state(psampler);
cl_aligned_u32(uniforms, shader_data | sampler->p0);
}
/** Writes the V3D 3.x P1 (CFG_MODE=1) texture parameter. */
static void
-write_texture_p1(struct vc5_job *job,
- struct vc5_cl_out **uniforms,
- struct vc5_texture_stateobj *texstate,
+write_texture_p1(struct v3d_job *job,
+ struct v3d_cl_out **uniforms,
+ struct v3d_texture_stateobj *texstate,
uint32_t data)
{
/* Extract the texture unit from the top bits, and the compiler's
uint32_t p1 = data & 0x1f;
struct pipe_sampler_view *psview = texstate->textures[unit];
- struct vc5_sampler_view *sview = vc5_sampler_view(psview);
+ struct v3d_sampler_view *sview = v3d_sampler_view(psview);
struct V3D33_TEXTURE_UNIFORM_PARAMETER_1_CFG_MODE1 unpacked = {
.texture_state_record_base_address = texstate->texture_state[unit],
/** Writes the V3D 4.x TMU configuration parameter 0. */
static void
-write_tmu_p0(struct vc5_job *job,
- struct vc5_cl_out **uniforms,
- struct vc5_texture_stateobj *texstate,
+write_tmu_p0(struct v3d_job *job,
+ struct v3d_cl_out **uniforms,
+ struct v3d_texture_stateobj *texstate,
uint32_t data)
{
/* Extract the texture unit from the top bits, and the compiler's
uint32_t p0 = data & 0x00ffffff;
struct pipe_sampler_view *psview = texstate->textures[unit];
- struct vc5_sampler_view *sview = vc5_sampler_view(psview);
- struct vc5_resource *rsc = vc5_resource(psview->texture);
+ struct v3d_sampler_view *sview = v3d_sampler_view(psview);
+ struct v3d_resource *rsc = v3d_resource(psview->texture);
cl_aligned_reloc(&job->indirect, uniforms, sview->bo, p0);
- vc5_job_add_bo(job, rsc->bo);
+ v3d_job_add_bo(job, rsc->bo);
}
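The unit extraction is elided above; the packing this unpacking implies, assuming the compiler places the texture unit in the top byte and its P0 data in the low 24 bits (helper name hypothetical):

/* Hypothetical compiler-side packing matching the unpacking above. */
static inline uint32_t
pack_tmu_p0(uint32_t unit, uint32_t p0_bits)
{
        assert(unit <= 0xff && p0_bits <= 0x00ffffff);
        return (unit << 24) | p0_bits;
}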
/** Writes the V3D 4.x TMU configuration parameter 1. */
static void
-write_tmu_p1(struct vc5_job *job,
- struct vc5_cl_out **uniforms,
- struct vc5_texture_stateobj *texstate,
+write_tmu_p1(struct v3d_job *job,
+ struct v3d_cl_out **uniforms,
+ struct v3d_texture_stateobj *texstate,
uint32_t data)
{
/* Extract the texture unit from the top bits, and the compiler's
uint32_t p0 = data & 0x00ffffff;
struct pipe_sampler_state *psampler = texstate->samplers[unit];
- struct vc5_sampler_state *sampler = vc5_sampler_state(psampler);
+ struct v3d_sampler_state *sampler = v3d_sampler_state(psampler);
cl_aligned_reloc(&job->indirect, uniforms, sampler->bo, p0);
}
-struct vc5_cl_reloc
-vc5_write_uniforms(struct vc5_context *vc5, struct vc5_compiled_shader *shader,
- struct vc5_constbuf_stateobj *cb,
- struct vc5_texture_stateobj *texstate)
+struct v3d_cl_reloc
+v3d_write_uniforms(struct v3d_context *v3d, struct v3d_compiled_shader *shader,
+ struct v3d_constbuf_stateobj *cb,
+ struct v3d_texture_stateobj *texstate)
{
struct v3d_uniform_list *uinfo = &shader->prog_data.base->uniforms;
- struct vc5_job *job = vc5->job;
+ struct v3d_job *job = v3d->job;
const uint32_t *gallium_uniforms = cb->cb[0].user_buffer;
- struct vc5_bo *ubo = vc5_upload_ubo(vc5, shader, gallium_uniforms);
+ struct v3d_bo *ubo = v3d_upload_ubo(v3d, shader, gallium_uniforms);
/* We always need to return some space for uniforms, because the HW
* will be prefetching, even if we don't read any in the program.
*/
- vc5_cl_ensure_space(&job->indirect, MAX2(uinfo->count, 1) * 4, 4);
+ v3d_cl_ensure_space(&job->indirect, MAX2(uinfo->count, 1) * 4, 4);
- struct vc5_cl_reloc uniform_stream = cl_get_address(&job->indirect);
- vc5_bo_reference(uniform_stream.bo);
+ struct v3d_cl_reloc uniform_stream = cl_get_address(&job->indirect);
+ v3d_bo_reference(uniform_stream.bo);
- struct vc5_cl_out *uniforms =
+ struct v3d_cl_out *uniforms =
cl_start(&job->indirect);
for (int i = 0; i < uinfo->count; i++) {
gallium_uniforms[uinfo->data[i]]);
break;
case QUNIFORM_VIEWPORT_X_SCALE:
- cl_aligned_f(&uniforms, vc5->viewport.scale[0] * 256.0f);
+ cl_aligned_f(&uniforms, v3d->viewport.scale[0] * 256.0f);
break;
case QUNIFORM_VIEWPORT_Y_SCALE:
- cl_aligned_f(&uniforms, vc5->viewport.scale[1] * 256.0f);
+ cl_aligned_f(&uniforms, v3d->viewport.scale[1] * 256.0f);
break;
case QUNIFORM_VIEWPORT_Z_OFFSET:
- cl_aligned_f(&uniforms, vc5->viewport.translate[2]);
+ cl_aligned_f(&uniforms, v3d->viewport.translate[2]);
break;
case QUNIFORM_VIEWPORT_Z_SCALE:
- cl_aligned_f(&uniforms, vc5->viewport.scale[2]);
+ cl_aligned_f(&uniforms, v3d->viewport.scale[2]);
break;
case QUNIFORM_USER_CLIP_PLANE:
cl_aligned_f(&uniforms,
- vc5->clip.ucp[uinfo->data[i] / 4][uinfo->data[i] % 4]);
+ v3d->clip.ucp[uinfo->data[i] / 4][uinfo->data[i] % 4]);
break;
case QUNIFORM_TMU_CONFIG_P0:
case QUNIFORM_STENCIL:
cl_aligned_u32(&uniforms,
- vc5->zsa->stencil_uniforms[uinfo->data[i]] |
+ v3d->zsa->stencil_uniforms[uinfo->data[i]] |
(uinfo->data[i] <= 1 ?
- (vc5->stencil_ref.ref_value[uinfo->data[i]] << 8) :
+ (v3d->stencil_ref.ref_value[uinfo->data[i]] << 8) :
0));
break;
case QUNIFORM_ALPHA_REF:
cl_aligned_f(&uniforms,
- vc5->zsa->base.alpha.ref_value);
+ v3d->zsa->base.alpha.ref_value);
break;
case QUNIFORM_SAMPLE_MASK:
- cl_aligned_u32(&uniforms, vc5->sample_mask);
+ cl_aligned_u32(&uniforms, v3d->sample_mask);
break;
case QUNIFORM_UBO_ADDR:
ubo, 0);
} else {
int ubo_index = uinfo->data[i];
- struct vc5_resource *rsc =
- vc5_resource(cb->cb[ubo_index].buffer);
+ struct v3d_resource *rsc =
+ v3d_resource(cb->cb[ubo_index].buffer);
cl_aligned_reloc(&job->indirect, &uniforms,
rsc->bo,
case QUNIFORM_SPILL_OFFSET:
cl_aligned_reloc(&job->indirect, &uniforms,
- vc5->prog.spill_bo, 0);
+ v3d->prog.spill_bo, 0);
break;
case QUNIFORM_SPILL_SIZE_PER_THREAD:
cl_aligned_u32(&uniforms,
- vc5->prog.spill_size_per_thread);
+ v3d->prog.spill_size_per_thread);
break;
default:
cl_end(&job->indirect, uniforms);
- vc5_bo_unreference(&ubo);
+ v3d_bo_unreference(&ubo);
return uniform_stream;
}
void
-vc5_set_shader_uniform_dirty_flags(struct vc5_compiled_shader *shader)
+v3d_set_shader_uniform_dirty_flags(struct v3d_compiled_shader *shader)
{
uint32_t dirty = 0;
*/
struct v3d_hw;
-struct vc5_format;
+struct v3d_format;
void v3dX(emit_state)(struct pipe_context *pctx);
-void v3dX(emit_rcl)(struct vc5_job *job);
+void v3dX(emit_rcl)(struct v3d_job *job);
void v3dX(draw_init)(struct pipe_context *pctx);
void v3dX(state_init)(struct pipe_context *pctx);
-void v3dX(bcl_epilogue)(struct vc5_context *vc5, struct vc5_job *job);
+void v3dX(bcl_epilogue)(struct v3d_context *v3d, struct v3d_job *job);
void v3dX(simulator_init_regs)(struct v3d_hw *v3d);
int v3dX(simulator_get_param_ioctl)(struct v3d_hw *v3d,
struct drm_v3d_get_param *args);
void v3dX(simulator_flush)(struct v3d_hw *v3d, struct drm_v3d_submit_cl *submit,
uint32_t gmp_ofs);
-const struct vc5_format *v3dX(get_format_desc)(enum pipe_format f);
+const struct v3d_format *v3dX(get_format_desc)(enum pipe_format f);
void v3dX(get_internal_type_bpp_for_output_format)(uint32_t format,
uint32_t *type,
uint32_t *bpp);
* Does the initial binning command list setup for drawing to a given FBO.
*/
static void
-vc5_start_draw(struct vc5_context *vc5)
+v3d_start_draw(struct v3d_context *v3d)
{
- struct vc5_job *job = vc5->job;
+ struct v3d_job *job = v3d->job;
if (job->needs_flush)
return;
/* Get space to emit our BCL state, using a branch to jump to a new BO
* if necessary.
*/
- vc5_cl_ensure_space_with_branch(&job->bcl, 256 /* XXX */);
+ v3d_cl_ensure_space_with_branch(&job->bcl, 256 /* XXX */);
job->submit.bcl_start = job->bcl.bo->offset;
- vc5_job_add_bo(job, job->bcl.bo);
+ v3d_job_add_bo(job, job->bcl.bo);
- job->tile_alloc = vc5_bo_alloc(vc5->screen, 1024 * 1024, "tile alloc");
- uint32_t tsda_per_tile_size = vc5->screen->devinfo.ver >= 40 ? 256 : 64;
- job->tile_state = vc5_bo_alloc(vc5->screen,
+ job->tile_alloc = v3d_bo_alloc(v3d->screen, 1024 * 1024, "tile alloc");
+ uint32_t tsda_per_tile_size = v3d->screen->devinfo.ver >= 40 ? 256 : 64;
+ job->tile_state = v3d_bo_alloc(v3d->screen,
job->draw_tiles_y *
job->draw_tiles_x *
tsda_per_tile_size,
cl_emit(&job->bcl, TILE_BINNING_MODE_CONFIGURATION_PART1, config) {
#if V3D_VERSION >= 40
- config.width_in_pixels_minus_1 = vc5->framebuffer.width - 1;
- config.height_in_pixels_minus_1 = vc5->framebuffer.height - 1;
+ config.width_in_pixels_minus_1 = v3d->framebuffer.width - 1;
+ config.height_in_pixels_minus_1 = v3d->framebuffer.height - 1;
config.number_of_render_targets_minus_1 =
- MAX2(vc5->framebuffer.nr_cbufs, 1) - 1;
+ MAX2(v3d->framebuffer.nr_cbufs, 1) - 1;
#else /* V3D_VERSION < 40 */
config.tile_state_data_array_base_address =
cl_address(job->tile_state, 0);
config.height_in_tiles = job->draw_tiles_y;
/* Must be >= 1 */
config.number_of_render_targets =
- MAX2(vc5->framebuffer.nr_cbufs, 1);
+ MAX2(v3d->framebuffer.nr_cbufs, 1);
#endif /* V3D_VERSION < 40 */
config.multisample_mode_4x = job->msaa;
cl_emit(&job->bcl, START_TILE_BINNING, bin);
job->needs_flush = true;
- job->draw_width = vc5->framebuffer.width;
- job->draw_height = vc5->framebuffer.height;
+ job->draw_width = v3d->framebuffer.width;
+ job->draw_height = v3d->framebuffer.height;
}
static void
-vc5_predraw_check_textures(struct pipe_context *pctx,
- struct vc5_texture_stateobj *stage_tex)
+v3d_predraw_check_textures(struct pipe_context *pctx,
+ struct v3d_texture_stateobj *stage_tex)
{
- struct vc5_context *vc5 = vc5_context(pctx);
+ struct v3d_context *v3d = v3d_context(pctx);
for (int i = 0; i < stage_tex->num_textures; i++) {
struct pipe_sampler_view *view = stage_tex->textures[i];
if (!view)
continue;
- vc5_flush_jobs_writing_resource(vc5, view->texture);
+ v3d_flush_jobs_writing_resource(v3d, view->texture);
}
}
static void
-vc5_emit_gl_shader_state(struct vc5_context *vc5,
+v3d_emit_gl_shader_state(struct v3d_context *v3d,
const struct pipe_draw_info *info)
{
- struct vc5_job *job = vc5->job;
+ struct v3d_job *job = v3d->job;
/* VC5_DIRTY_VTXSTATE */
- struct vc5_vertex_stateobj *vtx = vc5->vtx;
+ struct v3d_vertex_stateobj *vtx = v3d->vtx;
/* VC5_DIRTY_VTXBUF */
- struct vc5_vertexbuf_stateobj *vertexbuf = &vc5->vertexbuf;
+ struct v3d_vertexbuf_stateobj *vertexbuf = &v3d->vertexbuf;
/* Upload the uniforms to the indirect CL first */
- struct vc5_cl_reloc fs_uniforms =
- vc5_write_uniforms(vc5, vc5->prog.fs,
- &vc5->constbuf[PIPE_SHADER_FRAGMENT],
- &vc5->fragtex);
- struct vc5_cl_reloc vs_uniforms =
- vc5_write_uniforms(vc5, vc5->prog.vs,
- &vc5->constbuf[PIPE_SHADER_VERTEX],
- &vc5->verttex);
- struct vc5_cl_reloc cs_uniforms =
- vc5_write_uniforms(vc5, vc5->prog.cs,
- &vc5->constbuf[PIPE_SHADER_VERTEX],
- &vc5->verttex);
+ struct v3d_cl_reloc fs_uniforms =
+ v3d_write_uniforms(v3d, v3d->prog.fs,
+ &v3d->constbuf[PIPE_SHADER_FRAGMENT],
+ &v3d->fragtex);
+ struct v3d_cl_reloc vs_uniforms =
+ v3d_write_uniforms(v3d, v3d->prog.vs,
+ &v3d->constbuf[PIPE_SHADER_VERTEX],
+ &v3d->verttex);
+ struct v3d_cl_reloc cs_uniforms =
+ v3d_write_uniforms(v3d, v3d->prog.cs,
+ &v3d->constbuf[PIPE_SHADER_VERTEX],
+ &v3d->verttex);
/* See GFXH-930 workaround below */
uint32_t num_elements_to_emit = MAX2(vtx->num_elements, 1);
uint32_t shader_rec_offset =
- vc5_cl_ensure_space(&job->indirect,
+ v3d_cl_ensure_space(&job->indirect,
cl_packet_length(GL_SHADER_STATE_RECORD) +
num_elements_to_emit *
cl_packet_length(GL_SHADER_STATE_ATTRIBUTE_RECORD),
/* VC5_DIRTY_PRIM_MODE | VC5_DIRTY_RASTERIZER */
shader.point_size_in_shaded_vertex_data =
(info->mode == PIPE_PRIM_POINTS &&
- vc5->rasterizer->base.point_size_per_vertex);
+ v3d->rasterizer->base.point_size_per_vertex);
/* Must be set if the shader modifies Z, discards, or modifies
* the sample mask. For any of these cases, the fragment
* shader needs to write the Z value (even just discards).
*/
shader.fragment_shader_does_z_writes =
- (vc5->prog.fs->prog_data.fs->writes_z ||
- vc5->prog.fs->prog_data.fs->discard);
+ (v3d->prog.fs->prog_data.fs->writes_z ||
+ v3d->prog.fs->prog_data.fs->discard);
shader.fragment_shader_uses_real_pixel_centre_w_in_addition_to_centroid_w2 =
- vc5->prog.fs->prog_data.fs->uses_centroid_and_center_w;
+ v3d->prog.fs->prog_data.fs->uses_centroid_and_center_w;
shader.number_of_varyings_in_fragment_shader =
- vc5->prog.fs->prog_data.base->num_inputs;
+ v3d->prog.fs->prog_data.base->num_inputs;
shader.propagate_nans = true;
shader.coordinate_shader_code_address =
- cl_address(vc5->prog.cs->bo, 0);
+ cl_address(v3d->prog.cs->bo, 0);
shader.vertex_shader_code_address =
- cl_address(vc5->prog.vs->bo, 0);
+ cl_address(v3d->prog.vs->bo, 0);
shader.fragment_shader_code_address =
- cl_address(vc5->prog.fs->bo, 0);
+ cl_address(v3d->prog.fs->bo, 0);
/* XXX: Use combined input/output size flag in the common
* case.
shader.coordinate_shader_has_separate_input_and_output_vpm_blocks = true;
shader.vertex_shader_has_separate_input_and_output_vpm_blocks = true;
shader.coordinate_shader_input_vpm_segment_size =
- MAX2(vc5->prog.cs->prog_data.vs->vpm_input_size, 1);
+ MAX2(v3d->prog.cs->prog_data.vs->vpm_input_size, 1);
shader.vertex_shader_input_vpm_segment_size =
- MAX2(vc5->prog.vs->prog_data.vs->vpm_input_size, 1);
+ MAX2(v3d->prog.vs->prog_data.vs->vpm_input_size, 1);
shader.coordinate_shader_output_vpm_segment_size =
- vc5->prog.cs->prog_data.vs->vpm_output_size;
+ v3d->prog.cs->prog_data.vs->vpm_output_size;
shader.vertex_shader_output_vpm_segment_size =
- vc5->prog.vs->prog_data.vs->vpm_output_size;
+ v3d->prog.vs->prog_data.vs->vpm_output_size;
shader.coordinate_shader_uniforms_address = cs_uniforms;
shader.vertex_shader_uniforms_address = vs_uniforms;
#if V3D_VERSION >= 41
shader.coordinate_shader_4_way_threadable =
- vc5->prog.cs->prog_data.vs->base.threads == 4;
+ v3d->prog.cs->prog_data.vs->base.threads == 4;
shader.vertex_shader_4_way_threadable =
- vc5->prog.vs->prog_data.vs->base.threads == 4;
+ v3d->prog.vs->prog_data.vs->base.threads == 4;
shader.fragment_shader_4_way_threadable =
- vc5->prog.fs->prog_data.fs->base.threads == 4;
+ v3d->prog.fs->prog_data.fs->base.threads == 4;
shader.coordinate_shader_start_in_final_thread_section =
- vc5->prog.cs->prog_data.vs->base.single_seg;
+ v3d->prog.cs->prog_data.vs->base.single_seg;
shader.vertex_shader_start_in_final_thread_section =
- vc5->prog.vs->prog_data.vs->base.single_seg;
+ v3d->prog.vs->prog_data.vs->base.single_seg;
shader.fragment_shader_start_in_final_thread_section =
- vc5->prog.fs->prog_data.fs->base.single_seg;
+ v3d->prog.fs->prog_data.fs->base.single_seg;
#else
shader.coordinate_shader_4_way_threadable =
- vc5->prog.cs->prog_data.vs->base.threads == 4;
+ v3d->prog.cs->prog_data.vs->base.threads == 4;
shader.coordinate_shader_2_way_threadable =
- vc5->prog.cs->prog_data.vs->base.threads == 2;
+ v3d->prog.cs->prog_data.vs->base.threads == 2;
shader.vertex_shader_4_way_threadable =
- vc5->prog.vs->prog_data.vs->base.threads == 4;
+ v3d->prog.vs->prog_data.vs->base.threads == 4;
shader.vertex_shader_2_way_threadable =
- vc5->prog.vs->prog_data.vs->base.threads == 2;
+ v3d->prog.vs->prog_data.vs->base.threads == 2;
shader.fragment_shader_4_way_threadable =
- vc5->prog.fs->prog_data.fs->base.threads == 4;
+ v3d->prog.fs->prog_data.fs->base.threads == 4;
shader.fragment_shader_2_way_threadable =
- vc5->prog.fs->prog_data.fs->base.threads == 2;
+ v3d->prog.fs->prog_data.fs->base.threads == 2;
#endif
shader.vertex_id_read_by_coordinate_shader =
- vc5->prog.cs->prog_data.vs->uses_vid;
+ v3d->prog.cs->prog_data.vs->uses_vid;
shader.instance_id_read_by_coordinate_shader =
- vc5->prog.cs->prog_data.vs->uses_iid;
+ v3d->prog.cs->prog_data.vs->uses_iid;
shader.vertex_id_read_by_vertex_shader =
- vc5->prog.vs->prog_data.vs->uses_vid;
+ v3d->prog.vs->prog_data.vs->uses_vid;
shader.instance_id_read_by_vertex_shader =
- vc5->prog.vs->prog_data.vs->uses_iid;
+ v3d->prog.vs->prog_data.vs->uses_iid;
shader.address_of_default_attribute_values =
cl_address(vtx->default_attribute_values, 0);
struct pipe_vertex_element *elem = &vtx->pipe[i];
struct pipe_vertex_buffer *vb =
&vertexbuf->vb[elem->vertex_buffer_index];
- struct vc5_resource *rsc = vc5_resource(vb->buffer.resource);
+ struct v3d_resource *rsc = v3d_resource(vb->buffer.resource);
const uint32_t size =
cl_packet_length(GL_SHADER_STATE_ATTRIBUTE_RECORD);
vb->buffer_offset +
elem->src_offset);
attr.number_of_values_read_by_coordinate_shader =
- vc5->prog.cs->prog_data.vs->vattr_sizes[i];
+ v3d->prog.cs->prog_data.vs->vattr_sizes[i];
attr.number_of_values_read_by_vertex_shader =
- vc5->prog.vs->prog_data.vs->vattr_sizes[i];
+ v3d->prog.vs->prog_data.vs->vattr_sizes[i];
#if V3D_VERSION >= 41
attr.maximum_index = 0xffffff;
#endif
state.number_of_attribute_arrays = num_elements_to_emit;
}
- vc5_bo_unreference(&cs_uniforms.bo);
- vc5_bo_unreference(&vs_uniforms.bo);
- vc5_bo_unreference(&fs_uniforms.bo);
+ v3d_bo_unreference(&cs_uniforms.bo);
+ v3d_bo_unreference(&vs_uniforms.bo);
+ v3d_bo_unreference(&fs_uniforms.bo);
job->shader_rec_count++;
}
* recorded by CL packets.
*/
static void
-vc5_tf_statistics_record(struct vc5_context *vc5,
+v3d_tf_statistics_record(struct v3d_context *v3d,
const struct pipe_draw_info *info,
bool prim_tf)
{
- if (!vc5->active_queries)
+ if (!v3d->active_queries)
return;
uint32_t prims = u_prims_for_vertices(info->mode, info->count);
- vc5->prims_generated += prims;
+ v3d->prims_generated += prims;
if (prim_tf) {
/* XXX: Only count if we didn't overflow. */
- vc5->tf_prims_generated += prims;
+ v3d->tf_prims_generated += prims;
}
}
static void
-vc5_update_job_ez(struct vc5_context *vc5, struct vc5_job *job)
+v3d_update_job_ez(struct v3d_context *v3d, struct v3d_job *job)
{
- switch (vc5->zsa->ez_state) {
+ switch (v3d->zsa->ez_state) {
case VC5_EZ_UNDECIDED:
/* If the Z/S state didn't pick a direction but didn't
* disable, then go along with the current EZ state. This
* the current direction if we've decided on one.
*/
if (job->ez_state == VC5_EZ_UNDECIDED)
- job->ez_state = vc5->zsa->ez_state;
- else if (job->ez_state != vc5->zsa->ez_state)
+ job->ez_state = v3d->zsa->ez_state;
+ else if (job->ez_state != v3d->zsa->ez_state)
job->ez_state = VC5_EZ_DISABLED;
break;
* the chosen EZ direction (though we could use
* ARB_conservative_depth's hints to avoid this)
*/
- if (vc5->prog.fs->prog_data.fs->writes_z) {
+ if (v3d->prog.fs->prog_data.fs->writes_z) {
job->ez_state = VC5_EZ_DISABLED;
}
}
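Most of the switch is elided above; consolidated, the merge rule it implements is roughly this sketch (not the exact upstream control flow):

/* Sketch: fold the ZSA state's EZ direction into the job's. */
if (v3d->zsa->ez_state == VC5_EZ_UNDECIDED) {
        /* ZSA picked no direction: keep the job's current state. */
} else if (job->ez_state == VC5_EZ_UNDECIDED) {
        job->ez_state = v3d->zsa->ez_state;
} else if (job->ez_state != v3d->zsa->ez_state) {
        job->ez_state = VC5_EZ_DISABLED;
}

/* And a Z-writing fragment shader defeats any chosen direction. */
if (v3d->prog.fs->prog_data.fs->writes_z)
        job->ez_state = VC5_EZ_DISABLED;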
static void
-vc5_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
+v3d_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
- struct vc5_context *vc5 = vc5_context(pctx);
+ struct v3d_context *v3d = v3d_context(pctx);
if (!info->count_from_stream_output && !info->indirect &&
!info->primitive_restart &&
}
if (info->mode >= PIPE_PRIM_QUADS) {
- util_primconvert_save_rasterizer_state(vc5->primconvert, &vc5->rasterizer->base);
- util_primconvert_draw_vbo(vc5->primconvert, info);
+ util_primconvert_save_rasterizer_state(v3d->primconvert, &v3d->rasterizer->base);
+ util_primconvert_draw_vbo(v3d->primconvert, info);
perf_debug("Fallback conversion for %d %s vertices\n",
info->count, u_prim_name(info->mode));
return;
/* Before setting up the draw, flush anything writing to the textures
* that we read from.
*/
- vc5_predraw_check_textures(pctx, &vc5->verttex);
- vc5_predraw_check_textures(pctx, &vc5->fragtex);
+ v3d_predraw_check_textures(pctx, &v3d->verttex);
+ v3d_predraw_check_textures(pctx, &v3d->fragtex);
- struct vc5_job *job = vc5_get_job_for_fbo(vc5);
+ struct v3d_job *job = v3d_get_job_for_fbo(v3d);
/* Get space to emit our draw call into the BCL, using a branch to
* jump to a new BO if necessary.
*/
- vc5_cl_ensure_space_with_branch(&job->bcl, 256 /* XXX */);
+ v3d_cl_ensure_space_with_branch(&job->bcl, 256 /* XXX */);
- if (vc5->prim_mode != info->mode) {
- vc5->prim_mode = info->mode;
- vc5->dirty |= VC5_DIRTY_PRIM_MODE;
+ if (v3d->prim_mode != info->mode) {
+ v3d->prim_mode = info->mode;
+ v3d->dirty |= VC5_DIRTY_PRIM_MODE;
}
- vc5_start_draw(vc5);
- vc5_update_compiled_shaders(vc5, info->mode);
- vc5_update_job_ez(vc5, job);
+ v3d_start_draw(v3d);
+ v3d_update_compiled_shaders(v3d, info->mode);
+ v3d_update_job_ez(v3d, job);
#if V3D_VERSION >= 41
v3d41_emit_state(pctx);
v3d33_emit_state(pctx);
#endif
- if (vc5->dirty & (VC5_DIRTY_VTXBUF |
+ if (v3d->dirty & (VC5_DIRTY_VTXBUF |
VC5_DIRTY_VTXSTATE |
VC5_DIRTY_PRIM_MODE |
VC5_DIRTY_RASTERIZER |
VC5_DIRTY_COMPILED_CS |
VC5_DIRTY_COMPILED_VS |
VC5_DIRTY_COMPILED_FS |
- vc5->prog.cs->uniform_dirty_bits |
- vc5->prog.vs->uniform_dirty_bits |
- vc5->prog.fs->uniform_dirty_bits)) {
- vc5_emit_gl_shader_state(vc5, info);
+ v3d->prog.cs->uniform_dirty_bits |
+ v3d->prog.vs->uniform_dirty_bits |
+ v3d->prog.fs->uniform_dirty_bits)) {
+ v3d_emit_gl_shader_state(v3d, info);
}
- vc5->dirty = 0;
+ v3d->dirty = 0;
/* The Base Vertex/Base Instance packet sets those values to nonzero
* for the next draw call only.
/* V3D 3.x: The HW only processes transform feedback on primitives
* with the flag set.
*/
- if (vc5->streamout.num_targets)
+ if (v3d->streamout.num_targets)
prim_tf_enable = (V3D_PRIM_POINTS_TF - V3D_PRIM_POINTS);
#endif
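This works because the transform-feedback primitive enums sit at a fixed offset past the base primitives, so the enable value can simply be added to the primitive mode. Illustratively (assuming parallel _TF values exist for the other primitive types):

/* V3D_PRIM_POINTS    + prim_tf_enable == V3D_PRIM_POINTS_TF
 * V3D_PRIM_TRIANGLES + prim_tf_enable == V3D_PRIM_TRIANGLES_TF
 */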
- vc5_tf_statistics_record(vc5, info, vc5->streamout.num_targets);
+ v3d_tf_statistics_record(v3d, info, v3d->streamout.num_targets);
/* Note that the primitive type fields match the OpenGL/gallium
* definitions, up to but not including QUADS.
struct pipe_resource *prsc;
if (info->has_user_indices) {
prsc = NULL;
- u_upload_data(vc5->uploader, 0,
+ u_upload_data(v3d->uploader, 0,
info->count * info->index_size, 4,
info->index.user,
&offset, &prsc);
} else {
prsc = info->index.resource;
}
- struct vc5_resource *rsc = vc5_resource(prsc);
+ struct v3d_resource *rsc = v3d_resource(prsc);
#if V3D_VERSION >= 40
cl_emit(&job->bcl, INDEX_BUFFER_SETUP, ib) {
}
job->draw_calls_queued++;
- if (vc5->zsa && job->zsbuf &&
- (vc5->zsa->base.depth.enabled ||
- vc5->zsa->base.stencil[0].enabled)) {
- struct vc5_resource *rsc = vc5_resource(job->zsbuf->texture);
- vc5_job_add_bo(job, rsc->bo);
+ if (v3d->zsa && job->zsbuf &&
+ (v3d->zsa->base.depth.enabled ||
+ v3d->zsa->base.stencil[0].enabled)) {
+ struct v3d_resource *rsc = v3d_resource(job->zsbuf->texture);
+ v3d_job_add_bo(job, rsc->bo);
- if (vc5->zsa->base.depth.enabled) {
+ if (v3d->zsa->base.depth.enabled) {
job->resolve |= PIPE_CLEAR_DEPTH;
rsc->initialized_buffers = PIPE_CLEAR_DEPTH;
}
- if (vc5->zsa->base.stencil[0].enabled) {
+ if (v3d->zsa->base.stencil[0].enabled) {
job->resolve |= PIPE_CLEAR_STENCIL;
rsc->initialized_buffers |= PIPE_CLEAR_STENCIL;
}
if (job->resolve & bit || !job->cbufs[i])
continue;
- struct vc5_resource *rsc = vc5_resource(job->cbufs[i]->texture);
+ struct v3d_resource *rsc = v3d_resource(job->cbufs[i]->texture);
job->resolve |= bit;
- vc5_job_add_bo(job, rsc->bo);
+ v3d_job_add_bo(job, rsc->bo);
}
if (job->referenced_size > 768 * 1024 * 1024) {
perf_debug("Flushing job with %dkb to try to free up memory\n",
job->referenced_size / 1024);
- vc5_flush(pctx);
+ v3d_flush(pctx);
}
if (V3D_DEBUG & V3D_DEBUG_ALWAYS_FLUSH)
- vc5_flush(pctx);
+ v3d_flush(pctx);
}
static void
-vc5_clear(struct pipe_context *pctx, unsigned buffers,
+v3d_clear(struct pipe_context *pctx, unsigned buffers,
const union pipe_color_union *color, double depth, unsigned stencil)
{
- struct vc5_context *vc5 = vc5_context(pctx);
- struct vc5_job *job = vc5_get_job_for_fbo(vc5);
+ struct v3d_context *v3d = v3d_context(pctx);
+ struct v3d_job *job = v3d_get_job_for_fbo(v3d);
/* We can't flag new buffers for clearing once we've queued draws. We
* could avoid this by using the 3d engine to clear.
*/
if (job->draw_calls_queued) {
perf_debug("Flushing rendering to process new clear.\n");
- vc5_job_submit(vc5, job);
- job = vc5_get_job_for_fbo(vc5);
+ v3d_job_submit(v3d, job);
+ job = v3d_get_job_for_fbo(v3d);
}
for (int i = 0; i < VC5_MAX_DRAW_BUFFERS; i++) {
if (!(buffers & bit))
continue;
- struct pipe_surface *psurf = vc5->framebuffer.cbufs[i];
- struct vc5_surface *surf = vc5_surface(psurf);
- struct vc5_resource *rsc = vc5_resource(psurf->texture);
+ struct pipe_surface *psurf = v3d->framebuffer.cbufs[i];
+ struct v3d_surface *surf = v3d_surface(psurf);
+ struct v3d_resource *rsc = v3d_resource(psurf->texture);
union util_color uc;
uint32_t internal_size = 4 << surf->internal_bpp;
static union pipe_color_union swapped_color;
- if (vc5->swap_color_rb & (1 << i)) {
+ if (v3d->swap_color_rb & (1 << i)) {
swapped_color.f[0] = color->f[2];
swapped_color.f[1] = color->f[1];
swapped_color.f[2] = color->f[0];
unsigned zsclear = buffers & PIPE_CLEAR_DEPTHSTENCIL;
if (zsclear) {
- struct vc5_resource *rsc =
- vc5_resource(vc5->framebuffer.zsbuf->texture);
+ struct v3d_resource *rsc =
+ v3d_resource(v3d->framebuffer.zsbuf->texture);
if (zsclear & PIPE_CLEAR_DEPTH)
job->clear_z = depth;
job->draw_min_x = 0;
job->draw_min_y = 0;
- job->draw_max_x = vc5->framebuffer.width;
- job->draw_max_y = vc5->framebuffer.height;
+ job->draw_max_x = v3d->framebuffer.width;
+ job->draw_max_y = v3d->framebuffer.height;
job->cleared |= buffers;
job->resolve |= buffers;
- vc5_start_draw(vc5);
+ v3d_start_draw(v3d);
}
static void
-vc5_clear_render_target(struct pipe_context *pctx, struct pipe_surface *ps,
+v3d_clear_render_target(struct pipe_context *pctx, struct pipe_surface *ps,
const union pipe_color_union *color,
unsigned x, unsigned y, unsigned w, unsigned h,
bool render_condition_enabled)
}
static void
-vc5_clear_depth_stencil(struct pipe_context *pctx, struct pipe_surface *ps,
+v3d_clear_depth_stencil(struct pipe_context *pctx, struct pipe_surface *ps,
unsigned buffers, double depth, unsigned stencil,
unsigned x, unsigned y, unsigned w, unsigned h,
bool render_condition_enabled)
void
v3dX(draw_init)(struct pipe_context *pctx)
{
- pctx->draw_vbo = vc5_draw_vbo;
- pctx->clear = vc5_clear;
- pctx->clear_render_target = vc5_clear_render_target;
- pctx->clear_depth_stencil = vc5_clear_depth_stencil;
+ pctx->draw_vbo = v3d_draw_vbo;
+ pctx->clear = v3d_clear;
+ pctx->clear_render_target = v3d_clear_render_target;
+ pctx->clear_depth_stencil = v3d_clear_depth_stencil;
}
#include "broadcom/compiler/v3d_compiler.h"
static uint8_t
-vc5_factor(enum pipe_blendfactor factor, bool dst_alpha_one)
+v3d_factor(enum pipe_blendfactor factor, bool dst_alpha_one)
{
/* We may get a bad blendfactor when blending is disabled. */
if (factor == 0)
static inline uint16_t
swizzled_border_color(const struct v3d_device_info *devinfo,
struct pipe_sampler_state *sampler,
- struct vc5_sampler_view *sview,
+ struct v3d_sampler_view *sview,
int chan)
{
const struct util_format_description *desc =
* For swizzling in the shader, we don't do any pre-swizzling of the
* border color.
*/
- if (vc5_get_tex_return_size(devinfo, sview->base.format,
+ if (v3d_get_tex_return_size(devinfo, sview->base.format,
sampler->compare_mode) != 32)
swiz = desc->swizzle[swiz];
}
static void
-emit_one_texture(struct vc5_context *vc5, struct vc5_texture_stateobj *stage_tex,
+emit_one_texture(struct v3d_context *v3d, struct v3d_texture_stateobj *stage_tex,
int i)
{
- struct vc5_job *job = vc5->job;
+ struct v3d_job *job = v3d->job;
struct pipe_sampler_state *psampler = stage_tex->samplers[i];
- struct vc5_sampler_state *sampler = vc5_sampler_state(psampler);
+ struct v3d_sampler_state *sampler = v3d_sampler_state(psampler);
struct pipe_sampler_view *psview = stage_tex->textures[i];
- struct vc5_sampler_view *sview = vc5_sampler_view(psview);
+ struct v3d_sampler_view *sview = v3d_sampler_view(psview);
struct pipe_resource *prsc = psview->texture;
- struct vc5_resource *rsc = vc5_resource(prsc);
- const struct v3d_device_info *devinfo = &vc5->screen->devinfo;
+ struct v3d_resource *rsc = v3d_resource(prsc);
+ const struct v3d_device_info *devinfo = &v3d->screen->devinfo;
stage_tex->texture_state[i].offset =
- vc5_cl_ensure_space(&job->indirect,
+ v3d_cl_ensure_space(&job->indirect,
cl_packet_length(TEXTURE_SHADER_STATE),
32);
- vc5_bo_set_reference(&stage_tex->texture_state[i].bo,
+ v3d_bo_set_reference(&stage_tex->texture_state[i].bo,
job->indirect.bo);
- uint32_t return_size = vc5_get_tex_return_size(devinfo, psview->format,
+ uint32_t return_size = v3d_get_tex_return_size(devinfo, psview->format,
psampler->compare_mode);
struct V3D33_TEXTURE_SHADER_STATE unpacked = {
packed[i] |= sview->texture_shader_state[i] | sampler->texture_shader_state[i];
/* TMU indirect structs need to be 32b aligned. */
- vc5_cl_ensure_space(&job->indirect, ARRAY_SIZE(packed), 32);
+ v3d_cl_ensure_space(&job->indirect, ARRAY_SIZE(packed), 32);
cl_emit_prepacked(&job->indirect, &packed);
}
static void
-emit_textures(struct vc5_context *vc5, struct vc5_texture_stateobj *stage_tex)
+emit_textures(struct v3d_context *v3d, struct v3d_texture_stateobj *stage_tex)
{
for (int i = 0; i < stage_tex->num_textures; i++) {
if (stage_tex->textures[i])
- emit_one_texture(vc5, stage_tex, i);
+ emit_one_texture(v3d, stage_tex, i);
}
}
#endif /* V3D_VERSION < 40 */
static uint32_t
-translate_colormask(struct vc5_context *vc5, uint32_t colormask, int rt)
+translate_colormask(struct v3d_context *v3d, uint32_t colormask, int rt)
{
- if (vc5->swap_color_rb & (1 << rt)) {
+ if (v3d->swap_color_rb & (1 << rt)) {
colormask = ((colormask & (2 | 8)) |
((colormask & 1) << 2) |
((colormask & 4) >> 2));
}
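A worked example of the swap above, with gallium's colormask bits R = 1, G = 2, B = 4, A = 8: a mask of 0x1 (red only) becomes 0x4 (blue only), 0x4 becomes 0x1, and the G/A bits (2 | 8) pass through untouched.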
static void
-emit_rt_blend(struct vc5_context *vc5, struct vc5_job *job,
+emit_rt_blend(struct v3d_context *v3d, struct v3d_job *job,
struct pipe_blend_state *blend, int rt)
{
cl_emit(&job->bcl, BLEND_CONFIG, config) {
config.colour_blend_mode = rtblend->rgb_func;
config.colour_blend_dst_factor =
- vc5_factor(rtblend->rgb_dst_factor,
- vc5->blend_dst_alpha_one);
+ v3d_factor(rtblend->rgb_dst_factor,
+ v3d->blend_dst_alpha_one);
config.colour_blend_src_factor =
- vc5_factor(rtblend->rgb_src_factor,
- vc5->blend_dst_alpha_one);
+ v3d_factor(rtblend->rgb_src_factor,
+ v3d->blend_dst_alpha_one);
config.alpha_blend_mode = rtblend->alpha_func;
config.alpha_blend_dst_factor =
- vc5_factor(rtblend->alpha_dst_factor,
- vc5->blend_dst_alpha_one);
+ v3d_factor(rtblend->alpha_dst_factor,
+ v3d->blend_dst_alpha_one);
config.alpha_blend_src_factor =
- vc5_factor(rtblend->alpha_src_factor,
- vc5->blend_dst_alpha_one);
+ v3d_factor(rtblend->alpha_src_factor,
+ v3d->blend_dst_alpha_one);
}
}
void
v3dX(emit_state)(struct pipe_context *pctx)
{
- struct vc5_context *vc5 = vc5_context(pctx);
- struct vc5_job *job = vc5->job;
- bool rasterizer_discard = vc5->rasterizer->base.rasterizer_discard;
+ struct v3d_context *v3d = v3d_context(pctx);
+ struct v3d_job *job = v3d->job;
+ bool rasterizer_discard = v3d->rasterizer->base.rasterizer_discard;
- if (vc5->dirty & (VC5_DIRTY_SCISSOR | VC5_DIRTY_VIEWPORT |
+ if (v3d->dirty & (VC5_DIRTY_SCISSOR | VC5_DIRTY_VIEWPORT |
VC5_DIRTY_RASTERIZER)) {
- float *vpscale = vc5->viewport.scale;
- float *vptranslate = vc5->viewport.translate;
+ float *vpscale = v3d->viewport.scale;
+ float *vptranslate = v3d->viewport.translate;
float vp_minx = -fabsf(vpscale[0]) + vptranslate[0];
float vp_maxx = fabsf(vpscale[0]) + vptranslate[0];
float vp_miny = -fabsf(vpscale[1]) + vptranslate[1];
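As a worked example of these bounds: for a full-window viewport spanning X in [0, W], gallium supplies scale[0] == W / 2 and translate[0] == W / 2, so vp_minx == -W/2 + W/2 == 0 and vp_maxx == W/2 + W/2 == W.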
* primitives would rasterize outside of the view volume.
*/
uint32_t minx, miny, maxx, maxy;
- if (!vc5->rasterizer->base.scissor) {
+ if (!v3d->rasterizer->base.scissor) {
minx = MAX2(vp_minx, 0);
miny = MAX2(vp_miny, 0);
maxx = MIN2(vp_maxx, job->draw_width);
maxy = MIN2(vp_maxy, job->draw_height);
} else {
- minx = MAX2(vp_minx, vc5->scissor.minx);
- miny = MAX2(vp_miny, vc5->scissor.miny);
- maxx = MIN2(vp_maxx, vc5->scissor.maxx);
- maxy = MIN2(vp_maxy, vc5->scissor.maxy);
+ minx = MAX2(vp_minx, v3d->scissor.minx);
+ miny = MAX2(vp_miny, v3d->scissor.miny);
+ maxx = MIN2(vp_maxx, v3d->scissor.maxx);
+ maxy = MIN2(vp_maxy, v3d->scissor.maxy);
}
cl_emit(&job->bcl, CLIP_WINDOW, clip) {
job->draw_max_y = MAX2(job->draw_max_y, maxy);
}
- if (vc5->dirty & (VC5_DIRTY_RASTERIZER |
+ if (v3d->dirty & (VC5_DIRTY_RASTERIZER |
VC5_DIRTY_ZSA |
VC5_DIRTY_BLEND |
VC5_DIRTY_COMPILED_FS)) {
cl_emit(&job->bcl, CONFIGURATION_BITS, config) {
config.enable_forward_facing_primitive =
!rasterizer_discard &&
- !(vc5->rasterizer->base.cull_face &
+ !(v3d->rasterizer->base.cull_face &
PIPE_FACE_FRONT);
config.enable_reverse_facing_primitive =
!rasterizer_discard &&
- !(vc5->rasterizer->base.cull_face &
+ !(v3d->rasterizer->base.cull_face &
PIPE_FACE_BACK);
/* This seems backwards, but it's what gets the
* clipflat test to pass.
*/
config.clockwise_primitives =
- vc5->rasterizer->base.front_ccw;
+ v3d->rasterizer->base.front_ccw;
config.enable_depth_offset =
- vc5->rasterizer->base.offset_tri;
+ v3d->rasterizer->base.offset_tri;
config.rasterizer_oversample_mode =
- vc5->rasterizer->base.multisample;
+ v3d->rasterizer->base.multisample;
config.direct3d_provoking_vertex =
- vc5->rasterizer->base.flatshade_first;
+ v3d->rasterizer->base.flatshade_first;
- config.blend_enable = vc5->blend->rt[0].blend_enable;
+ config.blend_enable = v3d->blend->rt[0].blend_enable;
/* Note: EZ state may update based on the compiled FS,
* along with ZSA
*/
config.early_z_updates_enable =
(job->ez_state != VC5_EZ_DISABLED);
- if (vc5->zsa->base.depth.enabled) {
+ if (v3d->zsa->base.depth.enabled) {
config.z_updates_enable =
- vc5->zsa->base.depth.writemask;
+ v3d->zsa->base.depth.writemask;
config.early_z_enable =
config.early_z_updates_enable;
config.depth_test_function =
- vc5->zsa->base.depth.func;
+ v3d->zsa->base.depth.func;
} else {
config.depth_test_function = PIPE_FUNC_ALWAYS;
}
config.stencil_enable =
- vc5->zsa->base.stencil[0].enabled;
+ v3d->zsa->base.stencil[0].enabled;
}
}
- if (vc5->dirty & VC5_DIRTY_RASTERIZER &&
- vc5->rasterizer->base.offset_tri) {
+ if (v3d->dirty & VC5_DIRTY_RASTERIZER &&
+ v3d->rasterizer->base.offset_tri) {
cl_emit(&job->bcl, DEPTH_OFFSET, depth) {
depth.depth_offset_factor =
- vc5->rasterizer->offset_factor;
+ v3d->rasterizer->offset_factor;
depth.depth_offset_units =
- vc5->rasterizer->offset_units;
+ v3d->rasterizer->offset_units;
}
}
- if (vc5->dirty & VC5_DIRTY_RASTERIZER) {
+ if (v3d->dirty & VC5_DIRTY_RASTERIZER) {
cl_emit(&job->bcl, POINT_SIZE, point_size) {
- point_size.point_size = vc5->rasterizer->point_size;
+ point_size.point_size = v3d->rasterizer->point_size;
}
cl_emit(&job->bcl, LINE_WIDTH, line_width) {
- line_width.line_width = vc5->rasterizer->base.line_width;
+ line_width.line_width = v3d->rasterizer->base.line_width;
}
}
- if (vc5->dirty & VC5_DIRTY_VIEWPORT) {
+ if (v3d->dirty & VC5_DIRTY_VIEWPORT) {
cl_emit(&job->bcl, CLIPPER_XY_SCALING, clip) {
clip.viewport_half_width_in_1_256th_of_pixel =
- vc5->viewport.scale[0] * 256.0f;
+ v3d->viewport.scale[0] * 256.0f;
clip.viewport_half_height_in_1_256th_of_pixel =
- vc5->viewport.scale[1] * 256.0f;
+ v3d->viewport.scale[1] * 256.0f;
}
cl_emit(&job->bcl, CLIPPER_Z_SCALE_AND_OFFSET, clip) {
clip.viewport_z_offset_zc_to_zs =
- vc5->viewport.translate[2];
+ v3d->viewport.translate[2];
clip.viewport_z_scale_zc_to_zs =
- vc5->viewport.scale[2];
+ v3d->viewport.scale[2];
}
cl_emit(&job->bcl, CLIPPER_Z_MIN_MAX_CLIPPING_PLANES, clip) {
- clip.minimum_zw = (vc5->viewport.translate[2] -
- vc5->viewport.scale[2]);
- clip.maximum_zw = (vc5->viewport.translate[2] +
- vc5->viewport.scale[2]);
+ clip.minimum_zw = (v3d->viewport.translate[2] -
+ v3d->viewport.scale[2]);
+ clip.maximum_zw = (v3d->viewport.translate[2] +
+ v3d->viewport.scale[2]);
}
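Assuming gallium's usual depth mapping (scale[2] = (far - near) / 2, translate[2] = (far + near) / 2), the default [0, 1] depth range gives translate[2] = 0.5 and scale[2] = 0.5, so minimum_zw/maximum_zw come out to 0.0/1.0; a glDepthRange(0.2, 0.8) would give clip planes at 0.2/0.8 instead.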
cl_emit(&job->bcl, VIEWPORT_OFFSET, vp) {
vp.viewport_centre_x_coordinate =
- vc5->viewport.translate[0];
+ v3d->viewport.translate[0];
vp.viewport_centre_y_coordinate =
- vc5->viewport.translate[1];
+ v3d->viewport.translate[1];
}
}
- if (vc5->dirty & VC5_DIRTY_BLEND && vc5->blend->rt[0].blend_enable) {
- struct pipe_blend_state *blend = vc5->blend;
+ if (v3d->dirty & VC5_DIRTY_BLEND && v3d->blend->rt[0].blend_enable) {
+ struct pipe_blend_state *blend = v3d->blend;
if (blend->independent_blend_enable) {
for (int i = 0; i < VC5_MAX_DRAW_BUFFERS; i++)
- emit_rt_blend(vc5, job, blend, i);
+ emit_rt_blend(v3d, job, blend, i);
} else {
- emit_rt_blend(vc5, job, blend, 0);
+ emit_rt_blend(v3d, job, blend, 0);
}
}
- if (vc5->dirty & VC5_DIRTY_BLEND) {
- struct pipe_blend_state *blend = vc5->blend;
+ if (v3d->dirty & VC5_DIRTY_BLEND) {
+ struct pipe_blend_state *blend = v3d->blend;
cl_emit(&job->bcl, COLOUR_WRITE_MASKS, mask) {
if (blend->independent_blend_enable) {
mask.render_target_0_per_colour_component_write_masks =
- translate_colormask(vc5, blend->rt[0].colormask, 0);
+ translate_colormask(v3d, blend->rt[0].colormask, 0);
mask.render_target_1_per_colour_component_write_masks =
- translate_colormask(vc5, blend->rt[1].colormask, 1);
+ translate_colormask(v3d, blend->rt[1].colormask, 1);
mask.render_target_2_per_colour_component_write_masks =
- translate_colormask(vc5, blend->rt[2].colormask, 2);
+ translate_colormask(v3d, blend->rt[2].colormask, 2);
mask.render_target_3_per_colour_component_write_masks =
- translate_colormask(vc5, blend->rt[3].colormask, 3);
+ translate_colormask(v3d, blend->rt[3].colormask, 3);
} else {
mask.render_target_0_per_colour_component_write_masks =
- translate_colormask(vc5, blend->rt[0].colormask, 0);
+ translate_colormask(v3d, blend->rt[0].colormask, 0);
mask.render_target_1_per_colour_component_write_masks =
- translate_colormask(vc5, blend->rt[0].colormask, 1);
+ translate_colormask(v3d, blend->rt[0].colormask, 1);
mask.render_target_2_per_colour_component_write_masks =
- translate_colormask(vc5, blend->rt[0].colormask, 2);
+ translate_colormask(v3d, blend->rt[0].colormask, 2);
mask.render_target_3_per_colour_component_write_masks =
- translate_colormask(vc5, blend->rt[0].colormask, 3);
+ translate_colormask(v3d, blend->rt[0].colormask, 3);
}
}
}
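Note that when independent blending is disabled, rt[0]'s colormask is replicated to all four render targets, but each copy is still passed through translate_colormask() with its own RT index so the per-RT swap_color_rb swizzle is applied correctly.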
/* GFXH-1431: On V3D 3.x, writing BLEND_CONFIG resets the constant
* color.
*/
- if (vc5->dirty & VC5_DIRTY_BLEND_COLOR ||
- (V3D_VERSION < 41 && (vc5->dirty & VC5_DIRTY_BLEND))) {
+ if (v3d->dirty & VC5_DIRTY_BLEND_COLOR ||
+ (V3D_VERSION < 41 && (v3d->dirty & VC5_DIRTY_BLEND))) {
cl_emit(&job->bcl, BLEND_CONSTANT_COLOUR, colour) {
- colour.red_f16 = (vc5->swap_color_rb ?
- vc5->blend_color.hf[2] :
- vc5->blend_color.hf[0]);
- colour.green_f16 = vc5->blend_color.hf[1];
- colour.blue_f16 = (vc5->swap_color_rb ?
- vc5->blend_color.hf[0] :
- vc5->blend_color.hf[2]);
- colour.alpha_f16 = vc5->blend_color.hf[3];
+ colour.red_f16 = (v3d->swap_color_rb ?
+ v3d->blend_color.hf[2] :
+ v3d->blend_color.hf[0]);
+ colour.green_f16 = v3d->blend_color.hf[1];
+ colour.blue_f16 = (v3d->swap_color_rb ?
+ v3d->blend_color.hf[0] :
+ v3d->blend_color.hf[2]);
+ colour.alpha_f16 = v3d->blend_color.hf[3];
}
}
- if (vc5->dirty & (VC5_DIRTY_ZSA | VC5_DIRTY_STENCIL_REF)) {
- struct pipe_stencil_state *front = &vc5->zsa->base.stencil[0];
- struct pipe_stencil_state *back = &vc5->zsa->base.stencil[1];
+ if (v3d->dirty & (VC5_DIRTY_ZSA | VC5_DIRTY_STENCIL_REF)) {
+ struct pipe_stencil_state *front = &v3d->zsa->base.stencil[0];
+ struct pipe_stencil_state *back = &v3d->zsa->base.stencil[1];
if (front->enabled) {
cl_emit_with_prepacked(&job->bcl, STENCIL_CONFIG,
- vc5->zsa->stencil_front, config) {
+ v3d->zsa->stencil_front, config) {
config.stencil_ref_value =
- vc5->stencil_ref.ref_value[0];
+ v3d->stencil_ref.ref_value[0];
}
}
if (back->enabled) {
cl_emit_with_prepacked(&job->bcl, STENCIL_CONFIG,
- vc5->zsa->stencil_back, config) {
+ v3d->zsa->stencil_back, config) {
config.stencil_ref_value =
- vc5->stencil_ref.ref_value[1];
+ v3d->stencil_ref.ref_value[1];
}
}
}
/* Pre-4.x, we have texture state that depends on both the sampler and
* the view, so we merge them together at draw time.
*/
- if (vc5->dirty & VC5_DIRTY_FRAGTEX)
- emit_textures(vc5, &vc5->fragtex);
+ if (v3d->dirty & VC5_DIRTY_FRAGTEX)
+ emit_textures(v3d, &v3d->fragtex);
- if (vc5->dirty & VC5_DIRTY_VERTTEX)
- emit_textures(vc5, &vc5->verttex);
+ if (v3d->dirty & VC5_DIRTY_VERTTEX)
+ emit_textures(v3d, &v3d->verttex);
#endif
- if (vc5->dirty & VC5_DIRTY_FLAT_SHADE_FLAGS) {
+ if (v3d->dirty & VC5_DIRTY_FLAT_SHADE_FLAGS) {
bool emitted_any = false;
- for (int i = 0; i < ARRAY_SIZE(vc5->prog.fs->prog_data.fs->flat_shade_flags); i++) {
- if (!vc5->prog.fs->prog_data.fs->flat_shade_flags[i])
+ for (int i = 0; i < ARRAY_SIZE(v3d->prog.fs->prog_data.fs->flat_shade_flags); i++) {
+ if (!v3d->prog.fs->prog_data.fs->flat_shade_flags[i])
continue;
cl_emit(&job->bcl, FLAT_SHADE_FLAGS, flags) {
flags.flat_shade_flags_for_varyings_v024 =
- vc5->prog.fs->prog_data.fs->flat_shade_flags[i];
+ v3d->prog.fs->prog_data.fs->flat_shade_flags[i];
}
emitted_any = true;
}
}
#if V3D_VERSION >= 40
- if (vc5->dirty & VC5_DIRTY_CENTROID_FLAGS) {
+ if (v3d->dirty & VC5_DIRTY_CENTROID_FLAGS) {
bool emitted_any = false;
- for (int i = 0; i < ARRAY_SIZE(vc5->prog.fs->prog_data.fs->centroid_flags); i++) {
- if (!vc5->prog.fs->prog_data.fs->centroid_flags[i])
+ for (int i = 0; i < ARRAY_SIZE(v3d->prog.fs->prog_data.fs->centroid_flags); i++) {
+ if (!v3d->prog.fs->prog_data.fs->centroid_flags[i])
continue;
cl_emit(&job->bcl, CENTROID_FLAGS, flags) {
flags.centroid_flags_for_varyings_v024 =
- vc5->prog.fs->prog_data.fs->centroid_flags[i];
+ v3d->prog.fs->prog_data.fs->centroid_flags[i];
}
emitted_any = true;
}
/* Set up the transform feedback data specs (which VPM entries to
* output to which buffers).
*/
- if (vc5->dirty & (VC5_DIRTY_STREAMOUT |
+ if (v3d->dirty & (VC5_DIRTY_STREAMOUT |
VC5_DIRTY_RASTERIZER |
VC5_DIRTY_PRIM_MODE)) {
- struct vc5_streamout_stateobj *so = &vc5->streamout;
+ struct v3d_streamout_stateobj *so = &v3d->streamout;
if (so->num_targets) {
- bool psiz_per_vertex = (vc5->prim_mode == PIPE_PRIM_POINTS &&
- vc5->rasterizer->base.point_size_per_vertex);
+ bool psiz_per_vertex = (v3d->prim_mode == PIPE_PRIM_POINTS &&
+ v3d->rasterizer->base.point_size_per_vertex);
uint16_t *tf_specs = (psiz_per_vertex ?
- vc5->prog.bind_vs->tf_specs_psiz :
- vc5->prog.bind_vs->tf_specs);
+ v3d->prog.bind_vs->tf_specs_psiz :
+ v3d->prog.bind_vs->tf_specs);
#if V3D_VERSION >= 40
- job->tf_enabled = (vc5->prog.bind_vs->num_tf_specs != 0 &&
- vc5->active_queries);
+ job->tf_enabled = (v3d->prog.bind_vs->num_tf_specs != 0 &&
+ v3d->active_queries);
cl_emit(&job->bcl, TRANSFORM_FEEDBACK_SPECS, tfe) {
tfe.number_of_16_bit_output_data_specs_following =
- vc5->prog.bind_vs->num_tf_specs;
+ v3d->prog.bind_vs->num_tf_specs;
tfe.enable = job->tf_enabled;
};
#else /* V3D_VERSION < 40 */
tfe.number_of_32_bit_output_buffer_address_following =
so->num_targets;
tfe.number_of_16_bit_output_data_specs_following =
- vc5->prog.bind_vs->num_tf_specs;
+ v3d->prog.bind_vs->num_tf_specs;
};
#endif /* V3D_VERSION < 40 */
- for (int i = 0; i < vc5->prog.bind_vs->num_tf_specs; i++) {
+ for (int i = 0; i < v3d->prog.bind_vs->num_tf_specs; i++) {
cl_emit_prepacked(&job->bcl, &tf_specs[i]);
}
} else if (job->tf_enabled) {
}
/* Set up the transform feedback buffers. */
- if (vc5->dirty & VC5_DIRTY_STREAMOUT) {
- struct vc5_streamout_stateobj *so = &vc5->streamout;
+ if (v3d->dirty & VC5_DIRTY_STREAMOUT) {
+ struct v3d_streamout_stateobj *so = &v3d->streamout;
for (int i = 0; i < so->num_targets; i++) {
const struct pipe_stream_output_target *target =
so->targets[i];
- struct vc5_resource *rsc = target ?
- vc5_resource(target->buffer) : NULL;
+ struct v3d_resource *rsc = target ?
+ v3d_resource(target->buffer) : NULL;
#if V3D_VERSION >= 40
if (!target)
};
#endif /* V3D_VERSION < 40 */
if (target) {
- vc5_job_add_write_resource(vc5->job,
+ v3d_job_add_write_resource(v3d->job,
target->buffer);
}
/* XXX: buffer_size? */
}
}
- if (vc5->dirty & VC5_DIRTY_OQ) {
+ if (v3d->dirty & VC5_DIRTY_OQ) {
cl_emit(&job->bcl, OCCLUSION_QUERY_COUNTER, counter) {
- job->oq_enabled = vc5->active_queries && vc5->current_oq;
+ job->oq_enabled = v3d->active_queries && v3d->current_oq;
if (job->oq_enabled) {
- counter.address = cl_address(vc5->current_oq, 0);
+ counter.address = cl_address(v3d->current_oq, 0);
}
}
}
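When no query is active, the counter packet is still emitted, just without an address; the assumption encoded here appears to be that a null counter address is what disables occlusion counting in the hardware.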
#define SWIZ_XXXX SWIZ(X, X, X, X)
#define SWIZ_000X SWIZ(0, 0, 0, X)
-static const struct vc5_format format_table[] = {
+static const struct v3d_format format_table[] = {
FORMAT(B8G8R8A8_UNORM, RGBA8, RGBA8, SWIZ_ZYXW, 16, 0),
FORMAT(B8G8R8X8_UNORM, RGBA8, RGBA8, SWIZ_ZYX1, 16, 0),
FORMAT(B8G8R8A8_SRGB, SRGB8_ALPHA8, RGBA8, SWIZ_ZYXW, 16, 0),
FORMAT(DXT5_RGBA, NO, BC3, SWIZ_XYZ1, 16, 0),
};
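Going by the entries, the FORMAT() macro columns appear to be (pipe format, render-target type, texture type, channel swizzle, internal return size, return channels), with NO marking a format that can be sampled but not rendered to, as in the DXT5 entry.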
-const struct vc5_format *
+const struct v3d_format *
v3dX(get_format_desc)(enum pipe_format f)
{
if (f < ARRAY_SIZE(format_table) && format_table[f].present)
#include "v3d_context.h"
#include "broadcom/cle/v3dx_pack.h"
-void v3dX(bcl_epilogue)(struct vc5_context *vc5, struct vc5_job *job)
+void v3dX(bcl_epilogue)(struct v3d_context *v3d, struct v3d_job *job)
{
- vc5_cl_ensure_space_with_branch(&job->bcl,
+ v3d_cl_ensure_space_with_branch(&job->bcl,
cl_packet_length(OCCLUSION_QUERY_COUNTER) +
#if V3D_VERSION >= 41
cl_packet_length(TRANSFORM_FEEDBACK_SPECS) +
* dummy store.
*/
static void
-flush_last_load(struct vc5_cl *cl)
+flush_last_load(struct v3d_cl *cl)
{
if (V3D_VERSION >= 40)
return;
}
static void
-load_general(struct vc5_cl *cl, struct pipe_surface *psurf, int buffer,
+load_general(struct v3d_cl *cl, struct pipe_surface *psurf, int buffer,
uint32_t pipe_bit, uint32_t *loads_pending)
{
- struct vc5_surface *surf = vc5_surface(psurf);
+ struct v3d_surface *surf = v3d_surface(psurf);
bool separate_stencil = surf->separate_stencil && buffer == STENCIL;
if (separate_stencil) {
psurf = surf->separate_stencil;
- surf = vc5_surface(psurf);
+ surf = v3d_surface(psurf);
}
- struct vc5_resource *rsc = vc5_resource(psurf->texture);
+ struct v3d_resource *rsc = v3d_resource(psurf->texture);
cl_emit(cl, LOAD_TILE_BUFFER_GENERAL, load) {
load.buffer_to_load = buffer;
load.height_in_ub_or_stride =
surf->padded_height_of_output_image_in_uif_blocks;
} else if (surf->tiling == VC5_TILING_RASTER) {
- struct vc5_resource_slice *slice =
+ struct v3d_resource_slice *slice =
&rsc->slices[psurf->u.tex.level];
load.height_in_ub_or_stride = slice->stride;
}
}
static void
-store_general(struct vc5_job *job,
- struct vc5_cl *cl, struct pipe_surface *psurf, int buffer,
+store_general(struct v3d_job *job,
+ struct v3d_cl *cl, struct pipe_surface *psurf, int buffer,
int pipe_bit, uint32_t *stores_pending, bool general_color_clear)
{
- struct vc5_surface *surf = vc5_surface(psurf);
+ struct v3d_surface *surf = v3d_surface(psurf);
bool separate_stencil = surf->separate_stencil && buffer == STENCIL;
if (separate_stencil) {
psurf = surf->separate_stencil;
- surf = vc5_surface(psurf);
+ surf = v3d_surface(psurf);
}
*stores_pending &= ~pipe_bit;
bool last_store = !(*stores_pending);
- struct vc5_resource *rsc = vc5_resource(psurf->texture);
+ struct v3d_resource *rsc = v3d_resource(psurf->texture);
rsc->writes++;
store.height_in_ub_or_stride =
surf->padded_height_of_output_image_in_uif_blocks;
} else if (surf->tiling == VC5_TILING_RASTER) {
- struct vc5_resource_slice *slice =
+ struct v3d_resource_slice *slice =
&rsc->slices[psurf->u.tex.level];
store.height_in_ub_or_stride = slice->stride;
}
}
static void
-vc5_rcl_emit_loads(struct vc5_job *job, struct vc5_cl *cl)
+v3d_rcl_emit_loads(struct v3d_job *job, struct v3d_cl *cl)
{
uint32_t loads_pending = job->resolve & ~job->cleared;
if ((loads_pending & PIPE_CLEAR_DEPTHSTENCIL) &&
(V3D_VERSION >= 40 ||
(job->zsbuf && job->zsbuf->texture->nr_samples > 1))) {
- struct vc5_resource *rsc = vc5_resource(job->zsbuf->texture);
+ struct v3d_resource *rsc = v3d_resource(job->zsbuf->texture);
if (rsc->separate_stencil &&
(loads_pending & PIPE_CLEAR_STENCIL)) {
}
static void
-vc5_rcl_emit_stores(struct vc5_job *job, struct vc5_cl *cl)
+v3d_rcl_emit_stores(struct v3d_job *job, struct v3d_cl *cl)
{
MAYBE_UNUSED bool needs_color_clear = job->cleared & PIPE_CLEAR_COLOR_BUFFERS;
MAYBE_UNUSED bool needs_z_clear = job->cleared & PIPE_CLEAR_DEPTH;
if (job->resolve & PIPE_CLEAR_DEPTHSTENCIL && job->zsbuf &&
!(V3D_VERSION < 40 && job->zsbuf->texture->nr_samples <= 1)) {
- struct vc5_resource *rsc = vc5_resource(job->zsbuf->texture);
+ struct v3d_resource *rsc = v3d_resource(job->zsbuf->texture);
if (rsc->separate_stencil) {
if (job->resolve & PIPE_CLEAR_DEPTH) {
store_general(job, cl, job->zsbuf, Z,
}
static void
-vc5_rcl_emit_generic_per_tile_list(struct vc5_job *job, int last_cbuf)
+v3d_rcl_emit_generic_per_tile_list(struct v3d_job *job, int last_cbuf)
{
/* Emit the generic list in our indirect state -- the rcl will just
* have pointers into it.
*/
- struct vc5_cl *cl = &job->indirect;
- vc5_cl_ensure_space(cl, 200, 1);
- struct vc5_cl_reloc tile_list_start = cl_get_address(cl);
+ struct v3d_cl *cl = &job->indirect;
+ v3d_cl_ensure_space(cl, 200, 1);
+ struct v3d_cl_reloc tile_list_start = cl_get_address(cl);
if (V3D_VERSION >= 40) {
/* V3D 4.x only requires a single set of tile coordinates, and
cl_emit(cl, TILE_COORDINATES_IMPLICIT, coords);
}
- vc5_rcl_emit_loads(job, cl);
+ v3d_rcl_emit_loads(job, cl);
if (V3D_VERSION < 40) {
/* Tile Coordinates triggers the last reload and sets where
cl_emit(cl, BRANCH_TO_IMPLICIT_TILE_LIST, branch);
- vc5_rcl_emit_stores(job, cl);
+ v3d_rcl_emit_stores(job, cl);
#if V3D_VERSION >= 40
cl_emit(cl, END_OF_TILE_MARKER, end);
#if V3D_VERSION >= 40
static void
-v3d_setup_render_target(struct vc5_job *job, int cbuf,
+v3d_setup_render_target(struct v3d_job *job, int cbuf,
uint32_t *rt_bpp, uint32_t *rt_type, uint32_t *rt_clamp)
{
if (!job->cbufs[cbuf])
return;
- struct vc5_surface *surf = vc5_surface(job->cbufs[cbuf]);
+ struct v3d_surface *surf = v3d_surface(job->cbufs[cbuf]);
*rt_bpp = surf->internal_bpp;
*rt_type = surf->internal_type;
*rt_clamp = V3D_RENDER_TARGET_CLAMP_NONE;
#else /* V3D_VERSION < 40 */
static void
-v3d_emit_z_stencil_config(struct vc5_job *job, struct vc5_surface *surf,
- struct vc5_resource *rsc, bool is_separate_stencil)
+v3d_emit_z_stencil_config(struct v3d_job *job, struct v3d_surface *surf,
+ struct v3d_resource *rsc, bool is_separate_stencil)
{
cl_emit(&job->rcl, TILE_RENDERING_MODE_CONFIGURATION_Z_STENCIL_CONFIG, zs) {
zs.address = cl_address(rsc->bo, surf->offset);
#define div_round_up(a, b) (((a) + (b) - 1) / (b))
void
-v3dX(emit_rcl)(struct vc5_job *job)
+v3dX(emit_rcl)(struct v3d_job *job)
{
/* The RCL list should be empty. */
assert(!job->rcl.bo);
- vc5_cl_ensure_space_with_branch(&job->rcl, 200 + 256 *
+ v3d_cl_ensure_space_with_branch(&job->rcl, 200 + 256 *
cl_packet_length(SUPERTILE_COORDINATES));
job->submit.rcl_start = job->rcl.bo->offset;
- vc5_job_add_bo(job, job->rcl.bo);
+ v3d_job_add_bo(job, job->rcl.bo);
int nr_cbufs = 0;
for (int i = 0; i < VC5_MAX_DRAW_BUFFERS; i++) {
config.enable_stencil_store = job->resolve & PIPE_CLEAR_STENCIL;
#else /* V3D_VERSION >= 40 */
if (job->zsbuf) {
- struct vc5_surface *surf = vc5_surface(job->zsbuf);
+ struct v3d_surface *surf = v3d_surface(job->zsbuf);
config.internal_depth_type = surf->internal_type;
}
#endif /* V3D_VERSION >= 40 */
struct pipe_surface *psurf = job->cbufs[i];
if (!psurf)
continue;
- struct vc5_surface *surf = vc5_surface(psurf);
- struct vc5_resource *rsc = vc5_resource(psurf->texture);
+ struct v3d_surface *surf = v3d_surface(psurf);
+ struct v3d_resource *rsc = v3d_resource(psurf->texture);
MAYBE_UNUSED uint32_t config_pad = 0;
uint32_t clear_pad = 0;
/* XXX: Set the pad for raster. */
if (surf->tiling == VC5_TILING_UIF_NO_XOR ||
surf->tiling == VC5_TILING_UIF_XOR) {
- int uif_block_height = vc5_utile_height(rsc->cpp) * 2;
+ int uif_block_height = v3d_utile_height(rsc->cpp) * 2;
uint32_t implicit_padded_height = (align(job->draw_height, uif_block_height) /
uif_block_height);
if (surf->padded_height_of_output_image_in_uif_blocks -
/* TODO: Don't bother emitting if we don't load/clear Z/S. */
if (job->zsbuf) {
struct pipe_surface *psurf = job->zsbuf;
- struct vc5_surface *surf = vc5_surface(psurf);
- struct vc5_resource *rsc = vc5_resource(psurf->texture);
+ struct v3d_surface *surf = v3d_surface(psurf);
+ struct v3d_resource *rsc = v3d_resource(psurf->texture);
v3d_emit_z_stencil_config(job, surf, rsc, false);
*/
if (surf->separate_stencil) {
v3d_emit_z_stencil_config(job,
- vc5_surface(surf->separate_stencil),
+ v3d_surface(surf->separate_stencil),
rsc->separate_stencil, true);
}
}
cl_emit(&job->rcl, FLUSH_VCD_CACHE, flush);
- vc5_rcl_emit_generic_per_tile_list(job, nr_cbufs - 1);
+ v3d_rcl_emit_generic_per_tile_list(job, nr_cbufs - 1);
cl_emit(&job->rcl, WAIT_ON_SEMAPHORE, sem);
*/
/**
- * @file vc5_simulator_hw.c
+ * @file v3d_simulator_hw.c
*
* Implements the actual HW interaction between the GL driver's VC5 simulator and the simulator library.
*
#define V3D_READ(reg) v3d_hw_read_reg(v3d, reg)
static void
-vc5_flush_l3(struct v3d_hw *v3d)
+v3d_flush_l3(struct v3d_hw *v3d)
{
if (!v3d_hw_has_gca(v3d))
return;
/* Invalidates the L2 cache. This is a read-only cache. */
static void
-vc5_flush_l2(struct v3d_hw *v3d)
+v3d_flush_l2(struct v3d_hw *v3d)
{
V3D_WRITE(V3D_CTL_0_L2CACTL,
V3D_CTL_0_L2CACTL_L2CCLR_SET |
/* Invalidates texture L2 cachelines */
static void
-vc5_flush_l2t(struct v3d_hw *v3d)
+v3d_flush_l2t(struct v3d_hw *v3d)
{
V3D_WRITE(V3D_CTL_0_L2TFLSTA, 0);
V3D_WRITE(V3D_CTL_0_L2TFLEND, ~0);
/* Invalidates the slice caches. These are read-only caches. */
static void
-vc5_flush_slices(struct v3d_hw *v3d)
+v3d_flush_slices(struct v3d_hw *v3d)
{
V3D_WRITE(V3D_CTL_0_SLCACTL, ~0);
}
static void
-vc5_flush_caches(struct v3d_hw *v3d)
+v3d_flush_caches(struct v3d_hw *v3d)
{
- vc5_flush_l3(v3d);
- vc5_flush_l2(v3d);
- vc5_flush_l2t(v3d);
- vc5_flush_slices(v3d);
+ v3d_flush_l3(v3d);
+ v3d_flush_l2(v3d);
+ v3d_flush_l2t(v3d);
+ v3d_flush_slices(v3d);
}
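There is no kernel driver in the simulator path, so the driver has to take on the cache maintenance the kernel would normally perform: v3d_flush_caches() is called below right before the CT0Q queue registers are written to kick the control list.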
int
;
}
- vc5_flush_caches(v3d);
+ v3d_flush_caches(v3d);
if (submit->qma) {
V3D_WRITE(V3D_CLE_0_CT0QMA, submit->qma);
#include "broadcom/cle/v3dx_pack.h"
static void *
-vc5_generic_cso_state_create(const void *src, uint32_t size)
+v3d_generic_cso_state_create(const void *src, uint32_t size)
{
void *dst = calloc(1, size);
if (!dst)
}
static void
-vc5_generic_cso_state_delete(struct pipe_context *pctx, void *hwcso)
+v3d_generic_cso_state_delete(struct pipe_context *pctx, void *hwcso)
{
free(hwcso);
}
static void
-vc5_set_blend_color(struct pipe_context *pctx,
+v3d_set_blend_color(struct pipe_context *pctx,
const struct pipe_blend_color *blend_color)
{
- struct vc5_context *vc5 = vc5_context(pctx);
- vc5->blend_color.f = *blend_color;
+ struct v3d_context *v3d = v3d_context(pctx);
+ v3d->blend_color.f = *blend_color;
for (int i = 0; i < 4; i++) {
- vc5->blend_color.hf[i] =
+ v3d->blend_color.hf[i] =
util_float_to_half(blend_color->color[i]);
}
- vc5->dirty |= VC5_DIRTY_BLEND_COLOR;
+ v3d->dirty |= VC5_DIRTY_BLEND_COLOR;
}
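The blend color is converted to half floats once here at set-time because the BLEND_CONSTANT_COLOUR packet emitted in v3dX(emit_state) takes f16 channels; caching both representations keeps the conversion off the draw path.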
static void
-vc5_set_stencil_ref(struct pipe_context *pctx,
+v3d_set_stencil_ref(struct pipe_context *pctx,
const struct pipe_stencil_ref *stencil_ref)
{
- struct vc5_context *vc5 = vc5_context(pctx);
- vc5->stencil_ref = *stencil_ref;
- vc5->dirty |= VC5_DIRTY_STENCIL_REF;
+ struct v3d_context *v3d = v3d_context(pctx);
+ v3d->stencil_ref = *stencil_ref;
+ v3d->dirty |= VC5_DIRTY_STENCIL_REF;
}
static void
-vc5_set_clip_state(struct pipe_context *pctx,
+v3d_set_clip_state(struct pipe_context *pctx,
const struct pipe_clip_state *clip)
{
- struct vc5_context *vc5 = vc5_context(pctx);
- vc5->clip = *clip;
- vc5->dirty |= VC5_DIRTY_CLIP;
+ struct v3d_context *v3d = v3d_context(pctx);
+ v3d->clip = *clip;
+ v3d->dirty |= VC5_DIRTY_CLIP;
}
static void
-vc5_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
+v3d_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
{
- struct vc5_context *vc5 = vc5_context(pctx);
- vc5->sample_mask = sample_mask & ((1 << VC5_MAX_SAMPLES) - 1);
- vc5->dirty |= VC5_DIRTY_SAMPLE_MASK;
+ struct v3d_context *v3d = v3d_context(pctx);
+ v3d->sample_mask = sample_mask & ((1 << VC5_MAX_SAMPLES) - 1);
+ v3d->dirty |= VC5_DIRTY_SAMPLE_MASK;
}
static uint16_t
}
static void *
-vc5_create_rasterizer_state(struct pipe_context *pctx,
+v3d_create_rasterizer_state(struct pipe_context *pctx,
const struct pipe_rasterizer_state *cso)
{
- struct vc5_rasterizer_state *so;
+ struct v3d_rasterizer_state *so;
- so = CALLOC_STRUCT(vc5_rasterizer_state);
+ so = CALLOC_STRUCT(v3d_rasterizer_state);
if (!so)
return NULL;
/* Blend state is baked into shaders. */
static void *
-vc5_create_blend_state(struct pipe_context *pctx,
+v3d_create_blend_state(struct pipe_context *pctx,
const struct pipe_blend_state *cso)
{
- return vc5_generic_cso_state_create(cso, sizeof(*cso));
+ return v3d_generic_cso_state_create(cso, sizeof(*cso));
}
static uint32_t
}
static void *
-vc5_create_depth_stencil_alpha_state(struct pipe_context *pctx,
+v3d_create_depth_stencil_alpha_state(struct pipe_context *pctx,
const struct pipe_depth_stencil_alpha_state *cso)
{
- struct vc5_depth_stencil_alpha_state *so;
+ struct v3d_depth_stencil_alpha_state *so;
- so = CALLOC_STRUCT(vc5_depth_stencil_alpha_state);
+ so = CALLOC_STRUCT(v3d_depth_stencil_alpha_state);
if (!so)
return NULL;
}
static void
-vc5_set_polygon_stipple(struct pipe_context *pctx,
+v3d_set_polygon_stipple(struct pipe_context *pctx,
const struct pipe_poly_stipple *stipple)
{
- struct vc5_context *vc5 = vc5_context(pctx);
- vc5->stipple = *stipple;
- vc5->dirty |= VC5_DIRTY_STIPPLE;
+ struct v3d_context *v3d = v3d_context(pctx);
+ v3d->stipple = *stipple;
+ v3d->dirty |= VC5_DIRTY_STIPPLE;
}
static void
-vc5_set_scissor_states(struct pipe_context *pctx,
+v3d_set_scissor_states(struct pipe_context *pctx,
unsigned start_slot,
unsigned num_scissors,
const struct pipe_scissor_state *scissor)
{
- struct vc5_context *vc5 = vc5_context(pctx);
+ struct v3d_context *v3d = v3d_context(pctx);
- vc5->scissor = *scissor;
- vc5->dirty |= VC5_DIRTY_SCISSOR;
+ v3d->scissor = *scissor;
+ v3d->dirty |= VC5_DIRTY_SCISSOR;
}
static void
-vc5_set_viewport_states(struct pipe_context *pctx,
+v3d_set_viewport_states(struct pipe_context *pctx,
unsigned start_slot,
unsigned num_viewports,
const struct pipe_viewport_state *viewport)
{
- struct vc5_context *vc5 = vc5_context(pctx);
- vc5->viewport = *viewport;
- vc5->dirty |= VC5_DIRTY_VIEWPORT;
+ struct v3d_context *v3d = v3d_context(pctx);
+ v3d->viewport = *viewport;
+ v3d->dirty |= VC5_DIRTY_VIEWPORT;
}
static void
-vc5_set_vertex_buffers(struct pipe_context *pctx,
+v3d_set_vertex_buffers(struct pipe_context *pctx,
unsigned start_slot, unsigned count,
const struct pipe_vertex_buffer *vb)
{
- struct vc5_context *vc5 = vc5_context(pctx);
- struct vc5_vertexbuf_stateobj *so = &vc5->vertexbuf;
+ struct v3d_context *v3d = v3d_context(pctx);
+ struct v3d_vertexbuf_stateobj *so = &v3d->vertexbuf;
util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb,
start_slot, count);
so->count = util_last_bit(so->enabled_mask);
- vc5->dirty |= VC5_DIRTY_VTXBUF;
+ v3d->dirty |= VC5_DIRTY_VTXBUF;
}
static void
-vc5_blend_state_bind(struct pipe_context *pctx, void *hwcso)
+v3d_blend_state_bind(struct pipe_context *pctx, void *hwcso)
{
- struct vc5_context *vc5 = vc5_context(pctx);
- vc5->blend = hwcso;
- vc5->dirty |= VC5_DIRTY_BLEND;
+ struct v3d_context *v3d = v3d_context(pctx);
+ v3d->blend = hwcso;
+ v3d->dirty |= VC5_DIRTY_BLEND;
}
static void
-vc5_rasterizer_state_bind(struct pipe_context *pctx, void *hwcso)
+v3d_rasterizer_state_bind(struct pipe_context *pctx, void *hwcso)
{
- struct vc5_context *vc5 = vc5_context(pctx);
- vc5->rasterizer = hwcso;
- vc5->dirty |= VC5_DIRTY_RASTERIZER;
+ struct v3d_context *v3d = v3d_context(pctx);
+ v3d->rasterizer = hwcso;
+ v3d->dirty |= VC5_DIRTY_RASTERIZER;
}
static void
-vc5_zsa_state_bind(struct pipe_context *pctx, void *hwcso)
+v3d_zsa_state_bind(struct pipe_context *pctx, void *hwcso)
{
- struct vc5_context *vc5 = vc5_context(pctx);
- vc5->zsa = hwcso;
- vc5->dirty |= VC5_DIRTY_ZSA;
+ struct v3d_context *v3d = v3d_context(pctx);
+ v3d->zsa = hwcso;
+ v3d->dirty |= VC5_DIRTY_ZSA;
}
static void *
-vc5_vertex_state_create(struct pipe_context *pctx, unsigned num_elements,
+v3d_vertex_state_create(struct pipe_context *pctx, unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vc5_context *vc5 = vc5_context(pctx);
- struct vc5_vertex_stateobj *so = CALLOC_STRUCT(vc5_vertex_stateobj);
+ struct v3d_context *v3d = v3d_context(pctx);
+ struct v3d_vertex_stateobj *so = CALLOC_STRUCT(v3d_vertex_stateobj);
if (!so)
return NULL;
/* Set up the default attribute values in case any of the vertex
* elements use them.
*/
- so->default_attribute_values = vc5_bo_alloc(vc5->screen,
+ so->default_attribute_values = v3d_bo_alloc(v3d->screen,
VC5_MAX_ATTRIBUTES *
4 * sizeof(float),
"default attributes");
- uint32_t *attrs = vc5_bo_map(so->default_attribute_values);
+ uint32_t *attrs = v3d_bo_map(so->default_attribute_values);
for (int i = 0; i < VC5_MAX_ATTRIBUTES; i++) {
attrs[i * 4 + 0] = 0;
attrs[i * 4 + 1] = 0;
}
static void
-vc5_vertex_state_bind(struct pipe_context *pctx, void *hwcso)
+v3d_vertex_state_bind(struct pipe_context *pctx, void *hwcso)
{
- struct vc5_context *vc5 = vc5_context(pctx);
- vc5->vtx = hwcso;
- vc5->dirty |= VC5_DIRTY_VTXSTATE;
+ struct v3d_context *v3d = v3d_context(pctx);
+ v3d->vtx = hwcso;
+ v3d->dirty |= VC5_DIRTY_VTXSTATE;
}
static void
-vc5_set_constant_buffer(struct pipe_context *pctx, uint shader, uint index,
+v3d_set_constant_buffer(struct pipe_context *pctx, uint shader, uint index,
const struct pipe_constant_buffer *cb)
{
- struct vc5_context *vc5 = vc5_context(pctx);
- struct vc5_constbuf_stateobj *so = &vc5->constbuf[shader];
+ struct v3d_context *v3d = v3d_context(pctx);
+ struct v3d_constbuf_stateobj *so = &v3d->constbuf[shader];
util_copy_constant_buffer(&so->cb[index], cb);
so->enabled_mask |= 1 << index;
so->dirty_mask |= 1 << index;
- vc5->dirty |= VC5_DIRTY_CONSTBUF;
+ v3d->dirty |= VC5_DIRTY_CONSTBUF;
}
static void
-vc5_set_framebuffer_state(struct pipe_context *pctx,
+v3d_set_framebuffer_state(struct pipe_context *pctx,
const struct pipe_framebuffer_state *framebuffer)
{
- struct vc5_context *vc5 = vc5_context(pctx);
- struct pipe_framebuffer_state *cso = &vc5->framebuffer;
+ struct v3d_context *v3d = v3d_context(pctx);
+ struct pipe_framebuffer_state *cso = &v3d->framebuffer;
- vc5->job = NULL;
+ v3d->job = NULL;
util_copy_framebuffer_state(cso, framebuffer);
- vc5->swap_color_rb = 0;
- vc5->blend_dst_alpha_one = 0;
- for (int i = 0; i < vc5->framebuffer.nr_cbufs; i++) {
- struct pipe_surface *cbuf = vc5->framebuffer.cbufs[i];
+ v3d->swap_color_rb = 0;
+ v3d->blend_dst_alpha_one = 0;
+ for (int i = 0; i < v3d->framebuffer.nr_cbufs; i++) {
+ struct pipe_surface *cbuf = v3d->framebuffer.cbufs[i];
if (!cbuf)
continue;
*/
if (desc->swizzle[0] == PIPE_SWIZZLE_Z &&
cbuf->format != PIPE_FORMAT_B5G6R5_UNORM) {
- vc5->swap_color_rb |= 1 << i;
+ v3d->swap_color_rb |= 1 << i;
}
if (desc->swizzle[3] == PIPE_SWIZZLE_1)
- vc5->blend_dst_alpha_one |= 1 << i;
+ v3d->blend_dst_alpha_one |= 1 << i;
}
- vc5->dirty |= VC5_DIRTY_FRAMEBUFFER;
+ v3d->dirty |= VC5_DIRTY_FRAMEBUFFER;
}
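This is also where the two per-RT workaround masks used during emit get computed: swap_color_rb flags color buffers whose first component is sourced from the Z swizzle (reversed-component formats such as BGRA, with B5G6R5 excepted), and blend_dst_alpha_one flags buffers that store no alpha, so that v3d_factor() can treat destination alpha as 1.0 when picking blend factors.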
-static struct vc5_texture_stateobj *
-vc5_get_stage_tex(struct vc5_context *vc5, enum pipe_shader_type shader)
+static struct v3d_texture_stateobj *
+v3d_get_stage_tex(struct v3d_context *v3d, enum pipe_shader_type shader)
{
switch (shader) {
case PIPE_SHADER_FRAGMENT:
- vc5->dirty |= VC5_DIRTY_FRAGTEX;
- return &vc5->fragtex;
+ v3d->dirty |= VC5_DIRTY_FRAGTEX;
+ return &v3d->fragtex;
break;
case PIPE_SHADER_VERTEX:
- vc5->dirty |= VC5_DIRTY_VERTTEX;
- return &vc5->verttex;
+ v3d->dirty |= VC5_DIRTY_VERTTEX;
+ return &v3d->verttex;
break;
default:
fprintf(stderr, "Unknown shader target %d\n", shader);
static void *
-vc5_create_sampler_state(struct pipe_context *pctx,
+v3d_create_sampler_state(struct pipe_context *pctx,
const struct pipe_sampler_state *cso)
{
- MAYBE_UNUSED struct vc5_context *vc5 = vc5_context(pctx);
- struct vc5_sampler_state *so = CALLOC_STRUCT(vc5_sampler_state);
+ MAYBE_UNUSED struct v3d_context *v3d = v3d_context(pctx);
+ struct v3d_sampler_state *so = CALLOC_STRUCT(v3d_sampler_state);
if (!so)
return NULL;
cso->min_img_filter == PIPE_TEX_MIPFILTER_NEAREST);
#if V3D_VERSION >= 40
- so->bo = vc5_bo_alloc(vc5->screen, cl_packet_length(SAMPLER_STATE),
+ so->bo = v3d_bo_alloc(v3d->screen, cl_packet_length(SAMPLER_STATE),
"sampler");
- void *map = vc5_bo_map(so->bo);
+ void *map = v3d_bo_map(so->bo);
v3dx_pack(map, SAMPLER_STATE, sampler) {
sampler.wrap_i_border = false;
}
static void
-vc5_sampler_states_bind(struct pipe_context *pctx,
+v3d_sampler_states_bind(struct pipe_context *pctx,
enum pipe_shader_type shader, unsigned start,
unsigned nr, void **hwcso)
{
- struct vc5_context *vc5 = vc5_context(pctx);
- struct vc5_texture_stateobj *stage_tex = vc5_get_stage_tex(vc5, shader);
+ struct v3d_context *v3d = v3d_context(pctx);
+ struct v3d_texture_stateobj *stage_tex = v3d_get_stage_tex(v3d, shader);
assert(start == 0);
unsigned i;
}
static void
-vc5_sampler_state_delete(struct pipe_context *pctx,
+v3d_sampler_state_delete(struct pipe_context *pctx,
void *hwcso)
{
struct pipe_sampler_state *psampler = hwcso;
- struct vc5_sampler_state *sampler = vc5_sampler_state(psampler);
+ struct v3d_sampler_state *sampler = v3d_sampler_state(psampler);
- vc5_bo_unreference(&sampler->bo);
+ v3d_bo_unreference(&sampler->bo);
free(psampler);
}
#endif
static struct pipe_sampler_view *
-vc5_create_sampler_view(struct pipe_context *pctx, struct pipe_resource *prsc,
+v3d_create_sampler_view(struct pipe_context *pctx, struct pipe_resource *prsc,
const struct pipe_sampler_view *cso)
{
- struct vc5_context *vc5 = vc5_context(pctx);
- struct vc5_screen *screen = vc5->screen;
- struct vc5_sampler_view *so = CALLOC_STRUCT(vc5_sampler_view);
- struct vc5_resource *rsc = vc5_resource(prsc);
+ struct v3d_context *v3d = v3d_context(pctx);
+ struct v3d_screen *screen = v3d->screen;
+ struct v3d_sampler_view *so = CALLOC_STRUCT(v3d_sampler_view);
+ struct v3d_resource *rsc = v3d_resource(prsc);
if (!so)
return NULL;
cso->swizzle_a
};
const uint8_t *fmt_swizzle =
- vc5_get_format_swizzle(&screen->devinfo, so->base.format);
+ v3d_get_format_swizzle(&screen->devinfo, so->base.format);
util_format_compose_swizzles(fmt_swizzle, view_swizzle, so->swizzle);
so->base.texture = prsc;
int msaa_scale = prsc->nr_samples > 1 ? 2 : 1;
#if V3D_VERSION >= 40
- so->bo = vc5_bo_alloc(vc5->screen, cl_packet_length(SAMPLER_STATE),
+ so->bo = v3d_bo_alloc(v3d->screen, cl_packet_length(SAMPLER_STATE),
"sampler");
- void *map = vc5_bo_map(so->bo);
+ void *map = v3d_bo_map(so->bo);
v3dx_pack(map, TEXTURE_SHADER_STATE, tex) {
#else /* V3D_VERSION < 40 */
* to catch failures.
*
* We explicitly allow remapping S8Z24 to RGBA8888 for
- * vc5_blit.c's stencil blits.
+ * v3d_blit.c's stencil blits.
*/
assert((util_format_linear(cso->format) ==
util_format_linear(prsc->format)) ||
(prsc->format == PIPE_FORMAT_S8_UINT_Z24_UNORM &&
cso->format == PIPE_FORMAT_R8G8B8A8_UNORM));
uint32_t output_image_format =
- vc5_get_rt_format(&screen->devinfo, cso->format);
+ v3d_get_rt_format(&screen->devinfo, cso->format);
uint32_t internal_type;
uint32_t internal_bpp;
- vc5_get_internal_type_bpp_for_output_format(&screen->devinfo,
+ v3d_get_internal_type_bpp_for_output_format(&screen->devinfo,
output_image_format,
&internal_type,
&internal_bpp);
*/
tex.srgb = false;
} else {
- tex.texture_type = vc5_get_tex_format(&screen->devinfo,
+ tex.texture_type = v3d_get_tex_format(&screen->devinfo,
cso->format);
}
}
static void
-vc5_sampler_view_destroy(struct pipe_context *pctx,
+v3d_sampler_view_destroy(struct pipe_context *pctx,
struct pipe_sampler_view *psview)
{
- struct vc5_sampler_view *sview = vc5_sampler_view(psview);
+ struct v3d_sampler_view *sview = v3d_sampler_view(psview);
- vc5_bo_unreference(&sview->bo);
+ v3d_bo_unreference(&sview->bo);
pipe_resource_reference(&psview->texture, NULL);
free(psview);
}
static void
-vc5_set_sampler_views(struct pipe_context *pctx,
+v3d_set_sampler_views(struct pipe_context *pctx,
enum pipe_shader_type shader,
unsigned start, unsigned nr,
struct pipe_sampler_view **views)
{
- struct vc5_context *vc5 = vc5_context(pctx);
- struct vc5_texture_stateobj *stage_tex = vc5_get_stage_tex(vc5, shader);
+ struct v3d_context *v3d = v3d_context(pctx);
+ struct v3d_texture_stateobj *stage_tex = v3d_get_stage_tex(v3d, shader);
unsigned i;
unsigned new_nr = 0;
}
static struct pipe_stream_output_target *
-vc5_create_stream_output_target(struct pipe_context *pctx,
+v3d_create_stream_output_target(struct pipe_context *pctx,
struct pipe_resource *prsc,
unsigned buffer_offset,
unsigned buffer_size)
}
static void
-vc5_stream_output_target_destroy(struct pipe_context *pctx,
+v3d_stream_output_target_destroy(struct pipe_context *pctx,
struct pipe_stream_output_target *target)
{
pipe_resource_reference(&target->buffer, NULL);
}
static void
-vc5_set_stream_output_targets(struct pipe_context *pctx,
+v3d_set_stream_output_targets(struct pipe_context *pctx,
unsigned num_targets,
struct pipe_stream_output_target **targets,
const unsigned *offsets)
{
- struct vc5_context *ctx = vc5_context(pctx);
- struct vc5_streamout_stateobj *so = &ctx->streamout;
+ struct v3d_context *ctx = v3d_context(pctx);
+ struct v3d_streamout_stateobj *so = &ctx->streamout;
unsigned i;
assert(num_targets <= ARRAY_SIZE(so->targets));
void
v3dX(state_init)(struct pipe_context *pctx)
{
- pctx->set_blend_color = vc5_set_blend_color;
- pctx->set_stencil_ref = vc5_set_stencil_ref;
- pctx->set_clip_state = vc5_set_clip_state;
- pctx->set_sample_mask = vc5_set_sample_mask;
- pctx->set_constant_buffer = vc5_set_constant_buffer;
- pctx->set_framebuffer_state = vc5_set_framebuffer_state;
- pctx->set_polygon_stipple = vc5_set_polygon_stipple;
- pctx->set_scissor_states = vc5_set_scissor_states;
- pctx->set_viewport_states = vc5_set_viewport_states;
-
- pctx->set_vertex_buffers = vc5_set_vertex_buffers;
-
- pctx->create_blend_state = vc5_create_blend_state;
- pctx->bind_blend_state = vc5_blend_state_bind;
- pctx->delete_blend_state = vc5_generic_cso_state_delete;
-
- pctx->create_rasterizer_state = vc5_create_rasterizer_state;
- pctx->bind_rasterizer_state = vc5_rasterizer_state_bind;
- pctx->delete_rasterizer_state = vc5_generic_cso_state_delete;
-
- pctx->create_depth_stencil_alpha_state = vc5_create_depth_stencil_alpha_state;
- pctx->bind_depth_stencil_alpha_state = vc5_zsa_state_bind;
- pctx->delete_depth_stencil_alpha_state = vc5_generic_cso_state_delete;
-
- pctx->create_vertex_elements_state = vc5_vertex_state_create;
- pctx->delete_vertex_elements_state = vc5_generic_cso_state_delete;
- pctx->bind_vertex_elements_state = vc5_vertex_state_bind;
-
- pctx->create_sampler_state = vc5_create_sampler_state;
- pctx->delete_sampler_state = vc5_sampler_state_delete;
- pctx->bind_sampler_states = vc5_sampler_states_bind;
-
- pctx->create_sampler_view = vc5_create_sampler_view;
- pctx->sampler_view_destroy = vc5_sampler_view_destroy;
- pctx->set_sampler_views = vc5_set_sampler_views;
-
- pctx->create_stream_output_target = vc5_create_stream_output_target;
- pctx->stream_output_target_destroy = vc5_stream_output_target_destroy;
- pctx->set_stream_output_targets = vc5_set_stream_output_targets;
+ pctx->set_blend_color = v3d_set_blend_color;
+ pctx->set_stencil_ref = v3d_set_stencil_ref;
+ pctx->set_clip_state = v3d_set_clip_state;
+ pctx->set_sample_mask = v3d_set_sample_mask;
+ pctx->set_constant_buffer = v3d_set_constant_buffer;
+ pctx->set_framebuffer_state = v3d_set_framebuffer_state;
+ pctx->set_polygon_stipple = v3d_set_polygon_stipple;
+ pctx->set_scissor_states = v3d_set_scissor_states;
+ pctx->set_viewport_states = v3d_set_viewport_states;
+
+ pctx->set_vertex_buffers = v3d_set_vertex_buffers;
+
+ pctx->create_blend_state = v3d_create_blend_state;
+ pctx->bind_blend_state = v3d_blend_state_bind;
+ pctx->delete_blend_state = v3d_generic_cso_state_delete;
+
+ pctx->create_rasterizer_state = v3d_create_rasterizer_state;
+ pctx->bind_rasterizer_state = v3d_rasterizer_state_bind;
+ pctx->delete_rasterizer_state = v3d_generic_cso_state_delete;
+
+ pctx->create_depth_stencil_alpha_state = v3d_create_depth_stencil_alpha_state;
+ pctx->bind_depth_stencil_alpha_state = v3d_zsa_state_bind;
+ pctx->delete_depth_stencil_alpha_state = v3d_generic_cso_state_delete;
+
+ pctx->create_vertex_elements_state = v3d_vertex_state_create;
+ pctx->delete_vertex_elements_state = v3d_generic_cso_state_delete;
+ pctx->bind_vertex_elements_state = v3d_vertex_state_bind;
+
+ pctx->create_sampler_state = v3d_create_sampler_state;
+ pctx->delete_sampler_state = v3d_sampler_state_delete;
+ pctx->bind_sampler_states = v3d_sampler_states_bind;
+
+ pctx->create_sampler_view = v3d_create_sampler_view;
+ pctx->sampler_view_destroy = v3d_sampler_view_destroy;
+ pctx->set_sampler_views = v3d_set_sampler_views;
+
+ pctx->create_stream_output_target = v3d_create_stream_output_target;
+ pctx->stream_output_target_destroy = v3d_stream_output_target_destroy;
+ pctx->set_stream_output_targets = v3d_set_stream_output_targets;
}
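All of the v3dX() entry points in this series (state_init, emit_state, emit_rcl, get_format_desc, bcl_epilogue) are compiled once per hardware version, with V3D_VERSION set by the build. A sketch of the usual name-mangling scheme; the exact macro and version list here are assumptions, not the file's literal contents:

/* Hypothetical sketch: v3dX(x) pastes the compile-time version into the
 * symbol name, so v3dX(state_init) resolves to v3d33_state_init or
 * v3d41_state_init depending on which copy of the file is being built.
 */
#if V3D_VERSION == 33
#define v3dX(x) v3d33_##x
#elif V3D_VERSION == 41
#define v3dX(x) v3d41_##x
#else
#error "Unsupported V3D_VERSION"
#endif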