*/
#include "r600_pipe.h"
#include "r600_public.h"
+#include "r600_isa.h"
+#include "evergreen_compute.h"
+#include "r600d.h"
#include <errno.h>
#include "pipe/p_shader_tokens.h"
#include "util/u_blitter.h"
+#include "util/u_debug.h"
#include "util/u_format_s3tc.h"
+#include "util/u_memory.h"
#include "util/u_simple_shaders.h"
#include "util/u_upload_mgr.h"
+#include "util/u_math.h"
#include "vl/vl_decoder.h"
#include "vl/vl_video_buffer.h"
#include "os/os_time.h"
+static const struct debug_named_value debug_options[] = {
+ /* logging */
+ { "texdepth", DBG_TEX_DEPTH, "Print texture depth info" },
+ { "compute", DBG_COMPUTE, "Print compute info" },
+
+ /* shaders */
+ { "fs", DBG_FS, "Print fetch shaders" },
+ { "vs", DBG_VS, "Print vertex shaders" },
+ { "gs", DBG_GS, "Print geometry shaders" },
+ { "ps", DBG_PS, "Print pixel shaders" },
+ { "cs", DBG_CS, "Print compute shaders" },
+
+ /* features */
+ { "nohyperz", DBG_NO_HYPERZ, "Disable Hyper-Z" },
+#if defined(R600_USE_LLVM)
+ { "nollvm", DBG_NO_LLVM, "Disable the LLVM shader compiler" },
+#endif
+
+ DEBUG_NAMED_VALUE_END /* must be last */
+};
+
/*
* pipe_context
*/
R600_ERR("r600: failed to create bo for fence objects\n");
goto out;
}
- rscreen->fences.data = rctx->ws->buffer_map(rscreen->fences.bo->cs_buf,
- rctx->cs,
- PIPE_TRANSFER_READ_WRITE);
+ rscreen->fences.data = r600_buffer_mmap_sync_with_rings(rctx, rscreen->fences.bo, PIPE_TRANSFER_READ_WRITE);
}
if (!LIST_IS_EMPTY(&rscreen->fences.pool)) {
pipe_buffer_create(&rctx->screen->screen, PIPE_BIND_CUSTOM,
PIPE_USAGE_STAGING, 1);
/* Add the fence as a dummy relocation. */
- r600_context_bo_reloc(rctx, fence->sleep_bo, RADEON_USAGE_READWRITE);
+ r600_context_bo_reloc(rctx, &rctx->rings.gfx, fence->sleep_bo, RADEON_USAGE_READWRITE);
out:
pipe_mutex_unlock(rscreen->fences.mutex);
return fence;
}
-
-void r600_flush(struct pipe_context *ctx, struct pipe_fence_handle **fence,
- unsigned flags)
+static void r600_flush(struct pipe_context *ctx, unsigned flags)
{
struct r600_context *rctx = (struct r600_context *)ctx;
- struct r600_fence **rfence = (struct r600_fence**)fence;
struct pipe_query *render_cond = NULL;
unsigned render_cond_mode = 0;
- if (rfence)
- *rfence = r600_create_fence(rctx);
-
+ rctx->rings.gfx.flushing = true;
/* Disable render condition. */
if (rctx->current_render_cond) {
render_cond = rctx->current_render_cond;
}
r600_context_flush(rctx, flags);
+ rctx->rings.gfx.flushing = false;
+ r600_begin_new_cs(rctx);
/* Re-enable render condition. */
if (render_cond) {
}
static void r600_flush_from_st(struct pipe_context *ctx,
- struct pipe_fence_handle **fence)
+ struct pipe_fence_handle **fence,
+ enum pipe_flush_flags flags)
{
- r600_flush(ctx, fence, 0);
+ struct r600_context *rctx = (struct r600_context *)ctx;
+ struct r600_fence **rfence = (struct r600_fence**)fence;
+ unsigned fflags;
+
+ fflags = flags & PIPE_FLUSH_END_OF_FRAME ? RADEON_FLUSH_END_OF_FRAME : 0;
+ if (rfence) {
+ *rfence = r600_create_fence(rctx);
+ }
+ /* flush gfx & dma ring, order does not matter as only one can be live */
+ if (rctx->rings.dma.cs) {
+ rctx->rings.dma.flush(rctx, fflags);
+ }
+ rctx->rings.gfx.flush(rctx, fflags);
+}
+
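+/* rings.gfx.flush callback: forwards to r600_flush() above. */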
+static void r600_flush_gfx_ring(void *ctx, unsigned flags)
+{
+ r600_flush((struct pipe_context*)ctx, flags);
+}
+
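+/* rings.dma.flush callback: pads the DMA command stream with NOP packets to a
+ * multiple of 8 dwords, then submits it to the winsys. */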
+static void r600_flush_dma_ring(void *ctx, unsigned flags)
+{
+ struct r600_context *rctx = (struct r600_context *)ctx;
+ struct radeon_winsys_cs *cs = rctx->rings.dma.cs;
+ unsigned padding_dw, i;
+
+ if (!cs->cdw) {
+ return;
+ }
+
+ /* Pad the DMA CS to a multiple of 8 dwords. */
+ padding_dw = 8 - cs->cdw % 8;
+ if (padding_dw < 8) {
+ for (i = 0; i < padding_dw; i++) {
+ cs->buf[cs->cdw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
+ }
+ }
+
+ rctx->rings.dma.flushing = true;
+ rctx->ws->cs_flush(cs, flags);
+ rctx->rings.dma.flushing = false;
+}
+
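+/* Return TRUE if the buffer is referenced by either the GFX or the DMA
+ * command stream for the given usage. */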
+boolean r600_rings_is_buffer_referenced(struct r600_context *ctx,
+ struct radeon_winsys_cs_handle *buf,
+ enum radeon_bo_usage usage)
+{
+ if (ctx->ws->cs_is_buffer_referenced(ctx->rings.gfx.cs, buf, usage)) {
+ return TRUE;
+ }
+ if (ctx->rings.dma.cs) {
+ if (ctx->ws->cs_is_buffer_referenced(ctx->rings.dma.cs, buf, usage)) {
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
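+/* Map a buffer for CPU access, first flushing and waiting on any ring that
+ * still references it.  PIPE_TRANSFER_UNSYNCHRONIZED skips the wait entirely;
+ * PIPE_TRANSFER_DONTBLOCK makes the function return NULL instead of waiting. */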
+void *r600_buffer_mmap_sync_with_rings(struct r600_context *ctx,
+ struct r600_resource *resource,
+ unsigned usage)
+{
+ enum radeon_bo_usage rusage = RADEON_USAGE_READWRITE;
+ unsigned flags = 0;
+ bool sync_flush = TRUE;
+
+ if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
+ return ctx->ws->buffer_map(resource->cs_buf, NULL, usage);
+ }
+
+ if (!(usage & PIPE_TRANSFER_WRITE)) {
+		/* read-only mapping: only need to wait for pending GPU writes */
+ rusage = RADEON_USAGE_WRITE;
+ }
+ if (usage & PIPE_TRANSFER_DONTBLOCK) {
+ flags |= RADEON_FLUSH_ASYNC;
+ }
+
+ if (ctx->ws->cs_is_buffer_referenced(ctx->rings.gfx.cs, resource->cs_buf, rusage) && ctx->rings.gfx.cs->cdw) {
+ ctx->rings.gfx.flush(ctx, flags);
+ if (usage & PIPE_TRANSFER_DONTBLOCK) {
+ return NULL;
+ }
+ }
+ if (ctx->rings.dma.cs) {
+ if (ctx->ws->cs_is_buffer_referenced(ctx->rings.dma.cs, resource->cs_buf, rusage) && ctx->rings.dma.cs->cdw) {
+ ctx->rings.dma.flush(ctx, flags);
+ if (usage & PIPE_TRANSFER_DONTBLOCK) {
+ return NULL;
+ }
+ }
+ }
+
+ if (usage & PIPE_TRANSFER_DONTBLOCK) {
+ if (ctx->ws->buffer_is_busy(resource->buf, rusage)) {
+ return NULL;
+ }
+ }
+ if (sync_flush) {
+ /* Try to avoid busy-waiting in radeon_bo_wait. */
+ ctx->ws->cs_sync_flush(ctx->rings.gfx.cs);
+ if (ctx->rings.dma.cs) {
+ ctx->ws->cs_sync_flush(ctx->rings.dma.cs);
+ }
+ }
+ ctx->ws->buffer_wait(resource->buf, rusage);
+
+ /* at this point everything is synchronized */
+ return ctx->ws->buffer_map(resource->cs_buf, NULL, usage | PIPE_TRANSFER_UNSYNCHRONIZED);
}
static void r600_flush_from_winsys(void *ctx, unsigned flags)
{
- r600_flush((struct pipe_context*)ctx, NULL, flags);
+ struct r600_context *rctx = (struct r600_context *)ctx;
+
+ rctx->rings.gfx.flush(rctx, flags);
+}
+
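+/* Winsys flush callback for the DMA command stream. */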
+static void r600_flush_dma_from_winsys(void *ctx, unsigned flags)
+{
+ struct r600_context *rctx = (struct r600_context *)ctx;
+
+ rctx->rings.dma.flush(rctx, flags);
}
static void r600_destroy_context(struct pipe_context *context)
{
struct r600_context *rctx = (struct r600_context *)context;
+ r600_isa_destroy(rctx->isa);
+
+ pipe_resource_reference((struct pipe_resource**)&rctx->dummy_cmask, NULL);
+ pipe_resource_reference((struct pipe_resource**)&rctx->dummy_fmask, NULL);
+
if (rctx->dummy_pixel_shader) {
rctx->context.delete_fs_state(&rctx->context, rctx->dummy_pixel_shader);
}
if (rctx->custom_blend_resolve) {
rctx->context.delete_blend_state(&rctx->context, rctx->custom_blend_resolve);
}
- util_unreference_framebuffer_state(&rctx->framebuffer);
+ if (rctx->custom_blend_decompress) {
+ rctx->context.delete_blend_state(&rctx->context, rctx->custom_blend_decompress);
+ }
+ if (rctx->custom_blend_fmask_decompress) {
+ rctx->context.delete_blend_state(&rctx->context, rctx->custom_blend_fmask_decompress);
+ }
+ util_unreference_framebuffer_state(&rctx->framebuffer.state);
r600_context_fini(rctx);
if (rctx->blitter) {
util_blitter_destroy(rctx->blitter);
}
- for (int i = 0; i < R600_PIPE_NSTATES; i++) {
- free(rctx->states[i]);
- }
-
if (rctx->uploader) {
u_upload_destroy(rctx->uploader);
}
+ if (rctx->allocator_so_filled_size) {
+ u_suballocator_destroy(rctx->allocator_so_filled_size);
+ }
+ if (rctx->allocator_fetch_shader) {
+ u_suballocator_destroy(rctx->allocator_fetch_shader);
+ }
util_slab_destroy(&rctx->pool_transfers);
r600_release_command_buffer(&rctx->start_cs_cmd);
- if (rctx->cs) {
- rctx->ws->cs_destroy(rctx->cs);
+ if (rctx->rings.gfx.cs) {
+ rctx->ws->cs_destroy(rctx->rings.gfx.cs);
+ }
+ if (rctx->rings.dma.cs) {
+ rctx->ws->cs_destroy(rctx->rings.dma.cs);
}
FREE(rctx->range);
rctx->ws = rscreen->ws;
rctx->family = rscreen->family;
rctx->chip_class = rscreen->chip_class;
+ rctx->keep_tiling_flags = rscreen->info.drm_minor >= 12;
- LIST_INITHEAD(&rctx->dirty_states);
- LIST_INITHEAD(&rctx->active_timer_queries);
LIST_INITHEAD(&rctx->active_nontimer_queries);
LIST_INITHEAD(&rctx->dirty);
LIST_INITHEAD(&rctx->enable_list);
r600_init_query_functions(rctx);
r600_init_context_resource_functions(rctx);
r600_init_surface_functions(rctx);
- rctx->context.draw_vbo = r600_draw_vbo;
+
rctx->context.create_video_decoder = vl_create_decoder;
rctx->context.create_video_buffer = vl_video_buffer_create;
- r600_init_common_atoms(rctx);
+ r600_init_common_state_functions(rctx);
switch (rctx->chip_class) {
case R600:
if (r600_context_init(rctx))
goto fail;
rctx->custom_dsa_flush = r600_create_db_flush_dsa(rctx);
+ rctx->custom_blend_resolve = rctx->chip_class == R700 ? r700_create_resolve_blend(rctx)
+ : r600_create_resolve_blend(rctx);
+ rctx->custom_blend_decompress = r600_create_decompress_blend(rctx);
rctx->has_vertex_cache = !(rctx->family == CHIP_RV610 ||
rctx->family == CHIP_RV620 ||
rctx->family == CHIP_RS780 ||
goto fail;
rctx->custom_dsa_flush = evergreen_create_db_flush_dsa(rctx);
rctx->custom_blend_resolve = evergreen_create_resolve_blend(rctx);
+ rctx->custom_blend_decompress = evergreen_create_decompress_blend(rctx);
+ rctx->custom_blend_fmask_decompress = evergreen_create_fmask_decompress_blend(rctx);
rctx->has_vertex_cache = !(rctx->family == CHIP_CEDAR ||
rctx->family == CHIP_PALM ||
rctx->family == CHIP_SUMO ||
goto fail;
}
- rctx->cs = rctx->ws->cs_create(rctx->ws);
- rctx->ws->cs_set_flush_callback(rctx->cs, r600_flush_from_winsys, rctx);
- r600_emit_atom(rctx, &rctx->start_cs_cmd.atom);
+ rctx->rings.gfx.cs = rctx->ws->cs_create(rctx->ws, RING_GFX);
+ rctx->rings.gfx.flush = r600_flush_gfx_ring;
+ rctx->ws->cs_set_flush_callback(rctx->rings.gfx.cs, r600_flush_from_winsys, rctx);
+ rctx->rings.gfx.flushing = false;
+
+ rctx->rings.dma.cs = NULL;
+ if (rscreen->info.r600_has_dma) {
+ rctx->rings.dma.cs = rctx->ws->cs_create(rctx->ws, RING_DMA);
+ rctx->rings.dma.flush = r600_flush_dma_ring;
+ rctx->ws->cs_set_flush_callback(rctx->rings.dma.cs, r600_flush_dma_from_winsys, rctx);
+ rctx->rings.dma.flushing = false;
+ }
+
+ rctx->uploader = u_upload_create(&rctx->context, 1024 * 1024, 256,
+ PIPE_BIND_INDEX_BUFFER |
+ PIPE_BIND_CONSTANT_BUFFER);
+ if (!rctx->uploader)
+ goto fail;
+
+ rctx->allocator_fetch_shader = u_suballocator_create(&rctx->context, 64 * 1024, 256,
+ 0, PIPE_USAGE_STATIC, FALSE);
+ if (!rctx->allocator_fetch_shader)
+ goto fail;
+
+ rctx->allocator_so_filled_size = u_suballocator_create(&rctx->context, 4096, 4,
+ 0, PIPE_USAGE_STATIC, TRUE);
+ if (!rctx->allocator_so_filled_size)
+ goto fail;
- rctx->uploader = u_upload_create(&rctx->context, 1024 * 1024, 256,
- PIPE_BIND_INDEX_BUFFER |
- PIPE_BIND_CONSTANT_BUFFER);
- if (!rctx->uploader)
- goto fail;
+ rctx->isa = calloc(1, sizeof(struct r600_isa));
+ if (!rctx->isa || r600_isa_init(rctx, rctx->isa))
+ goto fail;
rctx->blitter = util_blitter_create(&rctx->context);
if (rctx->blitter == NULL)
goto fail;
+ util_blitter_set_texture_multisample(rctx->blitter, rscreen->has_msaa);
+ rctx->blitter->draw_rectangle = r600_draw_rectangle;
+ r600_begin_new_cs(rctx);
r600_get_backend_mask(rctx); /* this emits commands and must be last */
- if (rctx->chip_class == R600)
- r600_set_max_scissor(rctx);
-
rctx->dummy_pixel_shader =
util_make_fragment_cloneinput_shader(&rctx->context, 0,
TGSI_SEMANTIC_GENERIC,
case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
case PIPE_CAP_BLEND_EQUATION_SEPARATE:
case PIPE_CAP_TEXTURE_SWIZZLE:
- case PIPE_CAP_DEPTHSTENCIL_CLEAR_SEPARATE:
case PIPE_CAP_DEPTH_CLIP_DISABLE:
case PIPE_CAP_SHADER_STENCIL_EXPORT:
case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR:
case PIPE_CAP_COMPUTE:
case PIPE_CAP_START_INSTANCE:
case PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS:
+ case PIPE_CAP_TEXTURE_BUFFER_OBJECTS:
return 1;
+ case PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT:
+ return R600_MAP_BUFFER_ALIGNMENT;
+
case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
return 256;
case PIPE_CAP_GLSL_FEATURE_LEVEL:
- return 130;
+ return 140;
+
+ case PIPE_CAP_TEXTURE_MULTISAMPLE:
+ return rscreen->msaa_texture_support != MSAA_TEXTURE_SAMPLE_ZERO;
/* Supported except the original R600. */
case PIPE_CAP_INDEP_BLEND_ENABLE:
/* Supported on Evergreen. */
case PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE:
+ case PIPE_CAP_CUBE_MAP_ARRAY:
return family >= CHIP_CEDAR ? 1 : 0;
/* Unsupported features. */
case PIPE_CAP_FRAGMENT_COLOR_CLAMPED:
case PIPE_CAP_VERTEX_COLOR_CLAMPED:
case PIPE_CAP_USER_VERTEX_BUFFERS:
- case PIPE_CAP_QUERY_TIMESTAMP:
+ case PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT:
return 0;
/* Stream output. */
return rscreen->has_streamout ? 1 : 0;
case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS:
case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS:
- return 16*4;
+ return 32*4;
/* Texturing. */
case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
return 8;
/* Timer queries, present when the clock frequency is non zero. */
- case PIPE_CAP_TIMER_QUERY:
+ case PIPE_CAP_QUERY_TIME_ELAPSED:
return rscreen->info.r600_clock_crystal_freq != 0;
+ case PIPE_CAP_QUERY_TIMESTAMP:
+ return rscreen->info.drm_minor >= 20 &&
+ rscreen->info.r600_clock_crystal_freq != 0;
case PIPE_CAP_MIN_TEXEL_OFFSET:
return -8;
return 0;
}
- /* XXX: all these should be fixed, since r600 surely supports much more! */
switch (param) {
case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
return 16384;
case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
- return 8; /* XXX */
+ return 32;
case PIPE_SHADER_CAP_MAX_INPUTS:
- if(shader == PIPE_SHADER_FRAGMENT)
- return 34;
- else
- return 32;
+ return 32;
case PIPE_SHADER_CAP_MAX_TEMPS:
return 256; /* Max native temporaries. */
case PIPE_SHADER_CAP_MAX_ADDRS:
case PIPE_SHADER_CAP_MAX_CONSTS:
return R600_MAX_CONST_BUFFER_SIZE;
case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
- return R600_MAX_CONST_BUFFERS-1;
+ return R600_MAX_USER_CONST_BUFFERS;
case PIPE_SHADER_CAP_MAX_PREDS:
return 0; /* nothing uses this */
case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED:
return 1;
+ case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:
+ return 0;
case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:
case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
case PIPE_COMPUTE_CAP_MAX_GLOBAL_SIZE:
if (ret) {
uint64_t * max_global_size = ret;
- /* XXX: This is 64kb for now until we get the
- * compute memory pool working correctly.
- */
- *max_global_size = 1024 * 16 * 4;
+			/* XXX: This is what the proprietary driver reports; we
+			 * may want to use a different value. */
+ *max_global_size = 201326592;
}
return sizeof(uint64_t);
}
return sizeof(uint64_t);
+ case PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE:
+ if (ret) {
+ uint64_t max_global_size;
+ uint64_t * max_mem_alloc_size = ret;
+ r600_get_compute_param(screen,
+ PIPE_COMPUTE_CAP_MAX_GLOBAL_SIZE,
+ &max_global_size);
+			/* OpenCL requires this value to be at least
+			 * max(MAX_GLOBAL_SIZE / 4, 128 * 1024 * 1024).
+			 * I'm really not sure what value to report here, but
+			 * MAX_GLOBAL_SIZE / 4 seems reasonable.
+ */
+ *max_mem_alloc_size = max_global_size / 4;
+ }
+ return sizeof(uint64_t);
+
default:
fprintf(stderr, "unknown PIPE_COMPUTE_CAP %d\n", param);
return 0;
rscreen->ws->buffer_unmap(rscreen->fences.bo->cs_buf);
pipe_resource_reference((struct pipe_resource**)&rscreen->fences.bo, NULL);
}
+#if R600_TRACE_CS
+ if (rscreen->trace_bo) {
+ rscreen->ws->buffer_unmap(rscreen->trace_bo->cs_buf);
+ pipe_resource_reference((struct pipe_resource**)&rscreen->trace_bo, NULL);
+ }
+#endif
pipe_mutex_destroy(rscreen->fences.mutex);
rscreen->ws->destroy(rscreen->ws);
struct r600_screen *rscreen = (struct r600_screen *)pscreen;
struct r600_fence *rfence = (struct r600_fence*)fence;
- return rscreen->fences.data[rfence->index];
+ return rscreen->fences.data[rfence->index] != 0;
}
static boolean r600_fence_finish(struct pipe_screen *pscreen,
}
}
-static unsigned radeon_family_from_device(unsigned device)
+static uint64_t r600_get_timestamp(struct pipe_screen *screen)
{
- switch (device) {
-#define CHIPSET(pciid, name, family) case pciid: return CHIP_##family;
-#include "pci_ids/r600_pci_ids.h"
-#undef CHIPSET
- default:
- return CHIP_UNKNOWN;
- }
+ struct r600_screen *rscreen = (struct r600_screen*)screen;
+
+ return 1000000 * rscreen->ws->query_timestamp(rscreen->ws) /
+ rscreen->info.r600_clock_crystal_freq;
}
struct pipe_screen *r600_screen_create(struct radeon_winsys *ws)
rscreen->ws = ws;
ws->query_info(ws, &rscreen->info);
- rscreen->family = radeon_family_from_device(rscreen->info.pci_id);
+ rscreen->debug_flags = debug_get_flags_option("R600_DEBUG", debug_options, 0);
+ rscreen->family = rscreen->info.family;
+ rscreen->chip_class = rscreen->info.chip_class;
+
if (rscreen->family == CHIP_UNKNOWN) {
fprintf(stderr, "r600: Unknown chipset 0x%04X\n", rscreen->info.pci_id);
FREE(rscreen);
return NULL;
}
- /* setup class */
- if (rscreen->family >= CHIP_CAYMAN) {
- rscreen->chip_class = CAYMAN;
- } else if (rscreen->family >= CHIP_CEDAR) {
- rscreen->chip_class = EVERGREEN;
- } else if (rscreen->family >= CHIP_RV770) {
- rscreen->chip_class = R700;
- } else {
- rscreen->chip_class = R600;
- }
-
/* Figure out streamout kernel support. */
switch (rscreen->chip_class) {
case R600:
+ if (rscreen->family < CHIP_RS780) {
+ rscreen->has_streamout = rscreen->info.drm_minor >= 14;
+ } else {
+ rscreen->has_streamout = rscreen->info.drm_minor >= 23;
+ }
+ break;
+ case R700:
+ rscreen->has_streamout = rscreen->info.drm_minor >= 17;
+ break;
case EVERGREEN:
+ case CAYMAN:
rscreen->has_streamout = rscreen->info.drm_minor >= 14;
break;
+ default:
+ rscreen->has_streamout = FALSE;
+ break;
+ }
+
+ /* MSAA support. */
+ switch (rscreen->chip_class) {
+ case R600:
case R700:
- rscreen->has_streamout = rscreen->info.drm_minor >= 17;
+ rscreen->has_msaa = rscreen->info.drm_minor >= 22;
+ rscreen->msaa_texture_support = MSAA_TEXTURE_DECOMPRESSED;
+ break;
+ case EVERGREEN:
+ rscreen->has_msaa = rscreen->info.drm_minor >= 19;
+ rscreen->msaa_texture_support =
+ rscreen->info.drm_minor >= 24 ? MSAA_TEXTURE_COMPRESSED :
+ MSAA_TEXTURE_DECOMPRESSED;
+ break;
+ case CAYMAN:
+ rscreen->has_msaa = rscreen->info.drm_minor >= 19;
+ /* We should be able to read compressed MSAA textures, but it doesn't work. */
+ rscreen->msaa_texture_support = MSAA_TEXTURE_SAMPLE_ZERO;
break;
- /* TODO: Cayman */
default:
- rscreen->has_streamout = debug_get_bool_option("R600_STREAMOUT", FALSE);
+ rscreen->has_msaa = FALSE;
+ rscreen->msaa_texture_support = 0;
+ break;
}
+ rscreen->has_cp_dma = rscreen->info.drm_minor >= 27;
+
if (r600_init_tiling(rscreen)) {
FREE(rscreen);
return NULL;
rscreen->screen.get_paramf = r600_get_paramf;
rscreen->screen.get_video_param = r600_get_video_param;
rscreen->screen.get_compute_param = r600_get_compute_param;
+ rscreen->screen.get_timestamp = r600_get_timestamp;
if (rscreen->chip_class >= EVERGREEN) {
rscreen->screen.is_format_supported = evergreen_is_format_supported;
+ rscreen->dma_blit = &evergreen_dma_blit;
} else {
rscreen->screen.is_format_supported = r600_is_format_supported;
+ rscreen->dma_blit = &r600_dma_blit;
}
rscreen->screen.is_video_format_supported = vl_video_buffer_is_format_supported;
rscreen->screen.context_create = r600_create_context;
rscreen->global_pool = compute_memory_pool_new(rscreen);
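+	/* When CS tracing is built in, keep a small persistently mapped buffer
+	 * around for trace data (only on kernels with drm_minor >= 28). */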
+#if R600_TRACE_CS
+ rscreen->cs_count = 0;
+ if (rscreen->info.drm_minor >= 28) {
+ rscreen->trace_bo = (struct r600_resource*)pipe_buffer_create(&rscreen->screen,
+ PIPE_BIND_CUSTOM,
+ PIPE_USAGE_STAGING,
+ 4096);
+ if (rscreen->trace_bo) {
+ rscreen->trace_ptr = rscreen->ws->buffer_map(rscreen->trace_bo->cs_buf, NULL,
+ PIPE_TRANSFER_UNSYNCHRONIZED);
+ }
+ }
+#endif
+
return &rscreen->screen;
}