struct compute_memory_pool* pool = (struct compute_memory_pool*)
CALLOC(sizeof(struct compute_memory_pool), 1);
+ COMPUTE_DBG("* compute_memory_pool_new() initial_size_in_dw = %ld\n",
+ initial_size_in_dw);
+
pool->next_id = 1;
pool->size_in_dw = initial_size_in_dw;
pool->screen = rscreen;
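
Every message this patch adds goes through COMPUTE_DBG. As a rough standalone sketch (the driver's real macro and the name of its enabling switch are assumptions here, not taken from this patch), a printf-style macro gated by an environment variable looks like:

	#include <stdio.h>
	#include <stdlib.h>

	/* Illustrative stand-in for the driver's COMPUTE_DBG; the
	 * "R600_COMPUTE_DEBUG" variable name is hypothetical. */
	#define COMPUTE_DBG(fmt, ...) \
		do { \
			static int enabled = -1; \
			if (enabled < 0) \
				enabled = getenv("R600_COMPUTE_DEBUG") != NULL; \
			if (enabled) \
				fprintf(stderr, fmt, ##__VA_ARGS__); \
		} while (0)
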
*/
void compute_memory_pool_delete(struct compute_memory_pool* pool)
{
+ COMPUTE_DBG("* compute_memory_pool_delete()\n");
free(pool->shadow);
if (pool->bo) {
pool->screen->screen.resource_destroy((struct pipe_screen *)
int last_end = 0;
+ COMPUTE_DBG("* compute_memory_prealloc_chunk() size_in_dw = %ld\n",
+ size_in_dw);
+
for (item = pool->item_list; item; item = item->next) {
if (item->start_in_dw > -1) {
if (item->start_in_dw-last_end > size_in_dw) {
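
For context, the loop above is a first-fit scan: it walks the item list in address order and returns the first gap between placed items that can hold the request. A self-contained sketch of the same logic, with a simplified stand-in for struct compute_memory_item:

	#include <stdint.h>

	struct item {
		int64_t start_in_dw;   /* -1 while not yet placed */
		int64_t size_in_dw;
		struct item *next;     /* list sorted by start_in_dw */
	};

	/* Mirrors the hunk above, including its strict '>' comparison. */
	static int64_t prealloc_first_fit(struct item *list, int64_t size_in_dw)
	{
		int64_t last_end = 0;
		for (struct item *it = list; it; it = it->next) {
			if (it->start_in_dw > -1) {
				if (it->start_in_dw - last_end > size_in_dw)
					return last_end;   /* gap is big enough */
				last_end = it->start_in_dw + it->size_in_dw;
			}
		}
		return last_end;   /* no gap: place at the current end */
	}
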
{
struct compute_memory_item* item;
+ COMPUTE_DBG("* compute_memory_postalloc_chunck() start_in_dw = %ld\n",
+ start_in_dw);
+
for (item = pool->item_list; item; item = item->next) {
if (item->next) {
if (item->start_in_dw < start_in_dw
void compute_memory_grow_pool(struct compute_memory_pool* pool,
struct pipe_context * pipe, int new_size_in_dw)
{
+ COMPUTE_DBG("* compute_memory_grow_pool() new_size_in_dw = %d\n",
+ new_size_in_dw);
+
assert(new_size_in_dw >= pool->size_in_dw);
new_size_in_dw += 1024 - (new_size_in_dw % 1024);
+ COMPUTE_DBG(" Aligned size = %d\n", new_size_in_dw);
+
if (pool->bo) {
compute_memory_shadow(pool, pipe, 1);
}
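
One observation on the padding above: new_size_in_dw += 1024 - (new_size_in_dw % 1024) always grows the size, adding a full extra 1024 dwords when the value is already aligned (2048 becomes 3072). If that extra block is not intentional headroom, the usual round-up idiom leaves aligned sizes unchanged:

	/* Round up to the next multiple of 1024 dwords without padding
	 * already-aligned values: 2048 -> 2048, 2049 -> 3072. */
	static inline int align_to_1024_dw(int size_in_dw)
	{
		return (size_in_dw + 1023) & ~1023;
	}
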
{
struct compute_memory_item chunk;
+ COMPUTE_DBG("* compute_memory_shadow() device_to_host = %d\n",
+ device_to_host);
+
chunk.id = 0;
chunk.start_in_dw = 0;
chunk.size_in_dw = pool->size_in_dw;
int64_t allocated = 0;
int64_t unallocated = 0;
+ COMPUTE_DBG("* compute_memory_finalize_pending()\n");
+
for (item = pool->item_list; item; item = item->next) {
COMPUTE_DBG("list: %i %p\n", item->start_in_dw, item->next);
}
{
struct compute_memory_item *item, *next;
+ COMPUTE_DBG("* compute_memory_free() id + %ld \n", id);
+
for (item = pool->item_list; item; item = next) {
next = item->next;
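
The loop above uses the standard safe-unlink idiom: next is captured before the current item may be freed, so iteration never touches freed memory. A minimal standalone sketch of the pattern:

	#include <stdlib.h>

	struct node { struct node *next; };

	/* Free a whole singly linked list; saving 'next' first keeps the
	 * walk valid after free(n). */
	static void free_list(struct node *head)
	{
		struct node *next;
		for (struct node *n = head; n; n = next) {
			next = n->next;
			free(n);
		}
	}
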
{
struct compute_memory_item *new_item;
- COMPUTE_DBG("Alloc: %i\n", size_in_dw);
+ COMPUTE_DBG("* compute_memory_alloc() size_in_dw = %ld\n", size_in_dw);
new_item = (struct compute_memory_item *)
CALLOC(sizeof(struct compute_memory_item), 1);
assert(gart);
+ COMPUTE_DBG("* compute_memory_transfer() device_to_host = %d, "
+ "offset_in_chunk = %d, size = %d\n", device_to_host,
+ offset_in_chunk, size);
+
if (device_to_host)
{
xfer = pipe->get_transfer(pipe, gart, 0, PIPE_TRANSFER_READ,
const struct pipe_llvm_program_header * header;
const unsigned char * code;
+ COMPUTE_DBG("*** evergreen_create_compute_state\n");
+
header = cso->prog;
code = cso->prog + sizeof(struct pipe_llvm_program_header);
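
The pointer arithmetic here assumes the compute state blob is a fixed-size header followed immediately by the bytecode. A standalone sketch of that layout (the struct and field below are illustrative, not the gallium definition):

	#include <stdint.h>

	struct llvm_program_header {
		uint32_t num_bytes;   /* size of the bytecode that follows */
	};

	static const unsigned char *program_bytecode(const void *prog)
	{
		return (const unsigned char *)prog +
		       sizeof(struct llvm_program_header);
	}
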
#endif
{
struct r600_context *ctx = (struct r600_context *)ctx_;
+ COMPUTE_DBG("*** evergreen_bind_compute_state\n");
+
ctx->cs_shader = (struct r600_pipe_compute *)state;
if (!ctx->cs_shader->shader_code_bo) {
{
struct r600_context *ctx = (struct r600_context *)ctx_;
struct r600_surface **resources = (struct r600_surface **)surfaces;
+
+ COMPUTE_DBG("*** evergreen_set_compute_resources: start = %u count = %u\n",
+ start, count);
+
for (int i = 0; i < count; i++) {
if (resources[i]) {
struct r600_resource_global *buffer =
struct r600_resource_global **buffers =
(struct r600_resource_global **)resources;
+ COMPUTE_DBG("*** evergreen_set_global_binding first = %u n = %u\n",
+ first, n);
+
if (!resources) {
/* XXX: Unset */
return;
CALLOC(sizeof(struct r600_resource_global), 1);
struct r600_screen* rscreen = (struct r600_screen*)screen;
+ COMPUTE_DBG("*** r600_compute_global_buffer_create\n");
+ COMPUTE_DBG("width = %u array_size = %u\n", templ->width0,
+ templ->array_size);
+
result->base.b.vtbl = &r600_global_buffer_vtbl;
result->base.b.b = *templ;
result->base.b.b.screen = screen;
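
A general note on this copy-then-override pattern: the aggregate assignment from the template has to come before any individual fields are pinned, or the bulk copy silently clobbers them. Sketched with illustrative types:

	struct resource { void *screen; int width0; };

	static void init_from_template(struct resource *res,
	                               const struct resource *templ,
	                               void *screen)
	{
		*res = *templ;          /* bulk copy of caller-provided fields */
		res->screen = screen;   /* then set the driver-owned pointer */
	}
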