/*
 * Copyright 2011 Adam Rak <adam.rak@streamnovation.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Adam Rak <adam.rak@streamnovation.com>
 */
29 #include "pipe/p_defines.h"
30 #include "pipe/p_state.h"
31 #include "pipe/p_context.h"
32 #include "util/u_blitter.h"
33 #include "util/u_double_list.h"
34 #include "util/u_transfer.h"
35 #include "util/u_surface.h"
36 #include "util/u_pack_color.h"
37 #include "util/u_memory.h"
38 #include "util/u_inlines.h"
39 #include "util/u_framebuffer.h"
40 #include "pipebuffer/pb_buffer.h"
42 #include "evergreend.h"
43 #include "r600_resource.h"
44 #include "r600_shader.h"
45 #include "r600_pipe.h"
46 #include "r600_formats.h"
47 #include "evergreen_compute.h"
48 #include "r600_hw_context_priv.h"
49 #include "evergreen_compute_internal.h"
50 #include "compute_memory_pool.h"
52 #include "llvm_wrapper.h"
/*
 * RAT0 is for global binding write
 * VTX1 is for global binding read
 *
 * for writing images RAT1...
 * for reading images TEX2...
 *   TEX2... consumes the same fetch resources that VTX2... would consume
 *
 * CONST0 and VTX0 is for parameters
 *   CONST0 is binding smaller input parameter buffer, and for constant indexing,
 *   VTX0 is for indirect/non-constant indexing, or if the input is bigger than
 *   the constant cache can handle
 *
 * RAT-s are limited to 12, so we can only bind at most 11 texture for writing
 * because we reserve RAT0 for global bindings. With byteaddressing enabled,
 * we should reserve another one too. => 10 image bindings for writing max.
 *
 * OpenCL requires:
 * CL_DEVICE_MAX_READ_IMAGE_ARGS: 128
 * CL_DEVICE_MAX_WRITE_IMAGE_ARGS: 8
 *
 * so 10 for writing is enough. 176 is the max for reading according to the docs
 *
 * writable images should be listed first < 10, so their id corresponds to RAT(id+1)
 * writable images will consume TEX slots, VTX slots too because of linear indexing
 */
86 static void evergreen_cs_set_vertex_buffer(
87 struct r600_context
* rctx
,
90 struct pipe_resource
* buffer
)
92 struct r600_vertexbuf_state
*state
= &rctx
->cs_vertex_buffer_state
;
93 struct pipe_vertex_buffer
*vb
= &state
->vb
[vb_index
];
95 vb
->buffer_offset
= offset
;
97 vb
->user_buffer
= NULL
;
99 r600_inval_vertex_cache(rctx
);
100 state
->enabled_mask
|= 1 << vb_index
;
101 state
->dirty_mask
|= 1 << vb_index
;
102 r600_atom_dirty(rctx
, &state
->atom
);
105 const struct u_resource_vtbl r600_global_buffer_vtbl
=
107 u_default_resource_get_handle
, /* get_handle */
108 r600_compute_global_buffer_destroy
, /* resource_destroy */
109 r600_compute_global_get_transfer
, /* get_transfer */
110 r600_compute_global_transfer_destroy
, /* transfer_destroy */
111 r600_compute_global_transfer_map
, /* transfer_map */
112 r600_compute_global_transfer_flush_region
,/* transfer_flush_region */
113 r600_compute_global_transfer_unmap
, /* transfer_unmap */
114 r600_compute_global_transfer_inline_write
/* transfer_inline_write */
118 void *evergreen_create_compute_state(
119 struct pipe_context
*ctx_
,
120 const const struct pipe_compute_state
*cso
)
122 struct r600_context
*ctx
= (struct r600_context
*)ctx_
;
123 struct r600_pipe_compute
*shader
= CALLOC_STRUCT(r600_pipe_compute
);
127 const struct pipe_llvm_program_header
* header
;
128 const unsigned char * code
;
130 COMPUTE_DBG("*** evergreen_create_compute_state\n");
133 code
= cso
->prog
+ sizeof(struct pipe_llvm_program_header
);
136 shader
->ctx
= (struct r600_context
*)ctx
;
137 shader
->resources
= (struct evergreen_compute_resource
*)
138 CALLOC(sizeof(struct evergreen_compute_resource
),
139 get_compute_resource_num());
140 shader
->local_size
= cso
->req_local_mem
; ///TODO: assert it
141 shader
->private_size
= cso
->req_private_mem
;
142 shader
->input_size
= cso
->req_input_mem
;
145 shader
->mod
= llvm_parse_bitcode(code
, header
->num_bytes
);
147 r600_compute_shader_create(ctx_
, shader
->mod
, &shader
->bc
);
149 shader
->shader_code_bo
= r600_compute_buffer_alloc_vram(ctx
->screen
,
152 p
= ctx
->ws
->buffer_map(shader
->shader_code_bo
->cs_buf
, ctx
->cs
,
153 PIPE_TRANSFER_WRITE
);
155 memcpy(p
, shader
->bc
.bytecode
, shader
->bc
.ndw
* 4);
156 ctx
->ws
->buffer_unmap(shader
->shader_code_bo
->cs_buf
);
160 void evergreen_delete_compute_state(struct pipe_context
*ctx
, void* state
)
162 struct r600_pipe_compute
*shader
= (struct r600_pipe_compute
*)state
;
164 free(shader
->resources
);
168 static void evergreen_bind_compute_state(struct pipe_context
*ctx_
, void *state
)
170 struct r600_context
*ctx
= (struct r600_context
*)ctx_
;
172 COMPUTE_DBG("*** evergreen_bind_compute_state\n");
174 ctx
->cs_shader_state
.shader
= (struct r600_pipe_compute
*)state
;
177 /* The kernel parameters are stored a vtx buffer (ID=0), besides the explicit
178 * kernel parameters there are inplicit parameters that need to be stored
179 * in the vertex buffer as well. Here is how these parameters are organized in
182 * DWORDS 0-2: Number of work groups in each dimension (x,y,z)
183 * DWORDS 3-5: Number of global work items in each dimension (x,y,z)
184 * DWORDS 6-8: Number of work items within each work group in each dimension
186 * DWORDS 9+ : Kernel parameters
188 void evergreen_compute_upload_input(
189 struct pipe_context
*ctx_
,
190 const uint
*block_layout
,
191 const uint
*grid_layout
,
194 struct r600_context
*ctx
= (struct r600_context
*)ctx_
;
195 struct r600_pipe_compute
*shader
= ctx
->cs_shader_state
.shader
;
197 unsigned kernel_parameters_offset_bytes
= 36;
198 uint32_t * num_work_groups_start
;
199 uint32_t * global_size_start
;
200 uint32_t * local_size_start
;
201 uint32_t * kernel_parameters_start
;
203 if (shader
->input_size
== 0) {
207 if (!shader
->kernel_param
) {
208 unsigned buffer_size
= shader
->input_size
;
210 /* Add space for the grid dimensions */
211 buffer_size
+= kernel_parameters_offset_bytes
* sizeof(uint
);
212 shader
->kernel_param
= r600_compute_buffer_alloc_vram(
213 ctx
->screen
, buffer_size
);
216 num_work_groups_start
= ctx
->ws
->buffer_map(
217 shader
->kernel_param
->cs_buf
, ctx
->cs
, PIPE_TRANSFER_WRITE
);
218 global_size_start
= num_work_groups_start
+ (3 * (sizeof(uint
) /4));
219 local_size_start
= global_size_start
+ (3 * (sizeof(uint
)) / 4);
220 kernel_parameters_start
= local_size_start
+ (3 * (sizeof(uint
)) / 4);
222 /* Copy the work group size */
223 memcpy(num_work_groups_start
, grid_layout
, 3 * sizeof(uint
));
225 /* Copy the global size */
226 for (i
= 0; i
< 3; i
++) {
227 global_size_start
[i
] = grid_layout
[i
] * block_layout
[i
];
230 /* Copy the local dimensions */
231 memcpy(local_size_start
, block_layout
, 3 * sizeof(uint
));
233 /* Copy the kernel inputs */
234 memcpy(kernel_parameters_start
, input
, shader
->input_size
);
236 for (i
= 0; i
< (kernel_parameters_offset_bytes
/ 4) +
237 (shader
->input_size
/ 4); i
++) {
238 COMPUTE_DBG("input %i : %i\n", i
,
239 ((unsigned*)num_work_groups_start
)[i
]);
242 ctx
->ws
->buffer_unmap(shader
->kernel_param
->cs_buf
);
244 ///ID=0 is reserved for the parameters
245 evergreen_cs_set_vertex_buffer(ctx
, 0, 0,
246 (struct pipe_resource
*)shader
->kernel_param
);
247 ///ID=0 is reserved for parameters
248 evergreen_set_const_cache(shader
, 0, shader
->kernel_param
,
249 shader
->input_size
, 0);
252 void evergreen_direct_dispatch(
253 struct pipe_context
*ctx_
,
254 const uint
*block_layout
, const uint
*grid_layout
)
256 /* This struct r600_context* must be called rctx, because the
257 * r600_pipe_state_add_reg macro assumes there is a local variable
258 * of type struct r600_context* called rctx.
260 struct r600_context
*rctx
= (struct r600_context
*)ctx_
;
261 struct r600_pipe_compute
*shader
= rctx
->cs_shader_state
.shader
;
265 struct evergreen_compute_resource
* res
= get_empty_res(shader
,
266 COMPUTE_RESOURCE_DISPATCH
, 0);
268 /* Set CB_TARGET_MASK */
269 evergreen_reg_set(res
, R_028238_CB_TARGET_MASK
, rctx
->compute_cb_target_mask
);
271 evergreen_reg_set(res
, R_00899C_VGT_COMPUTE_START_X
, 0);
272 evergreen_reg_set(res
, R_0089A0_VGT_COMPUTE_START_Y
, 0);
273 evergreen_reg_set(res
, R_0089A4_VGT_COMPUTE_START_Z
, 0);
275 evergreen_reg_set(res
, R_0286EC_SPI_COMPUTE_NUM_THREAD_X
, block_layout
[0]);
276 evergreen_reg_set(res
, R_0286F0_SPI_COMPUTE_NUM_THREAD_Y
, block_layout
[1]);
277 evergreen_reg_set(res
, R_0286F4_SPI_COMPUTE_NUM_THREAD_Z
, block_layout
[2]);
283 for (i
= 0; i
< 3; i
++) {
284 group_size
*= block_layout
[i
];
287 for (i
= 0; i
< 3; i
++) {
288 grid_size
*= grid_layout
[i
];
291 evergreen_reg_set(res
, R_008970_VGT_NUM_INDICES
, group_size
);
292 evergreen_reg_set(res
, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE
, group_size
);
294 evergreen_emit_raw_value(res
, PKT3C(PKT3_DISPATCH_DIRECT
, 3, 0));
295 evergreen_emit_raw_value(res
, grid_layout
[0]);
296 evergreen_emit_raw_value(res
, grid_layout
[1]);
297 evergreen_emit_raw_value(res
, grid_layout
[2]);
298 ///VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN
299 evergreen_emit_raw_value(res
, 1);
302 static void compute_emit_cs(struct r600_context
*ctx
)
304 struct radeon_winsys_cs
*cs
= ctx
->cs
;
307 struct r600_resource
*onebo
= NULL
;
308 struct r600_pipe_state
*cb_state
;
309 struct evergreen_compute_resource
*resources
=
310 ctx
->cs_shader_state
.shader
->resources
;
312 /* Initialize all the registers common to both 3D and compute. Some
313 * 3D only register will be initialized by this atom as well, but
314 * this is OK for now.
316 * See evergreen_init_atom_start_cs() or cayman_init_atom_start_cs() in
317 * evergreen_state.c for the list of registers that are intialized by
318 * the start_cs_cmd atom.
320 r600_emit_atom(ctx
, &ctx
->start_cs_cmd
.atom
);
322 /* Initialize all the compute specific registers.
324 * See evergreen_init_atom_start_compute_cs() in this file for the list
325 * of registers initialized by the start_compuet_cs_cmd atom.
327 r600_emit_atom(ctx
, &ctx
->start_compute_cs_cmd
.atom
);
330 cb_state
= ctx
->states
[R600_PIPE_STATE_FRAMEBUFFER
];
331 r600_context_pipe_state_emit(ctx
, cb_state
, RADEON_CP_PACKET3_COMPUTE_MODE
);
333 /* Emit vertex buffer state */
334 ctx
->cs_vertex_buffer_state
.atom
.num_dw
= 12 * util_bitcount(ctx
->cs_vertex_buffer_state
.dirty_mask
);
335 r600_emit_atom(ctx
, &ctx
->cs_vertex_buffer_state
.atom
);
337 /* Emit compute shader state */
338 r600_emit_atom(ctx
, &ctx
->cs_shader_state
.atom
);
340 for (i
= 0; i
< get_compute_resource_num(); i
++) {
341 if (resources
[i
].enabled
) {
343 COMPUTE_DBG("resnum: %i, cdw: %i\n", i
, cs
->cdw
);
345 for (j
= 0; j
< resources
[i
].cs_end
; j
++) {
346 if (resources
[i
].do_reloc
[j
]) {
347 assert(resources
[i
].bo
);
348 evergreen_emit_ctx_reloc(ctx
,
353 cs
->buf
[cs
->cdw
++] = resources
[i
].cs
[j
];
356 if (resources
[i
].bo
) {
357 onebo
= resources
[i
].bo
;
358 evergreen_emit_ctx_reloc(ctx
,
362 ///special case for textures
363 if (resources
[i
].do_reloc
364 [resources
[i
].cs_end
] == 2) {
365 evergreen_emit_ctx_reloc(ctx
,
373 /* r600_flush_framebuffer() updates the cb_flush_flags and then
374 * calls r600_emit_atom() on the ctx->surface_sync_cmd.atom, which emits
375 * a SURFACE_SYNC packet via r600_emit_surface_sync().
377 * XXX r600_emit_surface_sync() hardcodes the CP_COHER_SIZE to
378 * 0xffffffff, so we will need to add a field to struct
379 * r600_surface_sync_cmd if we want to manually set this value.
381 r600_flush_framebuffer(ctx
, true /* Flush now */);
384 COMPUTE_DBG("cdw: %i\n", cs
->cdw
);
385 for (i
= 0; i
< cs
->cdw
; i
++) {
386 COMPUTE_DBG("%4i : 0x%08X\n", i
, ctx
->cs
->buf
[i
]);
390 ctx
->ws
->cs_flush(ctx
->cs
, RADEON_FLUSH_ASYNC
| RADEON_FLUSH_COMPUTE
);
392 ctx
->pm4_dirty_cdwords
= 0;
395 COMPUTE_DBG("shader started\n");
397 ctx
->ws
->buffer_wait(onebo
->buf
, 0);
399 COMPUTE_DBG("...\n");
401 ctx
->streamout_start
= TRUE
;
402 ctx
->streamout_append_bitmask
= ~0;
408 * Emit function for r600_cs_shader_state atom
410 void evergreen_emit_cs_shader(
411 struct r600_context
*rctx
,
412 struct r600_atom
*atom
)
414 struct r600_cs_shader_state
*state
=
415 (struct r600_cs_shader_state
*)atom
;
416 struct r600_pipe_compute
*shader
= state
->shader
;
417 struct radeon_winsys_cs
*cs
= rctx
->cs
;
420 va
= r600_resource_va(&rctx
->screen
->screen
, &shader
->shader_code_bo
->b
.b
);
422 r600_write_compute_context_reg_seq(cs
, R_0288D0_SQ_PGM_START_LS
, 3);
423 r600_write_value(cs
, va
>> 8); /* R_0288D0_SQ_PGM_START_LS */
424 r600_write_value(cs
, /* R_0288D4_SQ_PGM_RESOURCES_LS */
425 S_0288D4_NUM_GPRS(shader
->bc
.ngpr
)
426 | S_0288D4_STACK_SIZE(shader
->bc
.nstack
));
427 r600_write_value(cs
, 0); /* R_0288D8_SQ_PGM_RESOURCES_LS_2 */
429 r600_write_value(cs
, PKT3C(PKT3_NOP
, 0, 0));
430 r600_write_value(cs
, r600_context_bo_reloc(rctx
, shader
->shader_code_bo
,
433 r600_inval_shader_cache(rctx
);
436 static void evergreen_launch_grid(
437 struct pipe_context
*ctx_
,
438 const uint
*block_layout
, const uint
*grid_layout
,
439 uint32_t pc
, const void *input
)
441 COMPUTE_DBG("PC: %i\n", pc
);
443 struct r600_context
*ctx
= (struct r600_context
*)ctx_
;
445 unsigned num_pipes
= ctx
->screen
->info
.r600_max_pipes
;
446 unsigned wave_divisor
= (16 * num_pipes
);
448 /* num_waves = ceil((tg_size.x * tg_size.y, tg_size.z) / (16 * num_pipes)) */
449 num_waves
= (block_layout
[0] * block_layout
[1] * block_layout
[2] +
450 wave_divisor
- 1) / wave_divisor
;
452 COMPUTE_DBG("Using %u pipes, there are %u wavefronts per thread block\n",
453 num_pipes
, num_waves
);
455 evergreen_set_lds(ctx
->cs_shader_state
.shader
, 0, 0, num_waves
);
456 evergreen_compute_upload_input(ctx_
, block_layout
, grid_layout
, input
);
457 evergreen_direct_dispatch(ctx_
, block_layout
, grid_layout
);
458 compute_emit_cs(ctx
);
461 static void evergreen_set_compute_resources(struct pipe_context
* ctx_
,
462 unsigned start
, unsigned count
,
463 struct pipe_surface
** surfaces
)
465 struct r600_context
*ctx
= (struct r600_context
*)ctx_
;
466 struct r600_surface
**resources
= (struct r600_surface
**)surfaces
;
468 COMPUTE_DBG("*** evergreen_set_compute_resources: start = %u count = %u\n",
471 for (int i
= 0; i
< count
; i
++) {
472 /* The First two vertex buffers are reserved for parameters and
474 unsigned vtx_id
= 2 + i
;
476 struct r600_resource_global
*buffer
=
477 (struct r600_resource_global
*)
478 resources
[i
]->base
.texture
;
479 if (resources
[i
]->base
.writable
) {
482 evergreen_set_rat(ctx
->cs_shader_state
.shader
, i
+1,
483 (struct r600_resource
*)resources
[i
]->base
.texture
,
484 buffer
->chunk
->start_in_dw
*4,
485 resources
[i
]->base
.texture
->width0
);
488 evergreen_cs_set_vertex_buffer(ctx
, vtx_id
,
489 buffer
->chunk
->start_in_dw
* 4,
490 resources
[i
]->base
.texture
);
495 static void evergreen_set_cs_sampler_view(struct pipe_context
*ctx_
,
496 unsigned start_slot
, unsigned count
,
497 struct pipe_sampler_view
**views
)
499 struct r600_context
*ctx
= (struct r600_context
*)ctx_
;
500 struct r600_pipe_sampler_view
**resource
=
501 (struct r600_pipe_sampler_view
**)views
;
503 for (int i
= 0; i
< count
; i
++) {
506 ///FETCH0 = VTX0 (param buffer),
507 //FETCH1 = VTX1 (global buffer pool), FETCH2... = TEX
508 evergreen_set_tex_resource(ctx
->cs_shader_state
.shader
, resource
[i
], i
+2);
513 static void evergreen_bind_compute_sampler_states(
514 struct pipe_context
*ctx_
,
516 unsigned num_samplers
,
519 struct r600_context
*ctx
= (struct r600_context
*)ctx_
;
520 struct compute_sampler_state
** samplers
=
521 (struct compute_sampler_state
**)samplers_
;
523 for (int i
= 0; i
< num_samplers
; i
++) {
525 evergreen_set_sampler_resource(
526 ctx
->cs_shader_state
.shader
, samplers
[i
], i
);
531 static void evergreen_set_global_binding(
532 struct pipe_context
*ctx_
, unsigned first
, unsigned n
,
533 struct pipe_resource
**resources
,
536 struct r600_context
*ctx
= (struct r600_context
*)ctx_
;
537 struct compute_memory_pool
*pool
= ctx
->screen
->global_pool
;
538 struct r600_resource_global
**buffers
=
539 (struct r600_resource_global
**)resources
;
541 COMPUTE_DBG("*** evergreen_set_global_binding first = %u n = %u\n",
549 compute_memory_finalize_pending(pool
, ctx_
);
551 for (int i
= 0; i
< n
; i
++)
553 assert(resources
[i
]->target
== PIPE_BUFFER
);
554 assert(resources
[i
]->bind
& PIPE_BIND_GLOBAL
);
556 *(handles
[i
]) = buffers
[i
]->chunk
->start_in_dw
* 4;
559 evergreen_set_rat(ctx
->cs_shader_state
.shader
, 0, pool
->bo
, 0, pool
->size_in_dw
* 4);
560 evergreen_cs_set_vertex_buffer(ctx
, 1, 0,
561 (struct pipe_resource
*)pool
->bo
);
565 * This function initializes all the compute specific registers that need to
566 * be initialized for each compute command stream. Registers that are common
567 * to both compute and 3D will be initialized at the beginning of each compute
568 * command stream by the start_cs_cmd atom. However, since the SET_CONTEXT_REG
569 * packet requires that the shader type bit be set, we must initialize all
570 * context registers needed for compute in this function. The registers
571 * intialized by the start_cs_cmd atom can be found in evereen_state.c in the
572 * functions evergreen_init_atom_start_cs or cayman_init_atom_start_cs depending
575 void evergreen_init_atom_start_compute_cs(struct r600_context
*ctx
)
577 struct r600_command_buffer
*cb
= &ctx
->start_compute_cs_cmd
;
579 int num_stack_entries
;
581 /* We aren't passing the EMIT_EARLY flag as the third argument
582 * because we will be emitting this atom manually in order to
583 * ensure it gets emitted after the start_cs_cmd atom.
585 r600_init_command_buffer(cb
, 256, 0);
586 cb
->pkt_flags
= RADEON_CP_PACKET3_COMPUTE_MODE
;
588 switch (ctx
->family
) {
592 num_stack_entries
= 256;
596 num_stack_entries
= 256;
600 num_stack_entries
= 512;
605 num_stack_entries
= 512;
609 num_stack_entries
= 256;
613 num_stack_entries
= 256;
617 num_stack_entries
= 512;
621 num_stack_entries
= 512;
625 num_stack_entries
= 256;
629 num_stack_entries
= 256;
633 /* Config Registers */
635 /* The primitive type always needs to be POINTLIST for compute. */
636 r600_store_config_reg(cb
, R_008958_VGT_PRIMITIVE_TYPE
,
637 V_008958_DI_PT_POINTLIST
);
639 if (ctx
->chip_class
< CAYMAN
) {
641 /* These registers control which simds can be used by each stage.
642 * The default for these registers is 0xffffffff, which means
643 * all simds are available for each stage. It's possible we may
644 * want to play around with these in the future, but for now
645 * the default value is fine.
647 * R_008E20_SQ_STATIC_THREAD_MGMT1
648 * R_008E24_SQ_STATIC_THREAD_MGMT2
649 * R_008E28_SQ_STATIC_THREAD_MGMT3
652 /* XXX: We may need to adjust the thread and stack resouce
653 * values for 3D/compute interop */
655 r600_store_config_reg_seq(cb
, R_008C18_SQ_THREAD_RESOURCE_MGMT_1
, 5);
657 /* R_008C18_SQ_THREAD_RESOURCE_MGMT_1
658 * Set the number of threads used by the PS/VS/GS/ES stage to
661 r600_store_value(cb
, 0);
663 /* R_008C1C_SQ_THREAD_RESOURCE_MGMT_2
664 * Set the number of threads used by the CS (aka LS) stage to
665 * the maximum number of threads and set the number of threads
666 * for the HS stage to 0. */
667 r600_store_value(cb
, S_008C1C_NUM_LS_THREADS(num_threads
));
669 /* R_008C20_SQ_STACK_RESOURCE_MGMT_1
670 * Set the Control Flow stack entries to 0 for PS/VS stages */
671 r600_store_value(cb
, 0);
673 /* R_008C24_SQ_STACK_RESOURCE_MGMT_2
674 * Set the Control Flow stack entries to 0 for GS/ES stages */
675 r600_store_value(cb
, 0);
677 /* R_008C28_SQ_STACK_RESOURCE_MGMT_3
678 * Set the Contol Flow stack entries to 0 for the HS stage, and
679 * set it to the maximum value for the CS (aka LS) stage. */
681 S_008C28_NUM_LS_STACK_ENTRIES(num_stack_entries
));
684 /* Context Registers */
686 if (ctx
->chip_class
< CAYMAN
) {
687 /* workaround for hw issues with dyn gpr - must set all limits
688 * to 240 instead of 0, 0x1e == 240 / 8
690 r600_store_context_reg(cb
, R_028838_SQ_DYN_GPR_RESOURCE_LIMIT_1
,
691 S_028838_PS_GPRS(0x1e) |
692 S_028838_VS_GPRS(0x1e) |
693 S_028838_GS_GPRS(0x1e) |
694 S_028838_ES_GPRS(0x1e) |
695 S_028838_HS_GPRS(0x1e) |
696 S_028838_LS_GPRS(0x1e));
699 /* XXX: Investigate setting bit 15, which is FAST_COMPUTE_MODE */
700 r600_store_context_reg(cb
, R_028A40_VGT_GS_MODE
,
701 S_028A40_COMPUTE_MODE(1) | S_028A40_PARTIAL_THD_AT_EOI(1));
703 r600_store_context_reg(cb
, R_028B54_VGT_SHADER_STAGES_EN
, 2/*CS_ON*/);
705 r600_store_context_reg(cb
, R_0286E8_SPI_COMPUTE_INPUT_CNTL
,
706 S_0286E8_TID_IN_GROUP_ENA
708 | S_0286E8_DISABLE_INDEX_PACK
)
711 /* The LOOP_CONST registers are an optimizations for loops that allows
712 * you to store the initial counter, increment value, and maximum
713 * counter value in a register so that hardware can calculate the
714 * correct number of iterations for the loop, so that you don't need
715 * to have the loop counter in your shader code. We don't currently use
716 * this optimization, so we must keep track of the counter in the
717 * shader and use a break instruction to exit loops. However, the
718 * hardware will still uses this register to determine when to exit a
719 * loop, so we need to initialize the counter to 0, set the increment
720 * value to 1 and the maximum counter value to the 4095 (0xfff) which
721 * is the maximum value allowed. This gives us a maximum of 4096
722 * iterations for our loops, but hopefully our break instruction will
723 * execute before some time before the 4096th iteration.
725 eg_store_loop_const(cb
, R_03A200_SQ_LOOP_CONST_0
+ (160 * 4), 0x1000FFF);
728 void evergreen_init_compute_state_functions(struct r600_context
*ctx
)
730 ctx
->context
.create_compute_state
= evergreen_create_compute_state
;
731 ctx
->context
.delete_compute_state
= evergreen_delete_compute_state
;
732 ctx
->context
.bind_compute_state
= evergreen_bind_compute_state
;
733 // ctx->context.create_sampler_view = evergreen_compute_create_sampler_view;
734 ctx
->context
.set_compute_resources
= evergreen_set_compute_resources
;
735 ctx
->context
.set_compute_sampler_views
= evergreen_set_cs_sampler_view
;
736 ctx
->context
.bind_compute_sampler_states
= evergreen_bind_compute_sampler_states
;
737 ctx
->context
.set_global_binding
= evergreen_set_global_binding
;
738 ctx
->context
.launch_grid
= evergreen_launch_grid
;
740 /* We always use at least two vertex buffers for compute, one for
741 * parameters and one for global memory */
742 ctx
->cs_vertex_buffer_state
.enabled_mask
=
743 ctx
->cs_vertex_buffer_state
.dirty_mask
= 1 | 2;
747 struct pipe_resource
*r600_compute_global_buffer_create(
748 struct pipe_screen
*screen
,
749 const struct pipe_resource
*templ
)
751 assert(templ
->target
== PIPE_BUFFER
);
752 assert(templ
->bind
& PIPE_BIND_GLOBAL
);
753 assert(templ
->array_size
== 1 || templ
->array_size
== 0);
754 assert(templ
->depth0
== 1 || templ
->depth0
== 0);
755 assert(templ
->height0
== 1 || templ
->height0
== 0);
757 struct r600_resource_global
* result
= (struct r600_resource_global
*)
758 CALLOC(sizeof(struct r600_resource_global
), 1);
759 struct r600_screen
* rscreen
= (struct r600_screen
*)screen
;
761 COMPUTE_DBG("*** r600_compute_global_buffer_create\n");
762 COMPUTE_DBG("width = %u array_size = %u\n", templ
->width0
,
765 result
->base
.b
.vtbl
= &r600_global_buffer_vtbl
;
766 result
->base
.b
.b
.screen
= screen
;
767 result
->base
.b
.b
= *templ
;
768 pipe_reference_init(&result
->base
.b
.b
.reference
, 1);
770 int size_in_dw
= (templ
->width0
+3) / 4;
772 result
->chunk
= compute_memory_alloc(rscreen
->global_pool
, size_in_dw
);
774 if (result
->chunk
== NULL
)
780 return &result
->base
.b
.b
;
783 void r600_compute_global_buffer_destroy(
784 struct pipe_screen
*screen
,
785 struct pipe_resource
*res
)
787 assert(res
->target
== PIPE_BUFFER
);
788 assert(res
->bind
& PIPE_BIND_GLOBAL
);
790 struct r600_resource_global
* buffer
= (struct r600_resource_global
*)res
;
791 struct r600_screen
* rscreen
= (struct r600_screen
*)screen
;
793 compute_memory_free(rscreen
->global_pool
, buffer
->chunk
->id
);
795 buffer
->chunk
= NULL
;
799 void* r600_compute_global_transfer_map(
800 struct pipe_context
*ctx_
,
801 struct pipe_transfer
* transfer
)
803 assert(transfer
->resource
->target
== PIPE_BUFFER
);
804 assert(transfer
->resource
->bind
& PIPE_BIND_GLOBAL
);
805 assert(transfer
->box
.x
>= 0);
806 assert(transfer
->box
.y
== 0);
807 assert(transfer
->box
.z
== 0);
809 struct r600_context
*ctx
= (struct r600_context
*)ctx_
;
810 struct r600_resource_global
* buffer
=
811 (struct r600_resource_global
*)transfer
->resource
;
814 ///TODO: do it better, mapping is not possible if the pool is too big
816 if (!(map
= ctx
->ws
->buffer_map(buffer
->chunk
->pool
->bo
->cs_buf
,
817 ctx
->cs
, transfer
->usage
))) {
821 COMPUTE_DBG("buffer start: %lli\n", buffer
->chunk
->start_in_dw
);
822 return ((char*)(map
+ buffer
->chunk
->start_in_dw
)) + transfer
->box
.x
;
825 void r600_compute_global_transfer_unmap(
826 struct pipe_context
*ctx_
,
827 struct pipe_transfer
* transfer
)
829 assert(transfer
->resource
->target
== PIPE_BUFFER
);
830 assert(transfer
->resource
->bind
& PIPE_BIND_GLOBAL
);
832 struct r600_context
*ctx
= (struct r600_context
*)ctx_
;
833 struct r600_resource_global
* buffer
=
834 (struct r600_resource_global
*)transfer
->resource
;
836 ctx
->ws
->buffer_unmap(buffer
->chunk
->pool
->bo
->cs_buf
);
839 struct pipe_transfer
* r600_compute_global_get_transfer(
840 struct pipe_context
*ctx_
,
841 struct pipe_resource
*resource
,
844 const struct pipe_box
*box
)
846 struct r600_context
*ctx
= (struct r600_context
*)ctx_
;
847 struct compute_memory_pool
*pool
= ctx
->screen
->global_pool
;
849 compute_memory_finalize_pending(pool
, ctx_
);
851 assert(resource
->target
== PIPE_BUFFER
);
852 struct r600_context
*rctx
= (struct r600_context
*)ctx_
;
853 struct pipe_transfer
*transfer
= util_slab_alloc(&rctx
->pool_transfers
);
855 transfer
->resource
= resource
;
856 transfer
->level
= level
;
857 transfer
->usage
= usage
;
858 transfer
->box
= *box
;
859 transfer
->stride
= 0;
860 transfer
->layer_stride
= 0;
861 transfer
->data
= NULL
;
863 /* Note strides are zero, this is ok for buffers, but not for
864 * textures 2d & higher at least.
869 void r600_compute_global_transfer_destroy(
870 struct pipe_context
*ctx_
,
871 struct pipe_transfer
*transfer
)
873 struct r600_context
*rctx
= (struct r600_context
*)ctx_
;
874 util_slab_free(&rctx
->pool_transfers
, transfer
);
/**
 * Flush an explicitly mapped region — not implemented.
 * NOTE(review): the body was lost in the source; reconstructed as the
 * customary unimplemented stub — confirm against the original.
 */
void r600_compute_global_transfer_flush_region(
	struct pipe_context *ctx_,
	struct pipe_transfer *transfer,
	const struct pipe_box *box)
{
	assert(0 && "TODO");
}
885 void r600_compute_global_transfer_inline_write(
886 struct pipe_context
*pipe
,
887 struct pipe_resource
*resource
,
890 const struct pipe_box
*box
,
893 unsigned layer_stride
)