radeonsi: convert constant buffers to si_descriptors
[mesa.git] src/gallium/drivers/radeonsi/si_descriptors.c
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Marek Olšák <marek.olsak@amd.com>
 */
#include "../radeon/r600_cs.h"
#include "radeonsi_pipe.h"
#include "radeonsi_resource.h"
#include "radeonsi_shader.h"

#include "util/u_memory.h"

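/* Each si_descriptors buffer holds SI_NUM_CONTEXTS copies ("contexts")
 * of the descriptor array. Updates are written into a fresh context slot
 * while the GPU may still be fetching from the previous one, which avoids
 * synchronization (see si_emit_descriptors). */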
#define SI_NUM_CONTEXTS 256

static uint32_t null_desc[8]; /* zeros */

/* Set this if you want the 3D engine to wait until CP DMA is done.
 * It should be set on the last CP DMA packet. */
#define R600_CP_DMA_SYNC	(1 << 0) /* R600+ */

/* Set this if the source data was used as a destination in a previous CP DMA
 * packet. It's for preventing a read-after-write (RAW) hazard between two
 * CP DMA packets. */
#define SI_CP_DMA_RAW_WAIT	(1 << 1) /* SI+ */

/* Emit a CP DMA packet to do a copy from one buffer to another.
 * The size must fit in bits [20:0].
 */
static void si_emit_cp_dma_copy_buffer(struct r600_context *rctx,
					uint64_t dst_va, uint64_t src_va,
					unsigned size, unsigned flags)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? PKT3_CP_DMA_CP_SYNC : 0;
	uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? PKT3_CP_DMA_CMD_RAW_WAIT : 0;

	assert(size);
	assert((size & ((1<<21)-1)) == size);

	if (rctx->b.chip_class >= CIK) {
		radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
		radeon_emit(cs, sync_flag);		/* CP_SYNC [31] */
		radeon_emit(cs, src_va);		/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, src_va >> 32);		/* SRC_ADDR_HI [31:0] */
		radeon_emit(cs, dst_va);		/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, dst_va >> 32);		/* DST_ADDR_HI [31:0] */
		radeon_emit(cs, size | raw_wait);	/* COMMAND [29:22] | BYTE_COUNT [20:0] */
	} else {
		radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
		radeon_emit(cs, src_va);			/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, sync_flag | ((src_va >> 32) & 0xffff)); /* CP_SYNC [31] | SRC_ADDR_HI [15:0] */
		radeon_emit(cs, dst_va);			/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, (dst_va >> 32) & 0xffff);	/* DST_ADDR_HI [15:0] */
		radeon_emit(cs, size | raw_wait);		/* COMMAND [29:22] | BYTE_COUNT [20:0] */
	}
}

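/* Example (illustrative; tmp_va/src_va/dst_va are placeholder addresses):
 * chaining two dependent copies, where the second reads memory written by
 * the first. RAW_WAIT stalls the second packet until the first one has
 * written its data; SYNC on the last packet makes the 3D engine wait for
 * CP DMA completion.
 *
 *	si_emit_cp_dma_copy_buffer(rctx, tmp_va, src_va, size, 0);
 *	si_emit_cp_dma_copy_buffer(rctx, dst_va, tmp_va, size,
 *				   SI_CP_DMA_RAW_WAIT | R600_CP_DMA_SYNC);
 */
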
/* Emit a CP DMA packet to clear a buffer. The size must fit in bits [20:0]. */
static void si_emit_cp_dma_clear_buffer(struct r600_context *rctx,
					uint64_t dst_va, unsigned size,
					uint32_t clear_value, unsigned flags)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? PKT3_CP_DMA_CP_SYNC : 0;
	uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? PKT3_CP_DMA_CMD_RAW_WAIT : 0;

	assert(size);
	assert((size & ((1<<21)-1)) == size);

	if (rctx->b.chip_class >= CIK) {
		radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
		radeon_emit(cs, sync_flag | PKT3_CP_DMA_SRC_SEL(2)); /* CP_SYNC [31] | SRC_SEL [30:29] */
		radeon_emit(cs, clear_value);		/* DATA [31:0] */
		radeon_emit(cs, 0);			/* SRC_ADDR_HI, unused when SRC_SEL = data */
		radeon_emit(cs, dst_va);		/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, dst_va >> 32);		/* DST_ADDR_HI [31:0] */
		radeon_emit(cs, size | raw_wait);	/* COMMAND [29:22] | BYTE_COUNT [20:0] */
	} else {
		radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
		radeon_emit(cs, clear_value);		/* DATA [31:0] */
		radeon_emit(cs, sync_flag | PKT3_CP_DMA_SRC_SEL(2)); /* CP_SYNC [31] | SRC_SEL [30:29] */
		radeon_emit(cs, dst_va);			/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, (dst_va >> 32) & 0xffff);	/* DST_ADDR_HI [15:0] */
		radeon_emit(cs, size | raw_wait);		/* COMMAND [29:22] | BYTE_COUNT [20:0] */
	}
}

static void si_init_descriptors(struct r600_context *rctx,
				struct si_descriptors *desc,
				unsigned shader_userdata_reg,
				unsigned element_dw_size,
				unsigned num_elements,
				void (*emit_func)(struct r600_context *ctx, struct r600_atom *state))
{
	uint64_t va;

	assert(num_elements <= sizeof(desc->enabled_mask)*8);
	assert(num_elements <= sizeof(desc->dirty_mask)*8);

	desc->atom.emit = (void*)emit_func;
	desc->shader_userdata_reg = shader_userdata_reg;
	desc->element_dw_size = element_dw_size;
	desc->num_elements = num_elements;
	desc->context_size = num_elements * element_dw_size * 4;

	desc->buffer = (struct r600_resource*)
		pipe_buffer_create(rctx->b.b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STATIC,
				   SI_NUM_CONTEXTS * desc->context_size);

	r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, desc->buffer, RADEON_USAGE_READWRITE);
	va = r600_resource_va(rctx->b.b.screen, &desc->buffer->b.b);

	/* We don't check for CS space here, because this should be called
	 * only once at context initialization. */
	si_emit_cp_dma_clear_buffer(rctx, va, desc->buffer->b.b.width0, 0,
				    R600_CP_DMA_SYNC);
}

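/* Sizing note (illustrative numbers): for an array of 16 elements of
 * 8 dwords each, context_size = 16 * 8 * 4 = 512 bytes, so the buffer
 * above is SI_NUM_CONTEXTS * 512 = 128 KiB. */
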
static void si_release_descriptors(struct si_descriptors *desc)
{
	pipe_resource_reference((struct pipe_resource**)&desc->buffer, NULL);
}

static void si_update_descriptors(struct si_descriptors *desc)
{
	if (desc->dirty_mask) {
		desc->atom.num_dw =
			7 + /* copy */
			(4 + desc->element_dw_size) * util_bitcount(desc->dirty_mask) + /* update */
			4; /* pointer update */
		desc->atom.dirty = true;
	} else {
		desc->atom.dirty = false;
	}
}

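/* Worst-case count (illustrative): with 4-dword elements and two
 * non-adjacent dirty slots, num_dw = 7 + (4 + 4) * 2 + 4 = 27 dwords:
 * the CP DMA context copy, two WRITE_DATA packets, and the SET_SH_REG
 * pointer update emitted by si_emit_shader_pointer. */
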
/* Write the VA of the current context slot into the shader's user data SGPRs. */
static void si_emit_shader_pointer(struct r600_context *rctx,
				   struct si_descriptors *desc)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	uint64_t va = r600_resource_va(rctx->b.b.screen, &desc->buffer->b.b) +
		      desc->current_context_id * desc->context_size;

	radeon_emit(cs, PKT3(PKT3_SET_SH_REG, 2, 0));
	radeon_emit(cs, (desc->shader_userdata_reg - SI_SH_REG_OFFSET) >> 2);
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
}

/* Copy the current context to a fresh slot, patch the dirty elements
 * there with WRITE_DATA, and point the shader to the new slot. */
static void si_emit_descriptors(struct r600_context *rctx,
				struct si_descriptors *desc,
				uint32_t **descriptors)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	uint64_t va_base;
	int packet_start;
	int packet_size = 0;
	int last_index = desc->num_elements; /* point to a non-existent element */
	unsigned dirty_mask = desc->dirty_mask;
	unsigned new_context_id = (desc->current_context_id + 1) % SI_NUM_CONTEXTS;

	assert(dirty_mask);

	va_base = r600_resource_va(rctx->b.b.screen, &desc->buffer->b.b);

	/* Copy the descriptors to a new context slot. */
	si_emit_cp_dma_copy_buffer(rctx,
				   va_base + new_context_id * desc->context_size,
				   va_base + desc->current_context_id * desc->context_size,
				   desc->context_size, R600_CP_DMA_SYNC);

	va_base += new_context_id * desc->context_size;

	/* Update the descriptors.
	 * Updates of consecutive descriptors are merged into one WRITE_DATA packet.
	 *
	 * XXX When unbinding lots of resources, consider clearing the memory
	 *     with CP DMA instead of emitting zeros.
	 */
	while (dirty_mask) {
		int i = u_bit_scan(&dirty_mask);

		assert(i < desc->num_elements);

		if (last_index+1 == i && packet_size) {
			/* Append new data at the end of the last packet. */
			packet_size += desc->element_dw_size;
			cs->buf[packet_start] = PKT3(PKT3_WRITE_DATA, packet_size, 0);
		} else {
			/* Start a new packet. */
			uint64_t va = va_base + i * desc->element_dw_size * 4;

			packet_start = cs->cdw;
			packet_size = 2 + desc->element_dw_size;

			radeon_emit(cs, PKT3(PKT3_WRITE_DATA, packet_size, 0));
			radeon_emit(cs, PKT3_WRITE_DATA_DST_SEL(PKT3_WRITE_DATA_DST_SEL_MEM_SYNC) |
					PKT3_WRITE_DATA_WR_CONFIRM |
					PKT3_WRITE_DATA_ENGINE_SEL(PKT3_WRITE_DATA_ENGINE_SEL_ME));
			radeon_emit(cs, va & 0xFFFFFFFFUL);
			radeon_emit(cs, (va >> 32UL) & 0xFFFFFFFFUL);
		}

		radeon_emit_array(cs, descriptors[i], desc->element_dw_size);

		last_index = i;
	}

	desc->dirty_mask = 0;
	desc->current_context_id = new_context_id;

	/* Now update the shader userdata pointer. */
	si_emit_shader_pointer(rctx, desc);
}

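/* Merging example (illustrative): with dirty slots {0, 1, 3}, the loop
 * above emits two WRITE_DATA packets: one covering slots 0-1 (slot 1 is
 * appended by patching the packet header at packet_start), and a second
 * one for slot 3, which is not adjacent to slot 1. */
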
static unsigned si_get_shader_user_data_base(unsigned shader)
{
	switch (shader) {
	case PIPE_SHADER_VERTEX:
		return R_00B130_SPI_SHADER_USER_DATA_VS_0;
	case PIPE_SHADER_GEOMETRY:
		return R_00B230_SPI_SHADER_USER_DATA_GS_0;
	case PIPE_SHADER_FRAGMENT:
		return R_00B030_SPI_SHADER_USER_DATA_PS_0;
	default:
		assert(0);
		return 0;
	}
}

/* SAMPLER VIEWS */

static void si_emit_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
{
	struct si_sampler_views *views = (struct si_sampler_views*)atom;

	si_emit_descriptors(rctx, &views->desc, views->desc_data);
}

static void si_init_sampler_views(struct r600_context *rctx,
				  struct si_sampler_views *views,
				  unsigned shader)
{
	/* User data SGPRs are 4-byte registers, hence the index * 4.
	 * Each image resource descriptor is 8 dwords. */
	si_init_descriptors(rctx, &views->desc,
			    si_get_shader_user_data_base(shader) +
			    SI_SGPR_RESOURCE * 4,
			    8, NUM_SAMPLER_VIEWS, si_emit_sampler_views);
}

static void si_release_sampler_views(struct si_sampler_views *views)
{
	int i;

	for (i = 0; i < Elements(views->views); i++) {
		pipe_sampler_view_reference(&views->views[i], NULL);
	}
	si_release_descriptors(&views->desc);
}

static void si_sampler_views_begin_new_cs(struct r600_context *rctx,
					  struct si_sampler_views *views)
{
	unsigned mask = views->desc.enabled_mask;

	/* Add relocations to the CS. */
	while (mask) {
		int i = u_bit_scan(&mask);
		struct si_pipe_sampler_view *rview =
			(struct si_pipe_sampler_view*)views->views[i];

		r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rview->resource, RADEON_USAGE_READ);
	}

	r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, views->desc.buffer, RADEON_USAGE_READWRITE);

	si_emit_shader_pointer(rctx, &views->desc);
}

void si_set_sampler_view(struct r600_context *rctx, unsigned shader,
			 unsigned slot, struct pipe_sampler_view *view,
			 unsigned *view_desc)
{
	struct si_sampler_views *views = &rctx->samplers[shader].views;

	if (views->views[slot] == view)
		return;

	if (view) {
		struct si_pipe_sampler_view *rview =
			(struct si_pipe_sampler_view*)view;

		r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rview->resource, RADEON_USAGE_READ);

		pipe_sampler_view_reference(&views->views[slot], view);
		views->desc_data[slot] = view_desc;
		views->desc.enabled_mask |= 1 << slot;
	} else {
		pipe_sampler_view_reference(&views->views[slot], NULL);
		views->desc_data[slot] = null_desc;
		views->desc.enabled_mask &= ~(1 << slot);
	}

	views->desc.dirty_mask |= 1 << slot;
	si_update_descriptors(&views->desc);
}

/* BUFFER RESOURCES */

static void si_emit_buffer_resources(struct r600_context *rctx, struct r600_atom *atom)
{
	struct si_buffer_resources *buffers = (struct si_buffer_resources*)atom;

	si_emit_descriptors(rctx, &buffers->desc, buffers->desc_data);
}

static void si_init_buffer_resources(struct r600_context *rctx,
				     struct si_buffer_resources *buffers,
				     unsigned num_buffers, unsigned shader,
				     unsigned shader_userdata_index,
				     enum radeon_bo_usage shader_usage)
{
	int i;

	buffers->num_buffers = num_buffers;
	buffers->shader_usage = shader_usage;
	buffers->buffers = CALLOC(num_buffers, sizeof(struct pipe_resource*));
	buffers->desc_storage = CALLOC(num_buffers, sizeof(uint32_t) * 4);

	/* si_emit_descriptors only accepts an array of pointers to
	 * descriptors. Build such an array here, with each pointer
	 * aimed at one 4-dword slot of desc_storage. */
	buffers->desc_data = CALLOC(num_buffers, sizeof(uint32_t*));
	for (i = 0; i < num_buffers; i++) {
		buffers->desc_data[i] = &buffers->desc_storage[i*4];
	}

	si_init_descriptors(rctx, &buffers->desc,
			    si_get_shader_user_data_base(shader) +
			    shader_userdata_index*4, 4, num_buffers,
			    si_emit_buffer_resources);
}

static void si_release_buffer_resources(struct si_buffer_resources *buffers)
{
	int i;

	/* buffers->buffers is heap-allocated, so Elements() must not be
	 * used here; iterate over num_buffers instead. */
	for (i = 0; i < buffers->num_buffers; i++) {
		pipe_resource_reference(&buffers->buffers[i], NULL);
	}

	FREE(buffers->buffers);
	FREE(buffers->desc_storage);
	FREE(buffers->desc_data);
	si_release_descriptors(&buffers->desc);
}

static void si_buffer_resources_begin_new_cs(struct r600_context *rctx,
					     struct si_buffer_resources *buffers)
{
	unsigned mask = buffers->desc.enabled_mask;

	/* Add relocations to the CS. */
	while (mask) {
		int i = u_bit_scan(&mask);

		r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx,
				      (struct r600_resource*)buffers->buffers[i],
				      buffers->shader_usage);
	}

	r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx,
			      buffers->desc.buffer, RADEON_USAGE_READWRITE);

	si_emit_shader_pointer(rctx, &buffers->desc);
}

/* CONSTANT BUFFERS */

static void si_set_constant_buffer(struct pipe_context *ctx, uint shader, uint slot,
				   struct pipe_constant_buffer *input)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct si_buffer_resources *buffers;

	/* Check the shader index before using it to compute any address. */
	if (shader >= SI_NUM_SHADERS)
		return;

	buffers = &rctx->const_buffers[shader];
	rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE;

	assert(slot < buffers->num_buffers);
	pipe_resource_reference(&buffers->buffers[slot], NULL);

	if (input && (input->buffer || input->user_buffer)) {
		struct pipe_resource *buffer = NULL;
		uint64_t va;

		/* Upload the user buffer if needed. */
		if (input->user_buffer) {
			unsigned buffer_offset;

			r600_upload_const_buffer(rctx,
						 (struct r600_resource**)&buffer, input->user_buffer,
						 input->buffer_size, &buffer_offset);
			va = r600_resource_va(ctx->screen, buffer) + buffer_offset;
		} else {
			pipe_resource_reference(&buffer, input->buffer);
			va = r600_resource_va(ctx->screen, buffer) + input->buffer_offset;
		}

		/* Set the descriptor: base address, stride, number of
		 * records, and data format. */
		uint32_t *desc = buffers->desc_data[slot];
		desc[0] = va;
		desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
			  S_008F04_STRIDE(0);
		desc[2] = input->buffer_size;
		desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			  S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			  S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			  S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
			  S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
			  S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);

		buffers->buffers[slot] = buffer;
		r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx,
				      (struct r600_resource*)buffer, buffers->shader_usage);
		buffers->desc.enabled_mask |= 1 << slot;
	} else {
		/* Clear the descriptor. */
		memset(buffers->desc_data[slot], 0, sizeof(uint32_t) * 4);
		buffers->desc.enabled_mask &= ~(1 << slot);
	}

	buffers->desc.dirty_mask |= 1 << slot;
	si_update_descriptors(&buffers->desc);
}

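/* Usage example (illustrative; variable names are placeholders): binding
 * a small constant buffer from user memory to fragment shader slot 0.
 * The user data is uploaded by r600_upload_const_buffer above.
 *
 *	float consts[4] = {1.0f, 0.0f, 0.0f, 1.0f};
 *	struct pipe_constant_buffer cb = {0};
 *
 *	cb.user_buffer = consts;
 *	cb.buffer_size = sizeof(consts);
 *	pipe->set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, &cb);
 */
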
/* INIT/DEINIT */

void si_init_all_descriptors(struct r600_context *rctx)
{
	int i;

	for (i = 0; i < SI_NUM_SHADERS; i++) {
		si_init_buffer_resources(rctx, &rctx->const_buffers[i],
					 NUM_CONST_BUFFERS, i, SI_SGPR_CONST,
					 RADEON_USAGE_READ);

		si_init_sampler_views(rctx, &rctx->samplers[i].views, i);

		rctx->atoms.const_buffers[i] = &rctx->const_buffers[i].desc.atom;
		rctx->atoms.sampler_views[i] = &rctx->samplers[i].views.desc.atom;
	}

	/* Set pipe_context functions. */
	rctx->b.b.set_constant_buffer = si_set_constant_buffer;
}

void si_release_all_descriptors(struct r600_context *rctx)
{
	int i;

	for (i = 0; i < SI_NUM_SHADERS; i++) {
		si_release_buffer_resources(&rctx->const_buffers[i]);
		si_release_sampler_views(&rctx->samplers[i].views);
	}
}

void si_all_descriptors_begin_new_cs(struct r600_context *rctx)
{
	int i;

	for (i = 0; i < SI_NUM_SHADERS; i++) {
		si_buffer_resources_begin_new_cs(rctx, &rctx->const_buffers[i]);
		si_sampler_views_begin_new_cs(rctx, &rctx->samplers[i].views);
	}
}