/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Marek Olšák <marek.olsak@amd.com>
 */
#include "../radeon/r600_cs.h"
#include "si_pipe.h"
#include "si_shader.h"
#include "sid.h"

#include "util/u_memory.h"
#include "util/u_upload_mgr.h"

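/* Each descriptor array is multi-buffered in GPU memory: si_emit_descriptors
 * copies the current context slot to the next one (modulo SI_NUM_CONTEXTS)
 * before writing updates, so descriptors that in-flight draws may still be
 * reading are never overwritten in place. */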
#define SI_NUM_CONTEXTS 16

static uint32_t null_desc[8]; /* zeros */

/* Set this if you want the 3D engine to wait until CP DMA is done.
 * It should be set on the last CP DMA packet. */
#define R600_CP_DMA_SYNC	(1 << 0) /* R600+ */

/* Set this if the source data was used as a destination in a previous CP DMA
 * packet. It's for preventing a read-after-write (RAW) hazard between two
 * CP DMA packets. */
#define SI_CP_DMA_RAW_WAIT	(1 << 1) /* SI+ */

/* Emit a CP DMA packet to do a copy from one buffer to another.
 * The size must fit in bits [20:0].
 */
static void si_emit_cp_dma_copy_buffer(struct si_context *sctx,
				       uint64_t dst_va, uint64_t src_va,
				       unsigned size, unsigned flags)
{
	struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
	uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? PKT3_CP_DMA_CP_SYNC : 0;
	uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? PKT3_CP_DMA_CMD_RAW_WAIT : 0;

	assert(size);
	assert((size & ((1<<21)-1)) == size);

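	/* CIK and newer have the dedicated DMA_DATA packet with full 32-bit
	 * high address dwords; SI uses the older CP_DMA encoding, which only
	 * carries 16 high address bits per operand. */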
	if (sctx->b.chip_class >= CIK) {
		radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
		radeon_emit(cs, sync_flag);		/* CP_SYNC [31] */
		radeon_emit(cs, src_va);		/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, src_va >> 32);		/* SRC_ADDR_HI [31:0] */
		radeon_emit(cs, dst_va);		/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, dst_va >> 32);		/* DST_ADDR_HI [31:0] */
		radeon_emit(cs, size | raw_wait);	/* COMMAND [29:22] | BYTE_COUNT [20:0] */
	} else {
		radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
		radeon_emit(cs, src_va);			/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, sync_flag | ((src_va >> 32) & 0xffff)); /* CP_SYNC [31] | SRC_ADDR_HI [15:0] */
		radeon_emit(cs, dst_va);			/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, (dst_va >> 32) & 0xffff);	/* DST_ADDR_HI [15:0] */
		radeon_emit(cs, size | raw_wait);		/* COMMAND [29:22] | BYTE_COUNT [20:0] */
	}
}

/* Emit a CP DMA packet to clear a buffer. The size must fit in bits [20:0]. */
static void si_emit_cp_dma_clear_buffer(struct si_context *sctx,
					uint64_t dst_va, unsigned size,
					uint32_t clear_value, unsigned flags)
{
	struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
	uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? PKT3_CP_DMA_CP_SYNC : 0;
	uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? PKT3_CP_DMA_CMD_RAW_WAIT : 0;

	assert(size);
	assert((size & ((1<<21)-1)) == size);

	if (sctx->b.chip_class >= CIK) {
		radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
		radeon_emit(cs, sync_flag | PKT3_CP_DMA_SRC_SEL(2)); /* CP_SYNC [31] | SRC_SEL[30:29] */
		radeon_emit(cs, clear_value);		/* DATA [31:0] */
		radeon_emit(cs, 0);
		radeon_emit(cs, dst_va);		/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, dst_va >> 32);		/* DST_ADDR_HI [31:0] */
		radeon_emit(cs, size | raw_wait);	/* COMMAND [29:22] | BYTE_COUNT [20:0] */
	} else {
		radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
		radeon_emit(cs, clear_value);		/* DATA [31:0] */
		radeon_emit(cs, sync_flag | PKT3_CP_DMA_SRC_SEL(2)); /* CP_SYNC [31] | SRC_SEL[30:29] */
		radeon_emit(cs, dst_va);			/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, (dst_va >> 32) & 0xffff);	/* DST_ADDR_HI [15:0] */
		radeon_emit(cs, size | raw_wait);		/* COMMAND [29:22] | BYTE_COUNT [20:0] */
	}
}

static void si_init_descriptors(struct si_context *sctx,
				struct si_descriptors *desc,
				unsigned shader_userdata_reg,
				unsigned element_dw_size,
				unsigned num_elements,
				void (*emit_func)(struct si_context *ctx, struct r600_atom *state))
{
	uint64_t va;

	assert(num_elements <= sizeof(desc->enabled_mask)*8);
	assert(num_elements <= sizeof(desc->dirty_mask)*8);

	desc->atom.emit = (void*)emit_func;
	desc->shader_userdata_reg = shader_userdata_reg;
	desc->element_dw_size = element_dw_size;
	desc->num_elements = num_elements;
	desc->context_size = num_elements * element_dw_size * 4;

	desc->buffer = (struct r600_resource*)
		pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STATIC,
				   SI_NUM_CONTEXTS * desc->context_size);

	r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, desc->buffer, RADEON_USAGE_READWRITE);
	va = r600_resource_va(sctx->b.b.screen, &desc->buffer->b.b);

	/* We don't check for CS space here, because this should be called
	 * only once at context initialization. */
	si_emit_cp_dma_clear_buffer(sctx, va, desc->buffer->b.b.width0, 0,
				    R600_CP_DMA_SYNC);
}

static void si_release_descriptors(struct si_descriptors *desc)
{
	pipe_resource_reference((struct pipe_resource**)&desc->buffer, NULL);
}

static void si_update_descriptors(struct si_context *sctx,
				  struct si_descriptors *desc)
{
	if (desc->dirty_mask) {
		desc->atom.num_dw =
			7 + /* copy */
			(4 + desc->element_dw_size) * util_bitcount(desc->dirty_mask) + /* update */
			4; /* pointer update */
		desc->atom.dirty = true;
		/* The descriptors are read with the K cache. */
		sctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE;
	} else {
		desc->atom.dirty = false;
	}
}

static void si_emit_shader_pointer(struct si_context *sctx,
				   struct si_descriptors *desc)
{
	struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
	uint64_t va = r600_resource_va(sctx->b.b.screen, &desc->buffer->b.b) +
		      desc->current_context_id * desc->context_size;

	radeon_emit(cs, PKT3(PKT3_SET_SH_REG, 2, 0));
	radeon_emit(cs, (desc->shader_userdata_reg - SI_SH_REG_OFFSET) >> 2);
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
}

static void si_emit_descriptors(struct si_context *sctx,
				struct si_descriptors *desc,
				uint32_t **descriptors)
{
	struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
	uint64_t va_base;
	int packet_start;
	int packet_size = 0;
	int last_index = desc->num_elements; /* point to a non-existing element */
	unsigned dirty_mask = desc->dirty_mask;
	unsigned new_context_id = (desc->current_context_id + 1) % SI_NUM_CONTEXTS;
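	/* Write into the next context slot, so the slot that in-flight draws
	 * may still be reading from stays intact. */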

	assert(dirty_mask);

	va_base = r600_resource_va(sctx->b.b.screen, &desc->buffer->b.b);

	/* Copy the descriptors to a new context slot. */
	/* XXX Consider using TC or L2 for this copy on CIK. */
	si_emit_cp_dma_copy_buffer(sctx,
				   va_base + new_context_id * desc->context_size,
				   va_base + desc->current_context_id * desc->context_size,
				   desc->context_size, R600_CP_DMA_SYNC);

	va_base += new_context_id * desc->context_size;

	/* Update the descriptors.
	 * Updates of consecutive descriptors are merged to one WRITE_DATA packet.
	 *
	 * XXX When unbinding lots of resources, consider clearing the memory
	 * with CP DMA instead of emitting zeros.
	 */
	while (dirty_mask) {
		int i = u_bit_scan(&dirty_mask);

		assert(i < desc->num_elements);

		if (last_index+1 == i && packet_size) {
			/* Append new data at the end of the last packet. */
			packet_size += desc->element_dw_size;
			cs->buf[packet_start] = PKT3(PKT3_WRITE_DATA, packet_size, 0);
		} else {
			/* Start a new packet. */
			uint64_t va = va_base + i * desc->element_dw_size * 4;

			packet_start = cs->cdw;
			packet_size = 2 + desc->element_dw_size;

			radeon_emit(cs, PKT3(PKT3_WRITE_DATA, packet_size, 0));
			radeon_emit(cs, PKT3_WRITE_DATA_DST_SEL(PKT3_WRITE_DATA_DST_SEL_TC_OR_L2) |
					PKT3_WRITE_DATA_WR_CONFIRM |
					PKT3_WRITE_DATA_ENGINE_SEL(PKT3_WRITE_DATA_ENGINE_SEL_ME));
			radeon_emit(cs, va & 0xFFFFFFFFUL);
			radeon_emit(cs, (va >> 32UL) & 0xFFFFFFFFUL);
		}

		radeon_emit_array(cs, descriptors[i], desc->element_dw_size);

		last_index = i;
	}

	desc->dirty_mask = 0;
	desc->current_context_id = new_context_id;

	/* Now update the shader userdata pointer. */
	si_emit_shader_pointer(sctx, desc);
}

static unsigned si_get_shader_user_data_base(unsigned shader)
{
	switch (shader) {
	case SI_SHADER_EXPORT:
		return R_00B330_SPI_SHADER_USER_DATA_ES_0;
	case PIPE_SHADER_VERTEX:
		return R_00B130_SPI_SHADER_USER_DATA_VS_0;
	case PIPE_SHADER_GEOMETRY:
		return R_00B230_SPI_SHADER_USER_DATA_GS_0;
	case PIPE_SHADER_FRAGMENT:
		return R_00B030_SPI_SHADER_USER_DATA_PS_0;
	default:
		assert(0);
		return 0;
	}
}

/* SAMPLER VIEWS */

static void si_emit_sampler_views(struct si_context *sctx, struct r600_atom *atom)
{
	struct si_sampler_views *views = (struct si_sampler_views*)atom;

	si_emit_descriptors(sctx, &views->desc, views->desc_data);
}

static void si_init_sampler_views(struct si_context *sctx,
				  struct si_sampler_views *views,
				  unsigned shader)
{
	si_init_descriptors(sctx, &views->desc,
			    si_get_shader_user_data_base(shader) +
			    SI_SGPR_RESOURCE * 4,
			    8, NUM_SAMPLER_VIEWS, si_emit_sampler_views);
}

static void si_release_sampler_views(struct si_sampler_views *views)
{
	int i;

	for (i = 0; i < Elements(views->views); i++) {
		pipe_sampler_view_reference(&views->views[i], NULL);
	}
	si_release_descriptors(&views->desc);
}

static void si_sampler_views_begin_new_cs(struct si_context *sctx,
					  struct si_sampler_views *views)
{
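	/* Called when a new command stream is started: the new CS has no
	 * relocations yet, so re-add every bound resource and the descriptor
	 * buffer itself, and re-emit the shader pointer. */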
	unsigned mask = views->desc.enabled_mask;

	/* Add relocations to the CS. */
	while (mask) {
		int i = u_bit_scan(&mask);
		struct si_pipe_sampler_view *rview =
			(struct si_pipe_sampler_view*)views->views[i];

		r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, rview->resource, RADEON_USAGE_READ);
	}

	r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, views->desc.buffer, RADEON_USAGE_READWRITE);

	si_emit_shader_pointer(sctx, &views->desc);
}

void si_set_sampler_view(struct si_context *sctx, unsigned shader,
			 unsigned slot, struct pipe_sampler_view *view,
			 unsigned *view_desc)
{
	struct si_sampler_views *views = &sctx->samplers[shader].views;

	if (views->views[slot] == view)
		return;

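	/* Only a pointer to the descriptor dwords is stored here;
	 * si_emit_descriptors dereferences it when the dirty slot is written
	 * to the descriptor buffer. */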
	if (view) {
		struct si_pipe_sampler_view *rview =
			(struct si_pipe_sampler_view*)view;

		r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, rview->resource, RADEON_USAGE_READ);

		pipe_sampler_view_reference(&views->views[slot], view);
		views->desc_data[slot] = view_desc;
		views->desc.enabled_mask |= 1 << slot;
	} else {
		pipe_sampler_view_reference(&views->views[slot], NULL);
		views->desc_data[slot] = null_desc;
		views->desc.enabled_mask &= ~(1 << slot);
	}

	views->desc.dirty_mask |= 1 << slot;
	si_update_descriptors(sctx, &views->desc);
}

/* BUFFER RESOURCES */

static void si_emit_buffer_resources(struct si_context *sctx, struct r600_atom *atom)
{
	struct si_buffer_resources *buffers = (struct si_buffer_resources*)atom;

	si_emit_descriptors(sctx, &buffers->desc, buffers->desc_data);
}

static void si_init_buffer_resources(struct si_context *sctx,
				     struct si_buffer_resources *buffers,
				     unsigned num_buffers, unsigned shader,
				     unsigned shader_userdata_index,
				     enum radeon_bo_usage shader_usage)
{
	int i;

	buffers->num_buffers = num_buffers;
	buffers->shader_usage = shader_usage;
	buffers->buffers = CALLOC(num_buffers, sizeof(struct pipe_resource*));
	buffers->desc_storage = CALLOC(num_buffers, sizeof(uint32_t) * 4);

	/* si_emit_descriptors only accepts an array of arrays.
	 * This adds such an array. */
	buffers->desc_data = CALLOC(num_buffers, sizeof(uint32_t*));
	for (i = 0; i < num_buffers; i++) {
		buffers->desc_data[i] = &buffers->desc_storage[i*4];
	}

	si_init_descriptors(sctx, &buffers->desc,
			    si_get_shader_user_data_base(shader) +
			    shader_userdata_index*4, 4, num_buffers,
			    si_emit_buffer_resources);
}

static void si_release_buffer_resources(struct si_buffer_resources *buffers)
{
	int i;

	/* buffers->buffers is heap-allocated, so iterate over num_buffers
	 * rather than Elements() of the pointer. */
	for (i = 0; i < buffers->num_buffers; i++) {
		pipe_resource_reference(&buffers->buffers[i], NULL);
	}

	FREE(buffers->buffers);
	FREE(buffers->desc_storage);
	FREE(buffers->desc_data);
	si_release_descriptors(&buffers->desc);
}

static void si_buffer_resources_begin_new_cs(struct si_context *sctx,
					     struct si_buffer_resources *buffers)
{
	unsigned mask = buffers->desc.enabled_mask;

	/* Add relocations to the CS. */
	while (mask) {
		int i = u_bit_scan(&mask);

		r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
				      (struct r600_resource*)buffers->buffers[i],
				      buffers->shader_usage);
	}

	r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
			      buffers->desc.buffer, RADEON_USAGE_READWRITE);

	si_emit_shader_pointer(sctx, &buffers->desc);
}

/* CONSTANT BUFFERS */

void si_upload_const_buffer(struct si_context *sctx, struct r600_resource **rbuffer,
			    const uint8_t *ptr, unsigned size, uint32_t *const_offset)
{
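	/* Upload the data through the upload manager. The GPU reads the
	 * constants as little-endian 32-bit words, so on big-endian hosts
	 * each dword is byte-swapped first. */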
	if (SI_BIG_ENDIAN) {
		uint32_t *tmpPtr;
		unsigned i;

		if (!(tmpPtr = malloc(size))) {
			R600_ERR("Failed to allocate BE swap buffer.\n");
			return;
		}

		for (i = 0; i < size / 4; ++i) {
			tmpPtr[i] = util_bswap32(((uint32_t *)ptr)[i]);
		}

		u_upload_data(sctx->b.uploader, 0, size, tmpPtr, const_offset,
			      (struct pipe_resource**)rbuffer);

		free(tmpPtr);
	} else {
		u_upload_data(sctx->b.uploader, 0, size, ptr, const_offset,
			      (struct pipe_resource**)rbuffer);
	}
}

static void si_set_constant_buffer(struct pipe_context *ctx, uint shader, uint slot,
				   struct pipe_constant_buffer *input)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_buffer_resources *buffers = &sctx->const_buffers[shader];

	if (shader >= SI_NUM_SHADERS)
		return;

	assert(slot < buffers->num_buffers);
	pipe_resource_reference(&buffers->buffers[slot], NULL);

	/* CIK cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
	 * with a NULL buffer). We need to use a dummy buffer instead. */
	if (sctx->b.chip_class == CIK &&
	    (!input || (!input->buffer && !input->user_buffer)))
		input = &sctx->null_const_buf;

	if (input && (input->buffer || input->user_buffer)) {
		struct pipe_resource *buffer = NULL;
		uint64_t va;

		/* Upload the user buffer if needed. */
		if (input->user_buffer) {
			unsigned buffer_offset;

			si_upload_const_buffer(sctx,
					       (struct r600_resource**)&buffer, input->user_buffer,
					       input->buffer_size, &buffer_offset);
			va = r600_resource_va(ctx->screen, buffer) + buffer_offset;
		} else {
			pipe_resource_reference(&buffer, input->buffer);
			va = r600_resource_va(ctx->screen, buffer) + input->buffer_offset;
		}

		/* Set the descriptor. */
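		/* A buffer resource descriptor is 4 dwords: base address and
		 * stride in words 0-1, size in bytes in word 2, and channel
		 * select + number/data format in word 3. */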
		uint32_t *desc = buffers->desc_data[slot];
		desc[0] = va;
		desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
			  S_008F04_STRIDE(0);
		desc[2] = input->buffer_size;
		desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			  S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			  S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			  S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
			  S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
			  S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);

		buffers->buffers[slot] = buffer;
		r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
				      (struct r600_resource*)buffer, buffers->shader_usage);
		buffers->desc.enabled_mask |= 1 << slot;
	} else {
		/* Clear the descriptor. */
		memset(buffers->desc_data[slot], 0, sizeof(uint32_t) * 4);
		buffers->desc.enabled_mask &= ~(1 << slot);
	}

	buffers->desc.dirty_mask |= 1 << slot;
	si_update_descriptors(sctx, &buffers->desc);
}

/* RING BUFFERS */

void si_set_ring_buffer(struct pipe_context *ctx, uint shader, uint slot,
			struct pipe_constant_buffer *input,
			unsigned stride, unsigned num_records,
			bool add_tid, bool swizzle,
			unsigned element_size, unsigned index_stride)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_buffer_resources *buffers = &sctx->const_buffers[shader];

	if (shader >= SI_NUM_SHADERS)
		return;

	/* The stride field in the resource descriptor has 14 bits */
	assert(stride < (1 << 14));

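	/* Ring buffers are stored in the same descriptor array as the regular
	 * constant buffers, in the slots after NUM_PIPE_CONST_BUFFERS. */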
	slot += NUM_PIPE_CONST_BUFFERS + 1;
	assert(slot < buffers->num_buffers);
	pipe_resource_reference(&buffers->buffers[slot], NULL);

	if (input && input->buffer) {
		uint64_t va;

		va = r600_resource_va(ctx->screen, input->buffer);

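		/* ELEMENT_SIZE and INDEX_STRIDE are 2-bit fields in the
		 * descriptor; translate the byte values to their encodings. */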
		switch (element_size) {
		default:
			assert(!"Unsupported ring buffer element size");
		case 0:
		case 2:
			element_size = 0;
			break;
		case 4:
			element_size = 1;
			break;
		case 8:
			element_size = 2;
			break;
		case 16:
			element_size = 3;
			break;
		}

		switch (index_stride) {
		default:
			assert(!"Unsupported ring buffer index stride");
		case 0:
		case 8:
			index_stride = 0;
			break;
		case 16:
			index_stride = 1;
			break;
		case 32:
			index_stride = 2;
			break;
		case 64:
			index_stride = 3;
			break;
		}

		/* Set the descriptor. */
		uint32_t *desc = buffers->desc_data[slot];
		desc[0] = va;
		desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
			  S_008F04_STRIDE(stride) |
			  S_008F04_SWIZZLE_ENABLE(swizzle);
		desc[2] = num_records;
		desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			  S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			  S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			  S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
			  S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
			  S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
			  S_008F0C_ELEMENT_SIZE(element_size) |
			  S_008F0C_INDEX_STRIDE(index_stride) |
			  S_008F0C_ADD_TID_ENABLE(add_tid);

		pipe_resource_reference(&buffers->buffers[slot], input->buffer);
		r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
				      (struct r600_resource*)input->buffer,
				      buffers->shader_usage);
		buffers->desc.enabled_mask |= 1 << slot;
	} else {
		/* Clear the descriptor. */
		memset(buffers->desc_data[slot], 0, sizeof(uint32_t) * 4);
		buffers->desc.enabled_mask &= ~(1 << slot);
	}

	buffers->desc.dirty_mask |= 1 << slot;
	si_update_descriptors(sctx, &buffers->desc);
}

/* STREAMOUT BUFFERS */

static void si_set_streamout_targets(struct pipe_context *ctx,
				     unsigned num_targets,
				     struct pipe_stream_output_target **targets,
				     unsigned append_bitmask)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_buffer_resources *buffers = &sctx->streamout_buffers;
	unsigned old_num_targets = sctx->b.streamout.num_targets;
	unsigned i;

	/* Streamout buffers must be bound in 2 places:
	 * 1) in VGT by setting the VGT_STRMOUT registers
	 * 2) as shader resources
	 */

	/* Set the VGT regs. */
	r600_set_streamout_targets(ctx, num_targets, targets, append_bitmask);

	/* Set the shader resources. */
	for (i = 0; i < num_targets; i++) {
		if (targets[i]) {
			struct pipe_resource *buffer = targets[i]->buffer;
			uint64_t va = r600_resource_va(ctx->screen, buffer);

			/* Set the descriptor. */
			uint32_t *desc = buffers->desc_data[i];
			desc[0] = va;
			desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
			desc[2] = 0xffffffff;
			desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
				  S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
				  S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
				  S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);

			/* Set the resource. */
			pipe_resource_reference(&buffers->buffers[i], buffer);
			r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
					      (struct r600_resource*)buffer,
					      buffers->shader_usage);
			buffers->desc.enabled_mask |= 1 << i;
		} else {
			/* Clear the descriptor and unset the resource. */
			memset(buffers->desc_data[i], 0, sizeof(uint32_t) * 4);
			pipe_resource_reference(&buffers->buffers[i], NULL);
			buffers->desc.enabled_mask &= ~(1 << i);
		}
		buffers->desc.dirty_mask |= 1 << i;
	}
	for (; i < old_num_targets; i++) {
		/* Clear the descriptor and unset the resource. */
		memset(buffers->desc_data[i], 0, sizeof(uint32_t) * 4);
		pipe_resource_reference(&buffers->buffers[i], NULL);
		buffers->desc.enabled_mask &= ~(1 << i);
		buffers->desc.dirty_mask |= 1 << i;
	}

	si_update_descriptors(sctx, &buffers->desc);
}

static void si_desc_reset_buffer_offset(struct pipe_context *ctx,
					uint32_t *desc, uint64_t old_buf_va,
					struct pipe_resource *new_buf)
{
	/* Retrieve the buffer offset from the descriptor. */
	uint64_t old_desc_va =
		desc[0] | ((uint64_t)G_008F04_BASE_ADDRESS_HI(desc[1]) << 32);

	assert(old_buf_va <= old_desc_va);
	uint64_t offset_within_buffer = old_desc_va - old_buf_va;

	/* Update the descriptor. */
	uint64_t va = r600_resource_va(ctx->screen, new_buf) + offset_within_buffer;

	desc[0] = va;
	desc[1] = (desc[1] & C_008F04_BASE_ADDRESS_HI) |
		  S_008F04_BASE_ADDRESS_HI(va >> 32);
}

/* BUFFER DISCARD/INVALIDATION */

/* Reallocate a buffer and update all resource bindings where the buffer is
 * bound.
 *
 * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
 * idle by discarding its contents. Apps usually tell us when to do this using
 * map_buffer flags, for example.
 */
static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct r600_resource *rbuffer = r600_resource(buf);
	unsigned i, shader, alignment = rbuffer->buf->alignment;
	uint64_t old_va = r600_resource_va(ctx->screen, buf);
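	/* Remember the old GPU address; descriptors that still point into the
	 * old allocation are rebased onto the new one below. */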

	/* Discard the buffer. */
	pb_reference(&rbuffer->buf, NULL);

	/* Create a new one in the same pipe_resource. */
	r600_init_resource(&sctx->screen->b, rbuffer, rbuffer->b.b.width0, alignment,
			   TRUE, rbuffer->b.b.usage);

	/* We changed the buffer, now we need to bind it where the old one
	 * was bound. This consists of 2 things:
	 * 1) Updating the resource descriptor and dirtying it.
	 * 2) Adding a relocation to the CS, so that it's usable.
	 */

	/* Vertex buffers. */
	/* Nothing to do. Vertex buffer bindings are updated before every draw call. */

	/* Streamout buffers. */
	for (i = 0; i < sctx->streamout_buffers.num_buffers; i++) {
		if (sctx->streamout_buffers.buffers[i] == buf) {
			/* Update the descriptor. */
			si_desc_reset_buffer_offset(ctx, sctx->streamout_buffers.desc_data[i],
						    old_va, buf);

			r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
					      (struct r600_resource*)buf,
					      sctx->streamout_buffers.shader_usage);
			sctx->streamout_buffers.desc.dirty_mask |= 1 << i;
			si_update_descriptors(sctx, &sctx->streamout_buffers.desc);

			/* Update the streamout state. */
			if (sctx->b.streamout.begin_emitted) {
				r600_emit_streamout_end(&sctx->b);
			}
			sctx->b.streamout.append_bitmask = sctx->b.streamout.enabled_mask;
			r600_streamout_buffers_dirty(&sctx->b);
		}
	}

	/* Constant buffers. */
	for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
		struct si_buffer_resources *buffers = &sctx->const_buffers[shader];
		bool found = false;
		uint32_t mask = buffers->desc.enabled_mask;

		while (mask) {
			unsigned i = u_bit_scan(&mask);
			if (buffers->buffers[i] == buf) {
				si_desc_reset_buffer_offset(ctx, buffers->desc_data[i],
							    old_va, buf);

				r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
						      rbuffer, buffers->shader_usage);

				buffers->desc.dirty_mask |= 1 << i;
				found = true;
			}
		}
		if (found) {
			si_update_descriptors(sctx, &buffers->desc);
		}
	}

	/* Texture buffers. */
	for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
		struct si_sampler_views *views = &sctx->samplers[shader].views;
		bool found = false;
		uint32_t mask = views->desc.enabled_mask;

		while (mask) {
			unsigned i = u_bit_scan(&mask);
			if (views->views[i]->texture == buf) {
				/* This updates the sampler view directly. */
				si_desc_reset_buffer_offset(ctx, views->desc_data[i],
							    old_va, buf);

				r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
						      rbuffer, RADEON_USAGE_READ);

				views->desc.dirty_mask |= 1 << i;
				found = true;
			}
		}
		if (found) {
			si_update_descriptors(sctx, &views->desc);
		}
	}
}

/* CP DMA */

/* The max number of bytes to copy per packet. */
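/* The BYTE_COUNT field is 21 bits; this is rounded down to a multiple of 8,
 * presumably to keep full-size chunks 8-byte aligned. */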
#define CP_DMA_MAX_BYTE_COUNT ((1 << 21) - 8)

static void si_clear_buffer(struct pipe_context *ctx, struct pipe_resource *dst,
			    unsigned offset, unsigned size, unsigned value)
{
	struct si_context *sctx = (struct si_context*)ctx;

	if (!size)
		return;

	/* Mark the buffer range of destination as valid (initialized),
	 * so that transfer_map knows it should wait for the GPU when mapping
	 * that range. */
	util_range_add(&r600_resource(dst)->valid_buffer_range, offset,
		       offset + size);

	/* Fallback for unaligned clears: write the value byte by byte through
	 * a CPU mapping, starting at the requested byte offset. */
	if (offset % 4 != 0 || size % 4 != 0) {
		uint8_t *map = sctx->b.ws->buffer_map(r600_resource(dst)->cs_buf,
						      sctx->b.rings.gfx.cs,
						      PIPE_TRANSFER_WRITE);
		map += offset;
		for (unsigned i = 0; i < size; i++) {
			unsigned byte_within_dword = (offset + i) % 4;
			*map++ = (value >> (byte_within_dword * 8)) & 0xff;
		}
		return;
	}

	uint64_t va = r600_resource_va(&sctx->screen->b.b, dst) + offset;

	/* Flush the caches where the resource is bound. */
	/* XXX only flush the caches where the buffer is bound. */
	sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
			 R600_CONTEXT_INV_CONST_CACHE |
			 R600_CONTEXT_FLUSH_AND_INV_CB |
			 R600_CONTEXT_FLUSH_AND_INV_DB |
			 R600_CONTEXT_FLUSH_AND_INV_CB_META |
			 R600_CONTEXT_FLUSH_AND_INV_DB_META;
	sctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;

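	/* Split the clear into chunks of at most CP_DMA_MAX_BYTE_COUNT bytes.
	 * Only the last packet gets R600_CP_DMA_SYNC. */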
	while (size) {
		unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
		unsigned dma_flags = 0;

		si_need_cs_space(sctx, 7 + (sctx->b.flags ? sctx->cache_flush.num_dw : 0),
				 FALSE);

		/* This must be done after need_cs_space. */
		r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
				      (struct r600_resource*)dst, RADEON_USAGE_WRITE);

		/* Flush the caches for the first copy only.
		 * Also wait for the previous CP DMA operations. */
		if (sctx->b.flags) {
			si_emit_cache_flush(&sctx->b, NULL);
			dma_flags |= SI_CP_DMA_RAW_WAIT; /* same as WAIT_UNTIL=CP_DMA_IDLE */
		}

		/* Do the synchronization after the last copy, so that all data is written to memory. */
		if (size == byte_count)
			dma_flags |= R600_CP_DMA_SYNC;

		/* Emit the clear packet. */
		si_emit_cp_dma_clear_buffer(sctx, va, byte_count, value, dma_flags);

		size -= byte_count;
		va += byte_count;
	}

	/* Flush the caches again in case the 3D engine has been prefetching
	 * the resource. */
	/* XXX only flush the caches where the buffer is bound. */
	sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
			 R600_CONTEXT_INV_CONST_CACHE |
			 R600_CONTEXT_FLUSH_AND_INV_CB |
			 R600_CONTEXT_FLUSH_AND_INV_DB |
			 R600_CONTEXT_FLUSH_AND_INV_CB_META |
			 R600_CONTEXT_FLUSH_AND_INV_DB_META;
}

void si_copy_buffer(struct si_context *sctx,
		    struct pipe_resource *dst, struct pipe_resource *src,
		    uint64_t dst_offset, uint64_t src_offset, unsigned size)
{
	if (!size)
		return;

	/* Mark the buffer range of destination as valid (initialized),
	 * so that transfer_map knows it should wait for the GPU when mapping
	 * that range. */
	util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
		       dst_offset + size);

	dst_offset += r600_resource_va(&sctx->screen->b.b, dst);
	src_offset += r600_resource_va(&sctx->screen->b.b, src);

	/* Flush the caches where the resource is bound. */
	sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
			 R600_CONTEXT_INV_CONST_CACHE |
			 R600_CONTEXT_FLUSH_AND_INV_CB |
			 R600_CONTEXT_FLUSH_AND_INV_DB |
			 R600_CONTEXT_FLUSH_AND_INV_CB_META |
			 R600_CONTEXT_FLUSH_AND_INV_DB_META |
			 R600_CONTEXT_WAIT_3D_IDLE;

	while (size) {
		unsigned sync_flags = 0;
		unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);

		si_need_cs_space(sctx, 7 + (sctx->b.flags ? sctx->cache_flush.num_dw : 0), FALSE);

		/* Flush the caches for the first copy only. Also wait for old CP DMA packets to complete. */
		if (sctx->b.flags) {
			si_emit_cache_flush(&sctx->b, NULL);
			sync_flags |= SI_CP_DMA_RAW_WAIT;
		}

		/* Do the synchronization after the last copy, so that all data is written to memory. */
		if (size == byte_count) {
			sync_flags |= R600_CP_DMA_SYNC;
		}

		/* This must be done after si_need_cs_space. */
		r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, (struct r600_resource*)src, RADEON_USAGE_READ);
		r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, (struct r600_resource*)dst, RADEON_USAGE_WRITE);

		si_emit_cp_dma_copy_buffer(sctx, dst_offset, src_offset, byte_count, sync_flags);

		size -= byte_count;
		src_offset += byte_count;
		dst_offset += byte_count;
	}

	sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
			 R600_CONTEXT_INV_CONST_CACHE |
			 R600_CONTEXT_FLUSH_AND_INV_CB |
			 R600_CONTEXT_FLUSH_AND_INV_DB |
			 R600_CONTEXT_FLUSH_AND_INV_CB_META |
			 R600_CONTEXT_FLUSH_AND_INV_DB_META;
}

/* INIT/DEINIT */

void si_init_all_descriptors(struct si_context *sctx)
{
	int i;

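	/* One constant buffer array and one sampler view array per shader
	 * stage; the streamout buffers get their own array, bound through the
	 * vertex shader's user data. */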
	for (i = 0; i < SI_NUM_SHADERS; i++) {
		si_init_buffer_resources(sctx, &sctx->const_buffers[i],
					 NUM_CONST_BUFFERS, i, SI_SGPR_CONST,
					 RADEON_USAGE_READWRITE);

		si_init_sampler_views(sctx, &sctx->samplers[i].views, i);

		sctx->atoms.const_buffers[i] = &sctx->const_buffers[i].desc.atom;
		sctx->atoms.sampler_views[i] = &sctx->samplers[i].views.desc.atom;
	}

	si_init_buffer_resources(sctx, &sctx->streamout_buffers, 4, PIPE_SHADER_VERTEX,
				 SI_SGPR_SO_BUFFER, RADEON_USAGE_WRITE);
	sctx->atoms.streamout_buffers = &sctx->streamout_buffers.desc.atom;

	/* Set pipe_context functions. */
	sctx->b.b.set_constant_buffer = si_set_constant_buffer;
	sctx->b.b.set_stream_output_targets = si_set_streamout_targets;
	sctx->b.clear_buffer = si_clear_buffer;
	sctx->b.invalidate_buffer = si_invalidate_buffer;
}

void si_release_all_descriptors(struct si_context *sctx)
{
	int i;

	for (i = 0; i < SI_NUM_SHADERS; i++) {
		si_release_buffer_resources(&sctx->const_buffers[i]);
		si_release_sampler_views(&sctx->samplers[i].views);
	}
	si_release_buffer_resources(&sctx->streamout_buffers);
}

void si_all_descriptors_begin_new_cs(struct si_context *sctx)
{
	int i;

	for (i = 0; i < SI_NUM_SHADERS; i++) {
		si_buffer_resources_begin_new_cs(sctx, &sctx->const_buffers[i]);
		si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i].views);
	}
	si_buffer_resources_begin_new_cs(sctx, &sctx->streamout_buffers);
}