/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Marek Olšák <marek.olsak@amd.com>
 */
#include "../radeon/r600_cs.h"
#include "si_pipe.h"
#include "si_shader.h"
#include "sid.h"

#include "util/u_memory.h"
#include "util/u_upload_mgr.h"

#define SI_NUM_CONTEXTS 16

static uint32_t null_desc[8]; /* zeros */

/* Set this if you want the 3D engine to wait until CP DMA is done.
 * It should be set on the last CP DMA packet. */
#define R600_CP_DMA_SYNC (1 << 0) /* R600+ */

/* Set this if the source data was used as a destination in a previous CP DMA
 * packet. It's for preventing a read-after-write (RAW) hazard between two
 * CP DMA packets. */
#define SI_CP_DMA_RAW_WAIT (1 << 1) /* SI+ */

/* Emit a CP DMA packet to do a copy from one buffer to another.
 * The size must fit in bits [20:0].
 */
static void si_emit_cp_dma_copy_buffer(struct si_context *sctx,
                                       uint64_t dst_va, uint64_t src_va,
                                       unsigned size, unsigned flags)
{
        struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
        uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? PKT3_CP_DMA_CP_SYNC : 0;
        uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? PKT3_CP_DMA_CMD_RAW_WAIT : 0;

        assert(size);
        assert((size & ((1<<21)-1)) == size);

        if (sctx->b.chip_class >= CIK) {
                radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
                radeon_emit(cs, sync_flag); /* CP_SYNC [31] */
                radeon_emit(cs, src_va); /* SRC_ADDR_LO [31:0] */
                radeon_emit(cs, src_va >> 32); /* SRC_ADDR_HI [31:0] */
                radeon_emit(cs, dst_va); /* DST_ADDR_LO [31:0] */
                radeon_emit(cs, dst_va >> 32); /* DST_ADDR_HI [31:0] */
                radeon_emit(cs, size | raw_wait); /* COMMAND [29:22] | BYTE_COUNT [20:0] */
        } else {
                radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
                radeon_emit(cs, src_va); /* SRC_ADDR_LO [31:0] */
                radeon_emit(cs, sync_flag | ((src_va >> 32) & 0xffff)); /* CP_SYNC [31] | SRC_ADDR_HI [15:0] */
                radeon_emit(cs, dst_va); /* DST_ADDR_LO [31:0] */
                radeon_emit(cs, (dst_va >> 32) & 0xffff); /* DST_ADDR_HI [15:0] */
                radeon_emit(cs, size | raw_wait); /* COMMAND [29:22] | BYTE_COUNT [20:0] */
        }
}

/* Emit a CP DMA packet to clear a buffer. The size must fit in bits [20:0]. */
static void si_emit_cp_dma_clear_buffer(struct si_context *sctx,
                                        uint64_t dst_va, unsigned size,
                                        uint32_t clear_value, unsigned flags)
{
        struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
        uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? PKT3_CP_DMA_CP_SYNC : 0;
        uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? PKT3_CP_DMA_CMD_RAW_WAIT : 0;

        assert(size);
        assert((size & ((1<<21)-1)) == size);

        if (sctx->b.chip_class >= CIK) {
                radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
                radeon_emit(cs, sync_flag | PKT3_CP_DMA_SRC_SEL(2)); /* CP_SYNC [31] | SRC_SEL [30:29] */
                radeon_emit(cs, clear_value); /* DATA [31:0] */
                radeon_emit(cs, 0);
                radeon_emit(cs, dst_va); /* DST_ADDR_LO [31:0] */
                radeon_emit(cs, dst_va >> 32); /* DST_ADDR_HI [31:0] */
                radeon_emit(cs, size | raw_wait); /* COMMAND [29:22] | BYTE_COUNT [20:0] */
        } else {
                radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
                radeon_emit(cs, clear_value); /* DATA [31:0] */
                radeon_emit(cs, sync_flag | PKT3_CP_DMA_SRC_SEL(2)); /* CP_SYNC [31] | SRC_SEL [30:29] */
                radeon_emit(cs, dst_va); /* DST_ADDR_LO [31:0] */
                radeon_emit(cs, (dst_va >> 32) & 0xffff); /* DST_ADDR_HI [15:0] */
                radeon_emit(cs, size | raw_wait); /* COMMAND [29:22] | BYTE_COUNT [20:0] */
        }
}

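/* Allocate a buffer holding SI_NUM_CONTEXTS copies ("context slots") of the
 * descriptor array and clear it with CP DMA. Keeping several slots lets the
 * GPU keep reading an older snapshot while a newer one is written, see
 * si_emit_descriptors. */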
static void si_init_descriptors(struct si_context *sctx,
                                struct si_descriptors *desc,
                                unsigned shader_userdata_reg,
                                unsigned element_dw_size,
                                unsigned num_elements,
                                void (*emit_func)(struct si_context *ctx, struct r600_atom *state))
{
        uint64_t va;

        assert(num_elements <= sizeof(desc->enabled_mask)*8);
        assert(num_elements <= sizeof(desc->dirty_mask)*8);

        desc->atom.emit = (void*)emit_func;
        desc->shader_userdata_reg = shader_userdata_reg;
        desc->element_dw_size = element_dw_size;
        desc->num_elements = num_elements;
        desc->context_size = num_elements * element_dw_size * 4;

        desc->buffer = (struct r600_resource*)
                pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
                                   PIPE_USAGE_DEFAULT,
                                   SI_NUM_CONTEXTS * desc->context_size);

        r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, desc->buffer, RADEON_USAGE_READWRITE);
        va = r600_resource_va(sctx->b.b.screen, &desc->buffer->b.b);

        /* We don't check for CS space here, because this should be called
         * only once at context initialization. */
        si_emit_cp_dma_clear_buffer(sctx, va, desc->buffer->b.b.width0, 0,
                                    R600_CP_DMA_SYNC);
}

static void si_release_descriptors(struct si_descriptors *desc)
{
        pipe_resource_reference((struct pipe_resource**)&desc->buffer, NULL);
}

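/* Recompute how many CS dwords the next si_emit_descriptors call will need
 * and mark the atom dirty. The constant (K) cache is invalidated because the
 * descriptors are read through it. */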
static void si_update_descriptors(struct si_context *sctx,
                                  struct si_descriptors *desc)
{
        if (desc->dirty_mask) {
                desc->atom.num_dw =
                        7 + /* copy */
                        (4 + desc->element_dw_size) * util_bitcount(desc->dirty_mask) + /* update */
                        4; /* pointer update */
#if HAVE_LLVM >= 0x0305
                if (desc->shader_userdata_reg >= R_00B130_SPI_SHADER_USER_DATA_VS_0 &&
                    desc->shader_userdata_reg < R_00B230_SPI_SHADER_USER_DATA_GS_0)
                        desc->atom.num_dw += 4; /* second pointer update */
#endif
                desc->atom.dirty = true;
                /* The descriptors are read with the K cache. */
                sctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE;
        } else {
                desc->atom.dirty = false;
        }
}

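/* Write the GPU address of the current descriptor context slot into the
 * shader's user data SGPRs. With LLVM 3.5+, a VS pointer is mirrored into
 * the ES user data registers as well, presumably because the vertex shader
 * can run as an ES stage when a geometry shader is bound. */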
static void si_emit_shader_pointer(struct si_context *sctx,
                                   struct si_descriptors *desc)
{
        struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
        uint64_t va = r600_resource_va(sctx->b.b.screen, &desc->buffer->b.b) +
                      desc->current_context_id * desc->context_size;

        radeon_emit(cs, PKT3(PKT3_SET_SH_REG, 2, 0));
        radeon_emit(cs, (desc->shader_userdata_reg - SI_SH_REG_OFFSET) >> 2);
        radeon_emit(cs, va);
        radeon_emit(cs, va >> 32);

#if HAVE_LLVM >= 0x0305
        if (desc->shader_userdata_reg >= R_00B130_SPI_SHADER_USER_DATA_VS_0 &&
            desc->shader_userdata_reg < R_00B230_SPI_SHADER_USER_DATA_GS_0) {
                radeon_emit(cs, PKT3(PKT3_SET_SH_REG, 2, 0));
                radeon_emit(cs, (desc->shader_userdata_reg +
                                 (R_00B330_SPI_SHADER_USER_DATA_ES_0 -
                                  R_00B130_SPI_SHADER_USER_DATA_VS_0) -
                                 SI_SH_REG_OFFSET) >> 2);
                radeon_emit(cs, va);
                radeon_emit(cs, va >> 32);
        }
#endif
}

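/* Copy the current descriptor context to the next slot with CP DMA, write
 * the dirty elements into the new slot with WRITE_DATA packets (consecutive
 * elements are merged into one packet), then point the shader at the new
 * slot. */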
static void si_emit_descriptors(struct si_context *sctx,
                                struct si_descriptors *desc,
                                uint32_t **descriptors)
{
        struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
        uint64_t va_base;
        int packet_start;
        int packet_size = 0;
        int last_index = desc->num_elements; /* point to a non-existing element */
        unsigned dirty_mask = desc->dirty_mask;
        unsigned new_context_id = (desc->current_context_id + 1) % SI_NUM_CONTEXTS;

        assert(dirty_mask);

        va_base = r600_resource_va(sctx->b.b.screen, &desc->buffer->b.b);

        /* Copy the descriptors to a new context slot. */
        /* XXX Consider using TC or L2 for this copy on CIK. */
        si_emit_cp_dma_copy_buffer(sctx,
                                   va_base + new_context_id * desc->context_size,
                                   va_base + desc->current_context_id * desc->context_size,
                                   desc->context_size, R600_CP_DMA_SYNC);

        va_base += new_context_id * desc->context_size;

        /* Update the descriptors.
         * Updates of consecutive descriptors are merged to one WRITE_DATA packet.
         *
         * XXX When unbinding lots of resources, consider clearing the memory
         * with CP DMA instead of emitting zeros.
         */
        while (dirty_mask) {
                int i = u_bit_scan(&dirty_mask);

                assert(i < desc->num_elements);

                if (last_index+1 == i && packet_size) {
                        /* Append new data at the end of the last packet. */
                        packet_size += desc->element_dw_size;
                        cs->buf[packet_start] = PKT3(PKT3_WRITE_DATA, packet_size, 0);
                } else {
                        /* Start a new packet. */
                        uint64_t va = va_base + i * desc->element_dw_size * 4;

                        packet_start = cs->cdw;
                        packet_size = 2 + desc->element_dw_size;

                        radeon_emit(cs, PKT3(PKT3_WRITE_DATA, packet_size, 0));
                        radeon_emit(cs, PKT3_WRITE_DATA_DST_SEL(PKT3_WRITE_DATA_DST_SEL_TC_OR_L2) |
                                        PKT3_WRITE_DATA_WR_CONFIRM |
                                        PKT3_WRITE_DATA_ENGINE_SEL(PKT3_WRITE_DATA_ENGINE_SEL_ME));
                        radeon_emit(cs, va & 0xFFFFFFFFUL);
                        radeon_emit(cs, (va >> 32UL) & 0xFFFFFFFFUL);
                }

                radeon_emit_array(cs, descriptors[i], desc->element_dw_size);

                last_index = i;
        }

        desc->dirty_mask = 0;
        desc->current_context_id = new_context_id;

        /* Now update the shader userdata pointer. */
        si_emit_shader_pointer(sctx, desc);
}

static unsigned si_get_shader_user_data_base(unsigned shader)
{
        switch (shader) {
        case PIPE_SHADER_VERTEX:
                return R_00B130_SPI_SHADER_USER_DATA_VS_0;
        case PIPE_SHADER_GEOMETRY:
                return R_00B230_SPI_SHADER_USER_DATA_GS_0;
        case PIPE_SHADER_FRAGMENT:
                return R_00B030_SPI_SHADER_USER_DATA_PS_0;
        default:
                assert(0);
                return 0;
        }
}

/* SAMPLER VIEWS */

static void si_emit_sampler_views(struct si_context *sctx, struct r600_atom *atom)
{
        struct si_sampler_views *views = (struct si_sampler_views*)atom;

        si_emit_descriptors(sctx, &views->desc, views->desc_data);
}

static void si_init_sampler_views(struct si_context *sctx,
                                  struct si_sampler_views *views,
                                  unsigned shader)
{
        si_init_descriptors(sctx, &views->desc,
                            si_get_shader_user_data_base(shader) +
                            SI_SGPR_RESOURCE * 4,
                            8, NUM_SAMPLER_VIEWS, si_emit_sampler_views);
}

static void si_release_sampler_views(struct si_sampler_views *views)
{
        int i;

        for (i = 0; i < Elements(views->views); i++) {
                pipe_sampler_view_reference(&views->views[i], NULL);
        }
        si_release_descriptors(&views->desc);
}

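/* Called when a new command stream is started: the relocations added to the
 * previous CS are gone, so re-add them for all enabled views and re-emit the
 * shader userdata pointer. */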
static void si_sampler_views_begin_new_cs(struct si_context *sctx,
                                          struct si_sampler_views *views)
{
        unsigned mask = views->desc.enabled_mask;

        /* Add relocations to the CS. */
        while (mask) {
                int i = u_bit_scan(&mask);
                struct si_pipe_sampler_view *rview =
                        (struct si_pipe_sampler_view*)views->views[i];

                r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, rview->resource, RADEON_USAGE_READ);
        }

        r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, views->desc.buffer, RADEON_USAGE_READWRITE);

        si_emit_shader_pointer(sctx, &views->desc);
}

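/* Bind or unbind one sampler view. view_desc points at the 8-dword resource
 * descriptor owned by the sampler view; only the pointer is stored here, the
 * data is copied into the descriptor buffer by si_emit_descriptors. */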
void si_set_sampler_view(struct si_context *sctx, unsigned shader,
                         unsigned slot, struct pipe_sampler_view *view,
                         unsigned *view_desc)
{
        struct si_sampler_views *views = &sctx->samplers[shader].views;

        if (views->views[slot] == view)
                return;

        if (view) {
                struct si_pipe_sampler_view *rview =
                        (struct si_pipe_sampler_view*)view;

                r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, rview->resource, RADEON_USAGE_READ);

                pipe_sampler_view_reference(&views->views[slot], view);
                views->desc_data[slot] = view_desc;
                views->desc.enabled_mask |= 1 << slot;
        } else {
                pipe_sampler_view_reference(&views->views[slot], NULL);
                views->desc_data[slot] = null_desc;
                views->desc.enabled_mask &= ~(1 << slot);
        }

        views->desc.dirty_mask |= 1 << slot;
        si_update_descriptors(sctx, &views->desc);
}

/* BUFFER RESOURCES */

static void si_emit_buffer_resources(struct si_context *sctx, struct r600_atom *atom)
{
        struct si_buffer_resources *buffers = (struct si_buffer_resources*)atom;

        si_emit_descriptors(sctx, &buffers->desc, buffers->desc_data);
}

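/* Set up per-shader buffer descriptors (4 dwords each). desc_data is an
 * array of pointers into desc_storage because si_emit_descriptors only
 * accepts an array of arrays. */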
static void si_init_buffer_resources(struct si_context *sctx,
                                     struct si_buffer_resources *buffers,
                                     unsigned num_buffers, unsigned shader,
                                     unsigned shader_userdata_index,
                                     enum radeon_bo_usage shader_usage)
{
        int i;

        buffers->num_buffers = num_buffers;
        buffers->shader_usage = shader_usage;
        buffers->buffers = CALLOC(num_buffers, sizeof(struct pipe_resource*));
        buffers->desc_storage = CALLOC(num_buffers, sizeof(uint32_t) * 4);

        /* si_emit_descriptors only accepts an array of arrays.
         * This adds such an array. */
        buffers->desc_data = CALLOC(num_buffers, sizeof(uint32_t*));
        for (i = 0; i < num_buffers; i++) {
                buffers->desc_data[i] = &buffers->desc_storage[i*4];
        }

        si_init_descriptors(sctx, &buffers->desc,
                            si_get_shader_user_data_base(shader) +
                            shader_userdata_index*4, 4, num_buffers,
                            si_emit_buffer_resources);
}

static void si_release_buffer_resources(struct si_buffer_resources *buffers)
{
        int i;

        /* buffers->buffers is heap-allocated, so Elements() would evaluate
         * to 1 here; iterate over the stored buffer count instead. */
        for (i = 0; i < buffers->num_buffers; i++) {
                pipe_resource_reference(&buffers->buffers[i], NULL);
        }

        FREE(buffers->buffers);
        FREE(buffers->desc_storage);
        FREE(buffers->desc_data);
        si_release_descriptors(&buffers->desc);
}

static void si_buffer_resources_begin_new_cs(struct si_context *sctx,
                                             struct si_buffer_resources *buffers)
{
        unsigned mask = buffers->desc.enabled_mask;

        /* Add relocations to the CS. */
        while (mask) {
                int i = u_bit_scan(&mask);

                r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
                                      (struct r600_resource*)buffers->buffers[i],
                                      buffers->shader_usage);
        }

        r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
                              buffers->desc.buffer, RADEON_USAGE_READWRITE);

        si_emit_shader_pointer(sctx, &buffers->desc);
}

/* CONSTANT BUFFERS */

void si_upload_const_buffer(struct si_context *sctx, struct r600_resource **rbuffer,
                            const uint8_t *ptr, unsigned size, uint32_t *const_offset)
{
        if (SI_BIG_ENDIAN) {
                uint32_t *tmpPtr;
                unsigned i;

                if (!(tmpPtr = malloc(size))) {
                        R600_ERR("Failed to allocate BE swap buffer.\n");
                        return;
                }

                for (i = 0; i < size / 4; ++i) {
                        tmpPtr[i] = util_bswap32(((uint32_t *)ptr)[i]);
                }

                u_upload_data(sctx->b.uploader, 0, size, tmpPtr, const_offset,
                              (struct pipe_resource**)rbuffer);

                free(tmpPtr);
        } else {
                u_upload_data(sctx->b.uploader, 0, size, ptr, const_offset,
                              (struct pipe_resource**)rbuffer);
        }
}

static void si_set_constant_buffer(struct pipe_context *ctx, uint shader, uint slot,
                                   struct pipe_constant_buffer *input)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_buffer_resources *buffers = &sctx->const_buffers[shader];

        if (shader >= SI_NUM_SHADERS)
                return;

        assert(slot < buffers->num_buffers);
        pipe_resource_reference(&buffers->buffers[slot], NULL);

        /* CIK cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
         * with a NULL buffer). We need to use a dummy buffer instead. */
        if (sctx->b.chip_class == CIK &&
            (!input || (!input->buffer && !input->user_buffer)))
                input = &sctx->null_const_buf;

        if (input && (input->buffer || input->user_buffer)) {
                struct pipe_resource *buffer = NULL;
                uint64_t va;

                /* Upload the user buffer if needed. */
                if (input->user_buffer) {
                        unsigned buffer_offset;

                        si_upload_const_buffer(sctx,
                                               (struct r600_resource**)&buffer, input->user_buffer,
                                               input->buffer_size, &buffer_offset);
                        va = r600_resource_va(ctx->screen, buffer) + buffer_offset;
                } else {
                        pipe_resource_reference(&buffer, input->buffer);
                        va = r600_resource_va(ctx->screen, buffer) + input->buffer_offset;
                }

                /* Set the descriptor. */
                uint32_t *desc = buffers->desc_data[slot];
                desc[0] = va;
                desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
                          S_008F04_STRIDE(0);
                desc[2] = input->buffer_size;
                desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
                          S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
                          S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
                          S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
                          S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
                          S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);

                buffers->buffers[slot] = buffer;
                r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
                                      (struct r600_resource*)buffer, buffers->shader_usage);
                buffers->desc.enabled_mask |= 1 << slot;
        } else {
                /* Clear the descriptor. */
                memset(buffers->desc_data[slot], 0, sizeof(uint32_t) * 4);
                buffers->desc.enabled_mask &= ~(1 << slot);
        }

        buffers->desc.dirty_mask |= 1 << slot;
        si_update_descriptors(sctx, &buffers->desc);
}

/* RING BUFFERS */

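/* Bind a read/write ring buffer, presumably used for internal rings such as
 * the ESGS/GSVS rings when a geometry shader is active. element_size and
 * index_stride are converted to the encoded hardware field values by the
 * switch statements below. */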
void si_set_ring_buffer(struct pipe_context *ctx, uint shader, uint slot,
                        struct pipe_constant_buffer *input,
                        unsigned stride, unsigned num_records,
                        bool add_tid, bool swizzle,
                        unsigned element_size, unsigned index_stride)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_buffer_resources *buffers = &sctx->rw_buffers[shader];

        if (shader >= SI_NUM_SHADERS)
                return;

        /* The stride field in the resource descriptor has 14 bits */
        assert(stride < (1 << 14));

        assert(slot < buffers->num_buffers);
        pipe_resource_reference(&buffers->buffers[slot], NULL);

        if (input && input->buffer) {
                uint64_t va;

                va = r600_resource_va(ctx->screen, input->buffer);

                switch (element_size) {
                default:
                        assert(!"Unsupported ring buffer element size");
                case 0:
                case 2:
                        element_size = 0;
                        break;
                case 4:
                        element_size = 1;
                        break;
                case 8:
                        element_size = 2;
                        break;
                case 16:
                        element_size = 3;
                        break;
                }

                switch (index_stride) {
                default:
                        assert(!"Unsupported ring buffer index stride");
                case 0:
                case 8:
                        index_stride = 0;
                        break;
                case 16:
                        index_stride = 1;
                        break;
                case 32:
                        index_stride = 2;
                        break;
                case 64:
                        index_stride = 3;
                        break;
                }

                /* Set the descriptor. */
                uint32_t *desc = buffers->desc_data[slot];
                desc[0] = va;
                desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
                          S_008F04_STRIDE(stride) |
                          S_008F04_SWIZZLE_ENABLE(swizzle);
                desc[2] = num_records;
                desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
                          S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
                          S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
                          S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
                          S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
                          S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
                          S_008F0C_ELEMENT_SIZE(element_size) |
                          S_008F0C_INDEX_STRIDE(index_stride) |
                          S_008F0C_ADD_TID_ENABLE(add_tid);

                pipe_resource_reference(&buffers->buffers[slot], input->buffer);
                r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
                                      (struct r600_resource*)input->buffer,
                                      buffers->shader_usage);
                buffers->desc.enabled_mask |= 1 << slot;
        } else {
                /* Clear the descriptor. */
                memset(buffers->desc_data[slot], 0, sizeof(uint32_t) * 4);
                buffers->desc.enabled_mask &= ~(1 << slot);
        }

        buffers->desc.dirty_mask |= 1 << slot;
        si_update_descriptors(sctx, &buffers->desc);
}

/* STREAMOUT BUFFERS */

static void si_set_streamout_targets(struct pipe_context *ctx,
                                     unsigned num_targets,
                                     struct pipe_stream_output_target **targets,
                                     unsigned append_bitmask)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_buffer_resources *buffers = &sctx->rw_buffers[PIPE_SHADER_VERTEX];
        unsigned old_num_targets = sctx->b.streamout.num_targets;
        unsigned i, bufidx;

        /* Streamout buffers must be bound in 2 places:
         * 1) in VGT by setting the VGT_STRMOUT registers
         * 2) as shader resources
         */

        /* Set the VGT regs. */
        r600_set_streamout_targets(ctx, num_targets, targets, append_bitmask);

        /* Set the shader resources. */
        for (i = 0; i < num_targets; i++) {
                bufidx = SI_RW_SO + i;

                if (targets[i]) {
                        struct pipe_resource *buffer = targets[i]->buffer;
                        uint64_t va = r600_resource_va(ctx->screen, buffer);

                        /* Set the descriptor. */
                        uint32_t *desc = buffers->desc_data[bufidx];
                        desc[0] = va;
                        desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
                        desc[2] = 0xffffffff;
                        desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
                                  S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
                                  S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
                                  S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);

                        /* Set the resource. */
                        pipe_resource_reference(&buffers->buffers[bufidx],
                                                buffer);
                        r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
                                              (struct r600_resource*)buffer,
                                              buffers->shader_usage);
                        buffers->desc.enabled_mask |= 1 << bufidx;
                } else {
                        /* Clear the descriptor and unset the resource. */
                        memset(buffers->desc_data[bufidx], 0,
                               sizeof(uint32_t) * 4);
                        pipe_resource_reference(&buffers->buffers[bufidx],
                                                NULL);
                        buffers->desc.enabled_mask &= ~(1 << bufidx);
                }
                buffers->desc.dirty_mask |= 1 << bufidx;
        }
        for (; i < old_num_targets; i++) {
                bufidx = SI_RW_SO + i;
                /* Clear the descriptor and unset the resource. */
                memset(buffers->desc_data[bufidx], 0, sizeof(uint32_t) * 4);
                pipe_resource_reference(&buffers->buffers[bufidx], NULL);
                buffers->desc.enabled_mask &= ~(1 << bufidx);
                buffers->desc.dirty_mask |= 1 << bufidx;
        }

        si_update_descriptors(sctx, &buffers->desc);
}

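/* Point an existing buffer descriptor at a newly allocated buffer while
 * keeping the offset that was encoded in the old descriptor. Used when a
 * buffer's storage is reallocated, see si_invalidate_buffer. */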
static void si_desc_reset_buffer_offset(struct pipe_context *ctx,
                                        uint32_t *desc, uint64_t old_buf_va,
                                        struct pipe_resource *new_buf)
{
        /* Retrieve the buffer offset from the descriptor. */
        uint64_t old_desc_va =
                desc[0] | ((uint64_t)G_008F04_BASE_ADDRESS_HI(desc[1]) << 32);

        assert(old_buf_va <= old_desc_va);
        uint64_t offset_within_buffer = old_desc_va - old_buf_va;

        /* Update the descriptor. */
        uint64_t va = r600_resource_va(ctx->screen, new_buf) + offset_within_buffer;

        desc[0] = va;
        desc[1] = (desc[1] & C_008F04_BASE_ADDRESS_HI) |
                  S_008F04_BASE_ADDRESS_HI(va >> 32);
}

/* BUFFER DISCARD/INVALIDATION */

/* Reallocate a buffer and update all resource bindings where the buffer is
 * bound.
 *
 * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
 * idle by discarding its contents. Apps usually tell us when to do this using
 * map_buffer flags, for example.
 */
static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
{
        struct si_context *sctx = (struct si_context*)ctx;
        struct r600_resource *rbuffer = r600_resource(buf);
        unsigned i, shader, alignment = rbuffer->buf->alignment;
        uint64_t old_va = r600_resource_va(ctx->screen, buf);

        /* Discard the buffer. */
        pb_reference(&rbuffer->buf, NULL);

        /* Create a new one in the same pipe_resource. */
        r600_init_resource(&sctx->screen->b, rbuffer, rbuffer->b.b.width0,
                           alignment, TRUE);

        /* We changed the buffer, now we need to bind it where the old one
         * was bound. This consists of 2 things:
         * 1) Updating the resource descriptor and dirtying it.
         * 2) Adding a relocation to the CS, so that it's usable.
         */

        /* Vertex buffers. */
        /* Nothing to do. Vertex buffer bindings are updated before every draw call. */

        /* Read/Write buffers. */
        for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
                struct si_buffer_resources *buffers = &sctx->rw_buffers[shader];
                bool found = false;
                uint32_t mask = buffers->desc.enabled_mask;

                while (mask) {
                        i = u_bit_scan(&mask);
                        if (buffers->buffers[i] == buf) {
                                si_desc_reset_buffer_offset(ctx, buffers->desc_data[i],
                                                            old_va, buf);

                                r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
                                                      rbuffer, buffers->shader_usage);

                                buffers->desc.dirty_mask |= 1 << i;
                                found = true;

                                if (i >= SI_RW_SO && shader == PIPE_SHADER_VERTEX) {
                                        /* Update the streamout state. */
                                        if (sctx->b.streamout.begin_emitted) {
                                                r600_emit_streamout_end(&sctx->b);
                                        }
                                        sctx->b.streamout.append_bitmask =
                                                sctx->b.streamout.enabled_mask;
                                        r600_streamout_buffers_dirty(&sctx->b);
                                }
                        }
                }
                if (found) {
                        si_update_descriptors(sctx, &buffers->desc);
                }
        }

        /* Constant buffers. */
        for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
                struct si_buffer_resources *buffers = &sctx->const_buffers[shader];
                bool found = false;
                uint32_t mask = buffers->desc.enabled_mask;

                while (mask) {
                        unsigned i = u_bit_scan(&mask);
                        if (buffers->buffers[i] == buf) {
                                si_desc_reset_buffer_offset(ctx, buffers->desc_data[i],
                                                            old_va, buf);

                                r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
                                                      rbuffer, buffers->shader_usage);

                                buffers->desc.dirty_mask |= 1 << i;
                                found = true;
                        }
                }
                if (found) {
                        si_update_descriptors(sctx, &buffers->desc);
                }
        }

        /* Texture buffers. */
        for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
                struct si_sampler_views *views = &sctx->samplers[shader].views;
                bool found = false;
                uint32_t mask = views->desc.enabled_mask;

                while (mask) {
                        unsigned i = u_bit_scan(&mask);
                        if (views->views[i]->texture == buf) {
                                /* This updates the sampler view directly. */
                                si_desc_reset_buffer_offset(ctx, views->desc_data[i],
                                                            old_va, buf);

                                r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
                                                      rbuffer, RADEON_USAGE_READ);

                                views->desc.dirty_mask |= 1 << i;
                                found = true;
                        }
                }
                if (found) {
                        si_update_descriptors(sctx, &views->desc);
                }
        }
}

/* CP DMA */

/* The max number of bytes to copy per packet. */
#define CP_DMA_MAX_BYTE_COUNT ((1 << 21) - 8)

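/* Clear a buffer range with a 32-bit value using CP DMA, splitting the range
 * into packets of at most CP_DMA_MAX_BYTE_COUNT bytes. Unaligned ranges fall
 * back to a CPU mapping below. */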
static void si_clear_buffer(struct pipe_context *ctx, struct pipe_resource *dst,
                            unsigned offset, unsigned size, unsigned value)
{
        struct si_context *sctx = (struct si_context*)ctx;

        if (!size)
                return;

        /* Mark the buffer range of destination as valid (initialized),
         * so that transfer_map knows it should wait for the GPU when mapping
         * that range. */
        util_range_add(&r600_resource(dst)->valid_buffer_range, offset,
                       offset + size);

        /* Fallback for unaligned clears: map the buffer and write the
         * requested range byte by byte, repeating the 32-bit clear value. */
        if (offset % 4 != 0 || size % 4 != 0) {
                uint8_t *map = sctx->b.ws->buffer_map(r600_resource(dst)->cs_buf,
                                                      sctx->b.rings.gfx.cs,
                                                      PIPE_TRANSFER_WRITE);
                map += offset;
                for (unsigned i = 0; i < size; i++)
                        map[i] = value >> (i % 4 * 8);
                return;
        }

        uint64_t va = r600_resource_va(&sctx->screen->b.b, dst) + offset;

        /* Flush the caches where the resource is bound. */
        /* XXX only flush the caches where the buffer is bound. */
        sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
                         R600_CONTEXT_INV_CONST_CACHE |
                         R600_CONTEXT_FLUSH_AND_INV_CB |
                         R600_CONTEXT_FLUSH_AND_INV_DB |
                         R600_CONTEXT_FLUSH_AND_INV_CB_META |
                         R600_CONTEXT_FLUSH_AND_INV_DB_META;
        sctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;

        while (size) {
                unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
                unsigned dma_flags = 0;

                si_need_cs_space(sctx, 7 + (sctx->b.flags ? sctx->cache_flush.num_dw : 0),
                                 FALSE);

                /* This must be done after need_cs_space. */
                r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
                                      (struct r600_resource*)dst, RADEON_USAGE_WRITE);

                /* Flush the caches for the first copy only.
                 * Also wait for the previous CP DMA operations. */
                if (sctx->b.flags) {
                        si_emit_cache_flush(&sctx->b, NULL);
                        dma_flags |= SI_CP_DMA_RAW_WAIT; /* same as WAIT_UNTIL=CP_DMA_IDLE */
                }

                /* Do the synchronization after the last copy, so that all data is written to memory. */
                if (size == byte_count)
                        dma_flags |= R600_CP_DMA_SYNC;

                /* Emit the clear packet. */
                si_emit_cp_dma_clear_buffer(sctx, va, byte_count, value, dma_flags);

                size -= byte_count;
                va += byte_count;
        }

        /* Flush the caches again in case the 3D engine has been prefetching
         * the resource. */
        /* XXX only flush the caches where the buffer is bound. */
        sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
                         R600_CONTEXT_INV_CONST_CACHE |
                         R600_CONTEXT_FLUSH_AND_INV_CB |
                         R600_CONTEXT_FLUSH_AND_INV_DB |
                         R600_CONTEXT_FLUSH_AND_INV_CB_META |
                         R600_CONTEXT_FLUSH_AND_INV_DB_META;
}

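/* GPU copy between buffers using CP DMA, with the same cache flushing and
 * packet splitting scheme as si_clear_buffer. */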
void si_copy_buffer(struct si_context *sctx,
                    struct pipe_resource *dst, struct pipe_resource *src,
                    uint64_t dst_offset, uint64_t src_offset, unsigned size)
{
        if (!size)
                return;

        /* Mark the buffer range of destination as valid (initialized),
         * so that transfer_map knows it should wait for the GPU when mapping
         * that range. */
        util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
                       dst_offset + size);

        dst_offset += r600_resource_va(&sctx->screen->b.b, dst);
        src_offset += r600_resource_va(&sctx->screen->b.b, src);

        /* Flush the caches where the resource is bound. */
        sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
                         R600_CONTEXT_INV_CONST_CACHE |
                         R600_CONTEXT_FLUSH_AND_INV_CB |
                         R600_CONTEXT_FLUSH_AND_INV_DB |
                         R600_CONTEXT_FLUSH_AND_INV_CB_META |
                         R600_CONTEXT_FLUSH_AND_INV_DB_META |
                         R600_CONTEXT_WAIT_3D_IDLE;

        while (size) {
                unsigned sync_flags = 0;
                unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);

                si_need_cs_space(sctx, 7 + (sctx->b.flags ? sctx->cache_flush.num_dw : 0), FALSE);

                /* Flush the caches for the first copy only. Also wait for old CP DMA packets to complete. */
                if (sctx->b.flags) {
                        si_emit_cache_flush(&sctx->b, NULL);
                        sync_flags |= SI_CP_DMA_RAW_WAIT;
                }

                /* Do the synchronization after the last copy, so that all data is written to memory. */
                if (size == byte_count) {
                        sync_flags |= R600_CP_DMA_SYNC;
                }

                /* This must be done after r600_need_cs_space. */
                r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, (struct r600_resource*)src, RADEON_USAGE_READ);
                r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, (struct r600_resource*)dst, RADEON_USAGE_WRITE);

                si_emit_cp_dma_copy_buffer(sctx, dst_offset, src_offset, byte_count, sync_flags);

                size -= byte_count;
                src_offset += byte_count;
                dst_offset += byte_count;
        }

        sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
                         R600_CONTEXT_INV_CONST_CACHE |
                         R600_CONTEXT_FLUSH_AND_INV_CB |
                         R600_CONTEXT_FLUSH_AND_INV_DB |
                         R600_CONTEXT_FLUSH_AND_INV_CB_META |
                         R600_CONTEXT_FLUSH_AND_INV_DB_META;
}

/* INIT/DEINIT */

void si_init_all_descriptors(struct si_context *sctx)
{
        int i;

        for (i = 0; i < SI_NUM_SHADERS; i++) {
                si_init_buffer_resources(sctx, &sctx->const_buffers[i],
                                         NUM_CONST_BUFFERS, i, SI_SGPR_CONST,
                                         RADEON_USAGE_READ);
                si_init_buffer_resources(sctx, &sctx->rw_buffers[i],
                                         i == PIPE_SHADER_VERTEX ?
                                         SI_RW_SO + 4 : SI_RW_SO,
                                         i, SI_SGPR_RW_BUFFERS,
                                         RADEON_USAGE_READWRITE);

                si_init_sampler_views(sctx, &sctx->samplers[i].views, i);

                sctx->atoms.const_buffers[i] = &sctx->const_buffers[i].desc.atom;
                sctx->atoms.rw_buffers[i] = &sctx->rw_buffers[i].desc.atom;
                sctx->atoms.sampler_views[i] = &sctx->samplers[i].views.desc.atom;
        }

        /* Set pipe_context functions. */
        sctx->b.b.set_constant_buffer = si_set_constant_buffer;
        sctx->b.b.set_stream_output_targets = si_set_streamout_targets;
        sctx->b.clear_buffer = si_clear_buffer;
        sctx->b.invalidate_buffer = si_invalidate_buffer;
}

void si_release_all_descriptors(struct si_context *sctx)
{
        int i;

        for (i = 0; i < SI_NUM_SHADERS; i++) {
                si_release_buffer_resources(&sctx->const_buffers[i]);
                si_release_buffer_resources(&sctx->rw_buffers[i]);
                si_release_sampler_views(&sctx->samplers[i].views);
        }
}

void si_all_descriptors_begin_new_cs(struct si_context *sctx)
{
        int i;

        for (i = 0; i < SI_NUM_SHADERS; i++) {
                si_buffer_resources_begin_new_cs(sctx, &sctx->const_buffers[i]);
                si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers[i]);
                si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i].views);
        }
}