radeonsi: remove nr_vertex_buffers
[mesa.git] src/gallium/drivers/radeonsi/si_descriptors.c
1 /*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Marek Olšák <marek.olsak@amd.com>
25 */
26 #include "../radeon/r600_cs.h"
27 #include "si_pipe.h"
28 #include "si_shader.h"
29 #include "sid.h"
30
31 #include "util/u_memory.h"
32 #include "util/u_upload_mgr.h"
33
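/* Each descriptor array is multi-buffered: SI_NUM_CONTEXTS copies of it live
 * in one buffer, and updates are written into the next copy (see
 * si_emit_descriptors), so the copy the GPU may still be reading is never
 * modified in place. */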
34 #define SI_NUM_CONTEXTS 16
35
36 static uint32_t null_desc[8]; /* zeros */
37
38 /* Set this if you want the 3D engine to wait until CP DMA is done.
39 * It should be set on the last CP DMA packet. */
40 #define R600_CP_DMA_SYNC (1 << 0) /* R600+ */
41
42 /* Set this if the source data was used as a destination in a previous CP DMA
43 * packet. It's for preventing a read-after-write (RAW) hazard between two
44 * CP DMA packets. */
45 #define SI_CP_DMA_RAW_WAIT (1 << 1) /* SI+ */
46
47 /* Emit a CP DMA packet to do a copy from one buffer to another.
48 * The size must fit in bits [20:0].
49 */
50 static void si_emit_cp_dma_copy_buffer(struct si_context *sctx,
51 uint64_t dst_va, uint64_t src_va,
52 unsigned size, unsigned flags)
53 {
54 struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
55 uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? PKT3_CP_DMA_CP_SYNC : 0;
56 uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? PKT3_CP_DMA_CMD_RAW_WAIT : 0;
57
58 assert(size);
59 assert((size & ((1<<21)-1)) == size);
60
61 if (sctx->b.chip_class >= CIK) {
62 radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
63 radeon_emit(cs, sync_flag); /* CP_SYNC [31] */
64 radeon_emit(cs, src_va); /* SRC_ADDR_LO [31:0] */
65 radeon_emit(cs, src_va >> 32); /* SRC_ADDR_HI [31:0] */
66 radeon_emit(cs, dst_va); /* DST_ADDR_LO [31:0] */
67 radeon_emit(cs, dst_va >> 32); /* DST_ADDR_HI [31:0] */
68 radeon_emit(cs, size | raw_wait); /* COMMAND [29:22] | BYTE_COUNT [20:0] */
69 } else {
70 radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
71 radeon_emit(cs, src_va); /* SRC_ADDR_LO [31:0] */
72 radeon_emit(cs, sync_flag | ((src_va >> 32) & 0xffff)); /* CP_SYNC [31] | SRC_ADDR_HI [15:0] */
73 radeon_emit(cs, dst_va); /* DST_ADDR_LO [31:0] */
74 radeon_emit(cs, (dst_va >> 32) & 0xffff); /* DST_ADDR_HI [15:0] */
75 radeon_emit(cs, size | raw_wait); /* COMMAND [29:22] | BYTE_COUNT [20:0] */
76 }
77 }
78
79 /* Emit a CP DMA packet to clear a buffer. The size must fit in bits [20:0]. */
80 static void si_emit_cp_dma_clear_buffer(struct si_context *sctx,
81 uint64_t dst_va, unsigned size,
82 uint32_t clear_value, unsigned flags)
83 {
84 struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
85 uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? PKT3_CP_DMA_CP_SYNC : 0;
86 uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? PKT3_CP_DMA_CMD_RAW_WAIT : 0;
87
88 assert(size);
89 assert((size & ((1<<21)-1)) == size);
90
91 if (sctx->b.chip_class >= CIK) {
92 radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
93 radeon_emit(cs, sync_flag | PKT3_CP_DMA_SRC_SEL(2)); /* CP_SYNC [31] | SRC_SEL[30:29] */
94 radeon_emit(cs, clear_value); /* DATA [31:0] */
95 		radeon_emit(cs, 0); /* unused */
96 radeon_emit(cs, dst_va); /* DST_ADDR_LO [31:0] */
97 		radeon_emit(cs, dst_va >> 32); /* DST_ADDR_HI [31:0] */
98 radeon_emit(cs, size | raw_wait); /* COMMAND [29:22] | BYTE_COUNT [20:0] */
99 } else {
100 radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
101 radeon_emit(cs, clear_value); /* DATA [31:0] */
102 radeon_emit(cs, sync_flag | PKT3_CP_DMA_SRC_SEL(2)); /* CP_SYNC [31] | SRC_SEL[30:29] */
103 radeon_emit(cs, dst_va); /* DST_ADDR_LO [31:0] */
104 radeon_emit(cs, (dst_va >> 32) & 0xffff); /* DST_ADDR_HI [15:0] */
105 radeon_emit(cs, size | raw_wait); /* COMMAND [29:22] | BYTE_COUNT [20:0] */
106 }
107 }
108
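/* Allocate a buffer holding SI_NUM_CONTEXTS copies of a descriptor array with
 * "num_elements" elements of "element_dw_size" dwords each, and clear it with
 * CP DMA. "shader_userdata_reg" is the user data register that will receive
 * the pointer to the current copy. */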
109 static void si_init_descriptors(struct si_context *sctx,
110 struct si_descriptors *desc,
111 unsigned shader_userdata_reg,
112 unsigned element_dw_size,
113 unsigned num_elements,
114 void (*emit_func)(struct si_context *ctx, struct r600_atom *state))
115 {
116 uint64_t va;
117
118 assert(num_elements <= sizeof(desc->enabled_mask)*8);
119 assert(num_elements <= sizeof(desc->dirty_mask)*8);
120
121 desc->atom.emit = (void*)emit_func;
122 desc->shader_userdata_reg = shader_userdata_reg;
123 desc->element_dw_size = element_dw_size;
124 desc->num_elements = num_elements;
125 desc->context_size = num_elements * element_dw_size * 4;
126
127 desc->buffer = (struct r600_resource*)
128 pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
129 PIPE_USAGE_DEFAULT,
130 SI_NUM_CONTEXTS * desc->context_size);
131
132 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, desc->buffer,
133 RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_DATA);
134 va = r600_resource_va(sctx->b.b.screen, &desc->buffer->b.b);
135
136 /* We don't check for CS space here, because this should be called
137 * only once at context initialization. */
138 si_emit_cp_dma_clear_buffer(sctx, va, desc->buffer->b.b.width0, 0,
139 R600_CP_DMA_SYNC);
140 }
141
142 static void si_release_descriptors(struct si_descriptors *desc)
143 {
144 pipe_resource_reference((struct pipe_resource**)&desc->buffer, NULL);
145 }
146
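/* Recompute the command-stream size of the atom: 7 dwords for the CP DMA copy
 * into a new context slot, (4 + element_dw_size) dwords of WRITE_DATA per
 * dirty element, and 4 dwords for the SET_SH_REG pointer update (plus 4 more
 * when the VS pointer is mirrored into the ES user data registers). */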
147 static void si_update_descriptors(struct si_context *sctx,
148 struct si_descriptors *desc)
149 {
150 if (desc->dirty_mask) {
151 desc->atom.num_dw =
152 7 + /* copy */
153 (4 + desc->element_dw_size) * util_bitcount(desc->dirty_mask) + /* update */
154 4; /* pointer update */
155
156 if (desc->shader_userdata_reg >= R_00B130_SPI_SHADER_USER_DATA_VS_0 &&
157 desc->shader_userdata_reg < R_00B230_SPI_SHADER_USER_DATA_GS_0)
158 desc->atom.num_dw += 4; /* second pointer update */
159
160 desc->atom.dirty = true;
161 /* The descriptors are read with the K cache. */
162 sctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE;
163 } else {
164 desc->atom.dirty = false;
165 }
166 }
167
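/* Write the GPU address of the current descriptor copy into the shader's user
 * data SGPRs with SET_SH_REG. Pointers in the VS user data range are also
 * written to the corresponding ES registers, so the same descriptors remain
 * visible when the vertex shader is used as an ES stage. */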
168 static void si_emit_shader_pointer(struct si_context *sctx,
169 struct r600_atom *atom)
170 {
171 struct si_descriptors *desc = (struct si_descriptors*)atom;
172 struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
173 uint64_t va = r600_resource_va(sctx->b.b.screen, &desc->buffer->b.b) +
174 desc->current_context_id * desc->context_size +
175 desc->buffer_offset;
176
177 radeon_emit(cs, PKT3(PKT3_SET_SH_REG, 2, 0));
178 radeon_emit(cs, (desc->shader_userdata_reg - SI_SH_REG_OFFSET) >> 2);
179 radeon_emit(cs, va);
180 radeon_emit(cs, va >> 32);
181
182 if (desc->shader_userdata_reg >= R_00B130_SPI_SHADER_USER_DATA_VS_0 &&
183 desc->shader_userdata_reg < R_00B230_SPI_SHADER_USER_DATA_GS_0) {
184 radeon_emit(cs, PKT3(PKT3_SET_SH_REG, 2, 0));
185 radeon_emit(cs, (desc->shader_userdata_reg +
186 (R_00B330_SPI_SHADER_USER_DATA_ES_0 -
187 R_00B130_SPI_SHADER_USER_DATA_VS_0) -
188 SI_SH_REG_OFFSET) >> 2);
189 radeon_emit(cs, va);
190 radeon_emit(cs, va >> 32);
191 }
192 }
193
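/* Flush dirty descriptors to memory: copy the whole array into the next
 * context slot with CP DMA, overwrite the dirty elements in that copy with
 * WRITE_DATA packets, then point the shader at the new copy. */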
194 static void si_emit_descriptors(struct si_context *sctx,
195 struct si_descriptors *desc,
196 uint32_t **descriptors)
197 {
198 struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
199 uint64_t va_base;
200 int packet_start;
201 int packet_size = 0;
202 int last_index = desc->num_elements; /* point to a non-existing element */
203 unsigned dirty_mask = desc->dirty_mask;
204 unsigned new_context_id = (desc->current_context_id + 1) % SI_NUM_CONTEXTS;
205
206 assert(dirty_mask);
207
208 va_base = r600_resource_va(sctx->b.b.screen, &desc->buffer->b.b);
209
210 /* Copy the descriptors to a new context slot. */
211 /* XXX Consider using TC or L2 for this copy on CIK. */
212 si_emit_cp_dma_copy_buffer(sctx,
213 va_base + new_context_id * desc->context_size,
214 va_base + desc->current_context_id * desc->context_size,
215 desc->context_size, R600_CP_DMA_SYNC);
216
217 va_base += new_context_id * desc->context_size;
218
219 /* Update the descriptors.
220 * Updates of consecutive descriptors are merged to one WRITE_DATA packet.
221 *
222 * XXX When unbinding lots of resources, consider clearing the memory
223 * with CP DMA instead of emitting zeros.
224 */
225 while (dirty_mask) {
226 int i = u_bit_scan(&dirty_mask);
227
228 assert(i < desc->num_elements);
229
230 if (last_index+1 == i && packet_size) {
231 /* Append new data at the end of the last packet. */
232 packet_size += desc->element_dw_size;
233 cs->buf[packet_start] = PKT3(PKT3_WRITE_DATA, packet_size, 0);
234 } else {
235 /* Start a new packet. */
236 uint64_t va = va_base + i * desc->element_dw_size * 4;
237
238 packet_start = cs->cdw;
239 packet_size = 2 + desc->element_dw_size;
240
241 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, packet_size, 0));
242 radeon_emit(cs, PKT3_WRITE_DATA_DST_SEL(PKT3_WRITE_DATA_DST_SEL_TC_OR_L2) |
243 PKT3_WRITE_DATA_WR_CONFIRM |
244 PKT3_WRITE_DATA_ENGINE_SEL(PKT3_WRITE_DATA_ENGINE_SEL_ME));
245 radeon_emit(cs, va & 0xFFFFFFFFUL);
246 radeon_emit(cs, (va >> 32UL) & 0xFFFFFFFFUL);
247 }
248
249 radeon_emit_array(cs, descriptors[i], desc->element_dw_size);
250
251 last_index = i;
252 }
253
254 desc->dirty_mask = 0;
255 desc->current_context_id = new_context_id;
256
257 /* Now update the shader userdata pointer. */
258 si_emit_shader_pointer(sctx, &desc->atom);
259 }
260
261 static unsigned si_get_shader_user_data_base(unsigned shader)
262 {
263 switch (shader) {
264 case PIPE_SHADER_VERTEX:
265 return R_00B130_SPI_SHADER_USER_DATA_VS_0;
266 case PIPE_SHADER_GEOMETRY:
267 return R_00B230_SPI_SHADER_USER_DATA_GS_0;
268 case PIPE_SHADER_FRAGMENT:
269 return R_00B030_SPI_SHADER_USER_DATA_PS_0;
270 default:
271 assert(0);
272 return 0;
273 }
274 }
275
276 /* SAMPLER VIEWS */
277
278 static void si_emit_sampler_views(struct si_context *sctx, struct r600_atom *atom)
279 {
280 struct si_sampler_views *views = (struct si_sampler_views*)atom;
281
282 si_emit_descriptors(sctx, &views->desc, views->desc_data);
283 }
284
285 static void si_init_sampler_views(struct si_context *sctx,
286 struct si_sampler_views *views,
287 unsigned shader)
288 {
289 si_init_descriptors(sctx, &views->desc,
290 si_get_shader_user_data_base(shader) +
291 SI_SGPR_RESOURCE * 4,
292 8, SI_NUM_SAMPLER_VIEWS, si_emit_sampler_views);
293 }
294
295 static void si_release_sampler_views(struct si_sampler_views *views)
296 {
297 int i;
298
299 for (i = 0; i < Elements(views->views); i++) {
300 pipe_sampler_view_reference(&views->views[i], NULL);
301 }
302 si_release_descriptors(&views->desc);
303 }
304
305 static enum radeon_bo_priority si_get_resource_ro_priority(struct r600_resource *res)
306 {
307 if (res->b.b.target == PIPE_BUFFER)
308 return RADEON_PRIO_SHADER_BUFFER_RO;
309
310 if (res->b.b.nr_samples > 1)
311 return RADEON_PRIO_SHADER_TEXTURE_MSAA;
312
313 return RADEON_PRIO_SHADER_TEXTURE_RO;
314 }
315
316 static void si_sampler_views_begin_new_cs(struct si_context *sctx,
317 struct si_sampler_views *views)
318 {
319 unsigned mask = views->desc.enabled_mask;
320
321 /* Add relocations to the CS. */
322 while (mask) {
323 int i = u_bit_scan(&mask);
324 struct si_pipe_sampler_view *rview =
325 (struct si_pipe_sampler_view*)views->views[i];
326
327 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
328 rview->resource, RADEON_USAGE_READ,
329 si_get_resource_ro_priority(rview->resource));
330 }
331
332 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, views->desc.buffer,
333 RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_DATA);
334
335 si_emit_shader_pointer(sctx, &views->desc.atom);
336 }
337
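/* Bind a single sampler view slot: take a reference on the view, remember the
 * pointer to its 8-dword resource descriptor, and set the enabled and dirty
 * bits. A NULL view unbinds the slot and points it at null_desc (all zeros). */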
338 static void si_set_sampler_view(struct si_context *sctx, unsigned shader,
339 unsigned slot, struct pipe_sampler_view *view,
340 unsigned *view_desc)
341 {
342 struct si_sampler_views *views = &sctx->samplers[shader].views;
343
344 if (views->views[slot] == view)
345 return;
346
347 if (view) {
348 struct si_pipe_sampler_view *rview =
349 (struct si_pipe_sampler_view*)view;
350
351 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
352 rview->resource, RADEON_USAGE_READ,
353 si_get_resource_ro_priority(rview->resource));
354
355 pipe_sampler_view_reference(&views->views[slot], view);
356 views->desc_data[slot] = view_desc;
357 views->desc.enabled_mask |= 1 << slot;
358 } else {
359 pipe_sampler_view_reference(&views->views[slot], NULL);
360 views->desc_data[slot] = null_desc;
361 views->desc.enabled_mask &= ~(1 << slot);
362 }
363
364 views->desc.dirty_mask |= 1 << slot;
365 }
366
367 static void si_set_sampler_views(struct pipe_context *ctx,
368 unsigned shader, unsigned start,
369 unsigned count,
370 struct pipe_sampler_view **views)
371 {
372 struct si_context *sctx = (struct si_context *)ctx;
373 struct si_textures_info *samplers = &sctx->samplers[shader];
374 struct si_pipe_sampler_view **rviews = (struct si_pipe_sampler_view **)views;
375 int i;
376
377 if (!count || shader >= SI_NUM_SHADERS)
378 return;
379
380 for (i = 0; i < count; i++) {
381 unsigned slot = start + i;
382
383 if (!views[i]) {
384 samplers->depth_texture_mask &= ~(1 << slot);
385 samplers->compressed_colortex_mask &= ~(1 << slot);
386 si_set_sampler_view(sctx, shader, slot, NULL, NULL);
387 si_set_sampler_view(sctx, shader, SI_FMASK_TEX_OFFSET + slot,
388 NULL, NULL);
389 continue;
390 }
391
392 si_set_sampler_view(sctx, shader, slot, views[i], rviews[i]->state);
393
394 if (views[i]->texture->target != PIPE_BUFFER) {
395 struct r600_texture *rtex =
396 (struct r600_texture*)views[i]->texture;
397
398 if (rtex->is_depth && !rtex->is_flushing_texture) {
399 samplers->depth_texture_mask |= 1 << slot;
400 } else {
401 samplers->depth_texture_mask &= ~(1 << slot);
402 }
403 if (rtex->cmask.size || rtex->fmask.size) {
404 samplers->compressed_colortex_mask |= 1 << slot;
405 } else {
406 samplers->compressed_colortex_mask &= ~(1 << slot);
407 }
408
409 if (rtex->fmask.size) {
410 si_set_sampler_view(sctx, shader, SI_FMASK_TEX_OFFSET + slot,
411 views[i], rviews[i]->fmask_state);
412 } else {
413 si_set_sampler_view(sctx, shader, SI_FMASK_TEX_OFFSET + slot,
414 NULL, NULL);
415 }
416 }
417 }
418
419 sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE;
420 si_update_descriptors(sctx, &samplers->views.desc);
421 }
422
423 /* SAMPLER STATES */
424
425 static void si_emit_sampler_states(struct si_context *sctx, struct r600_atom *atom)
426 {
427 struct si_sampler_states *states = (struct si_sampler_states*)atom;
428
429 si_emit_descriptors(sctx, &states->desc, states->desc_data);
430 }
431
432 static void si_sampler_states_begin_new_cs(struct si_context *sctx,
433 struct si_sampler_states *states)
434 {
435 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, states->desc.buffer,
436 RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_DATA);
437 si_emit_shader_pointer(sctx, &states->desc.atom);
438 }
439
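/* Bind sampler state descriptors. The first two states are also stashed in
 * saved_states. Note that a NULL state leaves the existing descriptor in
 * place; the slot is merely un-dirtied. */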
440 void si_set_sampler_descriptors(struct si_context *sctx, unsigned shader,
441 unsigned start, unsigned count, void **states)
442 {
443 struct si_sampler_states *samplers = &sctx->samplers[shader].states;
444 struct si_pipe_sampler_state **sstates = (struct si_pipe_sampler_state**)states;
445 int i;
446
447 if (start == 0)
448 samplers->saved_states[0] = states[0];
449 if (start == 1)
450 samplers->saved_states[1] = states[0];
451 else if (start == 0 && count >= 2)
452 samplers->saved_states[1] = states[1];
453
454 for (i = 0; i < count; i++) {
455 unsigned slot = start + i;
456
457 if (!sstates[i]) {
458 samplers->desc.dirty_mask &= ~(1 << slot);
459 continue;
460 }
461
462 samplers->desc_data[slot] = sstates[i]->val;
463 samplers->desc.dirty_mask |= 1 << slot;
464 }
465
466 si_update_descriptors(sctx, &samplers->desc);
467 }
468
469 /* BUFFER RESOURCES */
470
471 static void si_emit_buffer_resources(struct si_context *sctx, struct r600_atom *atom)
472 {
473 struct si_buffer_resources *buffers = (struct si_buffer_resources*)atom;
474
475 si_emit_descriptors(sctx, &buffers->desc, buffers->desc_data);
476 }
477
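/* Set up a per-shader array of 4-dword buffer resource descriptors (used for
 * constant buffers and read-write/ring buffers). desc_data[i] points into the
 * flat desc_storage allocation, because si_emit_descriptors expects an array
 * of pointers to the individual elements. */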
478 static void si_init_buffer_resources(struct si_context *sctx,
479 struct si_buffer_resources *buffers,
480 unsigned num_buffers, unsigned shader,
481 unsigned shader_userdata_index,
482 enum radeon_bo_usage shader_usage,
483 enum radeon_bo_priority priority)
484 {
485 int i;
486
487 buffers->num_buffers = num_buffers;
488 buffers->shader_usage = shader_usage;
489 buffers->priority = priority;
490 buffers->buffers = CALLOC(num_buffers, sizeof(struct pipe_resource*));
491 buffers->desc_storage = CALLOC(num_buffers, sizeof(uint32_t) * 4);
492
493 /* si_emit_descriptors only accepts an array of arrays.
494 * This adds such an array. */
495 buffers->desc_data = CALLOC(num_buffers, sizeof(uint32_t*));
496 for (i = 0; i < num_buffers; i++) {
497 buffers->desc_data[i] = &buffers->desc_storage[i*4];
498 }
499
500 si_init_descriptors(sctx, &buffers->desc,
501 si_get_shader_user_data_base(shader) +
502 shader_userdata_index*4, 4, num_buffers,
503 si_emit_buffer_resources);
504 }
505
506 static void si_release_buffer_resources(struct si_buffer_resources *buffers)
507 {
508 int i;
509
510 for (i = 0; i < buffers->num_buffers; i++) {
511 pipe_resource_reference(&buffers->buffers[i], NULL);
512 }
513
514 FREE(buffers->buffers);
515 FREE(buffers->desc_storage);
516 FREE(buffers->desc_data);
517 si_release_descriptors(&buffers->desc);
518 }
519
520 static void si_buffer_resources_begin_new_cs(struct si_context *sctx,
521 struct si_buffer_resources *buffers)
522 {
523 unsigned mask = buffers->desc.enabled_mask;
524
525 /* Add relocations to the CS. */
526 while (mask) {
527 int i = u_bit_scan(&mask);
528
529 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
530 (struct r600_resource*)buffers->buffers[i],
531 buffers->shader_usage, buffers->priority);
532 }
533
534 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
535 buffers->desc.buffer, RADEON_USAGE_READWRITE,
536 RADEON_PRIO_SHADER_DATA);
537
538 si_emit_shader_pointer(sctx, &buffers->desc.atom);
539 }
540
541 /* VERTEX BUFFERS */
542
543 static void si_vertex_buffers_begin_new_cs(struct si_context *sctx)
544 {
545 struct si_descriptors *desc = &sctx->vertex_buffers;
546 int count = sctx->vertex_elements ? sctx->vertex_elements->count : 0;
547 int i;
548
549 for (i = 0; i < count; i++) {
550 int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
551
552 if (vb >= Elements(sctx->vertex_buffer))
553 continue;
554 if (!sctx->vertex_buffer[vb].buffer)
555 continue;
556
557 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
558 (struct r600_resource*)sctx->vertex_buffer[vb].buffer,
559 RADEON_USAGE_READ, RADEON_PRIO_SHADER_BUFFER_RO);
560 }
561 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
562 desc->buffer, RADEON_USAGE_READ,
563 RADEON_PRIO_SHADER_DATA);
564
565 si_emit_shader_pointer(sctx, &desc->atom);
566 }
567
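/* Rebuild the vertex buffer descriptors: one 4-dword buffer descriptor per
 * vertex element, written into a freshly allocated upload buffer. NUM_RECORDS
 * is computed so that the last fetchable vertex still fits entirely inside
 * the bound buffer. */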
568 void si_update_vertex_buffers(struct si_context *sctx)
569 {
570 struct pipe_context *ctx = &sctx->b.b;
571 struct si_descriptors *desc = &sctx->vertex_buffers;
572 bool bound[SI_NUM_VERTEX_BUFFERS] = {};
573 	unsigned i, count = sctx->vertex_elements ? sctx->vertex_elements->count : 0;
574 uint64_t va;
575 uint32_t *ptr;
576
577 if (!count || !sctx->vertex_elements)
578 return;
579
580 /* Vertex buffer descriptors are the only ones which are uploaded
581 * directly through a staging buffer and don't go through
582 * the fine-grained upload path.
583 */
584 u_upload_alloc(sctx->b.uploader, 0, count * 16, &desc->buffer_offset,
585 (struct pipe_resource**)&desc->buffer, (void**)&ptr);
586
587 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
588 desc->buffer, RADEON_USAGE_READ,
589 RADEON_PRIO_SHADER_DATA);
590
591 assert(count <= SI_NUM_VERTEX_BUFFERS);
592 assert(desc->current_context_id == 0);
593
594 for (i = 0; i < count; i++) {
595 struct pipe_vertex_element *ve = &sctx->vertex_elements->elements[i];
596 struct pipe_vertex_buffer *vb;
597 struct r600_resource *rbuffer;
598 unsigned offset;
599 uint32_t *desc = &ptr[i*4];
600
601 if (ve->vertex_buffer_index >= Elements(sctx->vertex_buffer)) {
602 memset(desc, 0, 16);
603 continue;
604 }
605
606 vb = &sctx->vertex_buffer[ve->vertex_buffer_index];
607 rbuffer = (struct r600_resource*)vb->buffer;
608 if (rbuffer == NULL) {
609 memset(desc, 0, 16);
610 continue;
611 }
612
613 offset = vb->buffer_offset + ve->src_offset;
614
615 va = r600_resource_va(ctx->screen, (void*)rbuffer);
616 va += offset;
617
618 /* Fill in T# buffer resource description */
619 desc[0] = va & 0xFFFFFFFF;
620 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
621 S_008F04_STRIDE(vb->stride);
622 if (vb->stride)
623 /* Round up by rounding down and adding 1 */
624 desc[2] = (vb->buffer->width0 - offset -
625 sctx->vertex_elements->format_size[i]) /
626 vb->stride + 1;
627 else
628 desc[2] = vb->buffer->width0 - offset;
629
630 desc[3] = sctx->vertex_elements->rsrc_word3[i];
631
632 if (!bound[ve->vertex_buffer_index]) {
633 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
634 (struct r600_resource*)vb->buffer,
635 RADEON_USAGE_READ, RADEON_PRIO_SHADER_BUFFER_RO);
636 bound[ve->vertex_buffer_index] = true;
637 }
638 }
639
640 desc->atom.num_dw = 8; /* update 2 shader pointers (VS+ES) */
641 desc->atom.dirty = true;
642
643 /* Don't flush the const cache. It would have a very negative effect
644 * on performance (confirmed by testing). New descriptors are always
645 * uploaded to a fresh new buffer, so I don't think flushing the const
646 * cache is needed. */
647 sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE;
648 }
649
650
651 /* CONSTANT BUFFERS */
652
653 void si_upload_const_buffer(struct si_context *sctx, struct r600_resource **rbuffer,
654 const uint8_t *ptr, unsigned size, uint32_t *const_offset)
655 {
656 if (SI_BIG_ENDIAN) {
657 uint32_t *tmpPtr;
658 unsigned i;
659
660 if (!(tmpPtr = malloc(size))) {
661 R600_ERR("Failed to allocate BE swap buffer.\n");
662 return;
663 }
664
665 for (i = 0; i < size / 4; ++i) {
666 tmpPtr[i] = util_cpu_to_le32(((uint32_t *)ptr)[i]);
667 }
668
669 u_upload_data(sctx->b.uploader, 0, size, tmpPtr, const_offset,
670 (struct pipe_resource**)rbuffer);
671
672 free(tmpPtr);
673 } else {
674 u_upload_data(sctx->b.uploader, 0, size, ptr, const_offset,
675 (struct pipe_resource**)rbuffer);
676 }
677 }
678
679 static void si_set_constant_buffer(struct pipe_context *ctx, uint shader, uint slot,
680 struct pipe_constant_buffer *input)
681 {
682 struct si_context *sctx = (struct si_context *)ctx;
683 struct si_buffer_resources *buffers = &sctx->const_buffers[shader];
684
685 if (shader >= SI_NUM_SHADERS)
686 return;
687
688 assert(slot < buffers->num_buffers);
689 pipe_resource_reference(&buffers->buffers[slot], NULL);
690
691 /* CIK cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
692 * with a NULL buffer). We need to use a dummy buffer instead. */
693 if (sctx->b.chip_class == CIK &&
694 (!input || (!input->buffer && !input->user_buffer)))
695 input = &sctx->null_const_buf;
696
697 if (input && (input->buffer || input->user_buffer)) {
698 struct pipe_resource *buffer = NULL;
699 uint64_t va;
700
701 /* Upload the user buffer if needed. */
702 if (input->user_buffer) {
703 unsigned buffer_offset;
704
705 si_upload_const_buffer(sctx,
706 (struct r600_resource**)&buffer, input->user_buffer,
707 input->buffer_size, &buffer_offset);
708 va = r600_resource_va(ctx->screen, buffer) + buffer_offset;
709 } else {
710 pipe_resource_reference(&buffer, input->buffer);
711 va = r600_resource_va(ctx->screen, buffer) + input->buffer_offset;
712 }
713
714 /* Set the descriptor. */
715 uint32_t *desc = buffers->desc_data[slot];
716 desc[0] = va;
717 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
718 S_008F04_STRIDE(0);
719 desc[2] = input->buffer_size;
720 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
721 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
722 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
723 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
724 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
725 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
726
727 buffers->buffers[slot] = buffer;
728 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
729 (struct r600_resource*)buffer,
730 buffers->shader_usage, buffers->priority);
731 buffers->desc.enabled_mask |= 1 << slot;
732 } else {
733 /* Clear the descriptor. */
734 memset(buffers->desc_data[slot], 0, sizeof(uint32_t) * 4);
735 buffers->desc.enabled_mask &= ~(1 << slot);
736 }
737
738 buffers->desc.dirty_mask |= 1 << slot;
739 si_update_descriptors(sctx, &buffers->desc);
740 }
741
742 /* RING BUFFERS */
743
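/* Bind an internal ring buffer as a read-write shader resource. element_size
 * and index_stride are converted below from their raw values (2/4/8/16 and
 * 8/16/32/64) to the encoded values expected by the descriptor fields. */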
744 void si_set_ring_buffer(struct pipe_context *ctx, uint shader, uint slot,
745 struct pipe_constant_buffer *input,
746 unsigned stride, unsigned num_records,
747 bool add_tid, bool swizzle,
748 unsigned element_size, unsigned index_stride)
749 {
750 struct si_context *sctx = (struct si_context *)ctx;
751 struct si_buffer_resources *buffers = &sctx->rw_buffers[shader];
752
753 if (shader >= SI_NUM_SHADERS)
754 return;
755
756 /* The stride field in the resource descriptor has 14 bits */
757 assert(stride < (1 << 14));
758
759 assert(slot < buffers->num_buffers);
760 pipe_resource_reference(&buffers->buffers[slot], NULL);
761
762 if (input && input->buffer) {
763 uint64_t va;
764
765 va = r600_resource_va(ctx->screen, input->buffer);
766
767 switch (element_size) {
768 default:
769 assert(!"Unsupported ring buffer element size");
770 case 0:
771 case 2:
772 element_size = 0;
773 break;
774 case 4:
775 element_size = 1;
776 break;
777 case 8:
778 element_size = 2;
779 break;
780 case 16:
781 element_size = 3;
782 break;
783 }
784
785 switch (index_stride) {
786 default:
787 assert(!"Unsupported ring buffer index stride");
788 case 0:
789 case 8:
790 index_stride = 0;
791 break;
792 case 16:
793 index_stride = 1;
794 break;
795 case 32:
796 index_stride = 2;
797 break;
798 case 64:
799 index_stride = 3;
800 break;
801 }
802
803 /* Set the descriptor. */
804 uint32_t *desc = buffers->desc_data[slot];
805 desc[0] = va;
806 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
807 S_008F04_STRIDE(stride) |
808 S_008F04_SWIZZLE_ENABLE(swizzle);
809 desc[2] = num_records;
810 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
811 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
812 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
813 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
814 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
815 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
816 S_008F0C_ELEMENT_SIZE(element_size) |
817 S_008F0C_INDEX_STRIDE(index_stride) |
818 S_008F0C_ADD_TID_ENABLE(add_tid);
819
820 pipe_resource_reference(&buffers->buffers[slot], input->buffer);
821 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
822 (struct r600_resource*)input->buffer,
823 buffers->shader_usage, buffers->priority);
824 buffers->desc.enabled_mask |= 1 << slot;
825 } else {
826 /* Clear the descriptor. */
827 memset(buffers->desc_data[slot], 0, sizeof(uint32_t) * 4);
828 buffers->desc.enabled_mask &= ~(1 << slot);
829 }
830
831 buffers->desc.dirty_mask |= 1 << slot;
832 si_update_descriptors(sctx, &buffers->desc);
833 }
834
835 /* STREAMOUT BUFFERS */
836
837 static void si_set_streamout_targets(struct pipe_context *ctx,
838 unsigned num_targets,
839 struct pipe_stream_output_target **targets,
840 const unsigned *offsets)
841 {
842 struct si_context *sctx = (struct si_context *)ctx;
843 struct si_buffer_resources *buffers = &sctx->rw_buffers[PIPE_SHADER_VERTEX];
844 unsigned old_num_targets = sctx->b.streamout.num_targets;
845 unsigned i, bufidx;
846
847 /* Streamout buffers must be bound in 2 places:
848 * 1) in VGT by setting the VGT_STRMOUT registers
849 * 2) as shader resources
850 */
851
852 /* Set the VGT regs. */
853 r600_set_streamout_targets(ctx, num_targets, targets, offsets);
854
855 	/* Set the shader resources. */
856 for (i = 0; i < num_targets; i++) {
857 bufidx = SI_SO_BUF_OFFSET + i;
858
859 if (targets[i]) {
860 struct pipe_resource *buffer = targets[i]->buffer;
861 uint64_t va = r600_resource_va(ctx->screen, buffer);
862
863 /* Set the descriptor. */
864 uint32_t *desc = buffers->desc_data[bufidx];
865 desc[0] = va;
866 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
867 desc[2] = 0xffffffff;
868 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
869 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
870 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
871 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
872
873 /* Set the resource. */
874 pipe_resource_reference(&buffers->buffers[bufidx],
875 buffer);
876 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
877 (struct r600_resource*)buffer,
878 buffers->shader_usage, buffers->priority);
879 buffers->desc.enabled_mask |= 1 << bufidx;
880 } else {
881 /* Clear the descriptor and unset the resource. */
882 memset(buffers->desc_data[bufidx], 0,
883 sizeof(uint32_t) * 4);
884 pipe_resource_reference(&buffers->buffers[bufidx],
885 NULL);
886 buffers->desc.enabled_mask &= ~(1 << bufidx);
887 }
888 buffers->desc.dirty_mask |= 1 << bufidx;
889 }
890 for (; i < old_num_targets; i++) {
891 bufidx = SI_SO_BUF_OFFSET + i;
892 /* Clear the descriptor and unset the resource. */
893 memset(buffers->desc_data[bufidx], 0, sizeof(uint32_t) * 4);
894 pipe_resource_reference(&buffers->buffers[bufidx], NULL);
895 buffers->desc.enabled_mask &= ~(1 << bufidx);
896 buffers->desc.dirty_mask |= 1 << bufidx;
897 }
898
899 si_update_descriptors(sctx, &buffers->desc);
900 }
901
902 static void si_desc_reset_buffer_offset(struct pipe_context *ctx,
903 uint32_t *desc, uint64_t old_buf_va,
904 struct pipe_resource *new_buf)
905 {
906 /* Retrieve the buffer offset from the descriptor. */
907 uint64_t old_desc_va =
908 desc[0] | ((uint64_t)G_008F04_BASE_ADDRESS_HI(desc[1]) << 32);
909
910 assert(old_buf_va <= old_desc_va);
911 uint64_t offset_within_buffer = old_desc_va - old_buf_va;
912
913 /* Update the descriptor. */
914 uint64_t va = r600_resource_va(ctx->screen, new_buf) + offset_within_buffer;
915
916 desc[0] = va;
917 desc[1] = (desc[1] & C_008F04_BASE_ADDRESS_HI) |
918 S_008F04_BASE_ADDRESS_HI(va >> 32);
919 }
920
921 /* BUFFER DISCARD/INVALIDATION */
922
923 /* Reallocate a buffer and update all resource bindings where the buffer is
924 * bound.
925 *
926 * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
927 * idle by discarding its contents. Apps usually tell us when to do this using
928 * map_buffer flags, for example.
929 */
930 static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
931 {
932 struct si_context *sctx = (struct si_context*)ctx;
933 struct r600_resource *rbuffer = r600_resource(buf);
934 unsigned i, shader, alignment = rbuffer->buf->alignment;
935 uint64_t old_va = r600_resource_va(ctx->screen, buf);
936
937 /* Reallocate the buffer in the same pipe_resource. */
938 r600_init_resource(&sctx->screen->b, rbuffer, rbuffer->b.b.width0,
939 alignment, TRUE);
940
941 /* We changed the buffer, now we need to bind it where the old one
942 * was bound. This consists of 2 things:
943 * 1) Updating the resource descriptor and dirtying it.
944 * 2) Adding a relocation to the CS, so that it's usable.
945 */
946
947 /* Vertex buffers. */
948 /* Nothing to do. Vertex buffer bindings are updated before every draw call. */
949
950 /* Read/Write buffers. */
951 for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
952 struct si_buffer_resources *buffers = &sctx->rw_buffers[shader];
953 bool found = false;
954 uint32_t mask = buffers->desc.enabled_mask;
955
956 while (mask) {
957 i = u_bit_scan(&mask);
958 if (buffers->buffers[i] == buf) {
959 si_desc_reset_buffer_offset(ctx, buffers->desc_data[i],
960 old_va, buf);
961
962 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
963 rbuffer, buffers->shader_usage,
964 buffers->priority);
965
966 buffers->desc.dirty_mask |= 1 << i;
967 found = true;
968
969 if (i >= SI_SO_BUF_OFFSET && shader == PIPE_SHADER_VERTEX) {
970 /* Update the streamout state. */
971 if (sctx->b.streamout.begin_emitted) {
972 r600_emit_streamout_end(&sctx->b);
973 }
974 sctx->b.streamout.append_bitmask =
975 sctx->b.streamout.enabled_mask;
976 r600_streamout_buffers_dirty(&sctx->b);
977 }
978 }
979 }
980 if (found) {
981 si_update_descriptors(sctx, &buffers->desc);
982 }
983 }
984
985 /* Constant buffers. */
986 for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
987 struct si_buffer_resources *buffers = &sctx->const_buffers[shader];
988 bool found = false;
989 uint32_t mask = buffers->desc.enabled_mask;
990
991 while (mask) {
992 unsigned i = u_bit_scan(&mask);
993 if (buffers->buffers[i] == buf) {
994 si_desc_reset_buffer_offset(ctx, buffers->desc_data[i],
995 old_va, buf);
996
997 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
998 rbuffer, buffers->shader_usage,
999 buffers->priority);
1000
1001 buffers->desc.dirty_mask |= 1 << i;
1002 found = true;
1003 }
1004 }
1005 if (found) {
1006 si_update_descriptors(sctx, &buffers->desc);
1007 }
1008 }
1009
1010 /* Texture buffers. */
1011 for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1012 struct si_sampler_views *views = &sctx->samplers[shader].views;
1013 bool found = false;
1014 uint32_t mask = views->desc.enabled_mask;
1015
1016 while (mask) {
1017 unsigned i = u_bit_scan(&mask);
1018 if (views->views[i]->texture == buf) {
1019 /* This updates the sampler view directly. */
1020 si_desc_reset_buffer_offset(ctx, views->desc_data[i],
1021 old_va, buf);
1022
1023 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
1024 rbuffer, RADEON_USAGE_READ,
1025 RADEON_PRIO_SHADER_BUFFER_RO);
1026
1027 views->desc.dirty_mask |= 1 << i;
1028 found = true;
1029 }
1030 }
1031 if (found) {
1032 si_update_descriptors(sctx, &views->desc);
1033 }
1034 }
1035 }
1036
1037 /* CP DMA */
1038
1039 /* The max number of bytes to copy per packet. */
1040 #define CP_DMA_MAX_BYTE_COUNT ((1 << 21) - 8)
1041
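/* Clear a buffer with CP DMA. Offsets or sizes that aren't dword-aligned fall
 * back to a CPU map. The clear is split into chunks of at most
 * CP_DMA_MAX_BYTE_COUNT bytes (the packet's byte count field is 21 bits wide).
 * Caches are flushed and older CP DMA packets are waited for before the first
 * packet; CP_SYNC is set only on the last packet so that later work waits for
 * the whole clear. */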
1042 static void si_clear_buffer(struct pipe_context *ctx, struct pipe_resource *dst,
1043 unsigned offset, unsigned size, unsigned value)
1044 {
1045 struct si_context *sctx = (struct si_context*)ctx;
1046
1047 if (!size)
1048 return;
1049
1050 	/* Mark the buffer range of the destination as valid (initialized),
1051 * so that transfer_map knows it should wait for the GPU when mapping
1052 * that range. */
1053 util_range_add(&r600_resource(dst)->valid_buffer_range, offset,
1054 offset + size);
1055
1056 /* Fallback for unaligned clears. */
1057 if (offset % 4 != 0 || size % 4 != 0) {
1058 uint32_t *map = sctx->b.ws->buffer_map(r600_resource(dst)->cs_buf,
1059 sctx->b.rings.gfx.cs,
1060 PIPE_TRANSFER_WRITE);
1061 size /= 4;
1062 for (unsigned i = 0; i < size; i++)
1063 *map++ = value;
1064 return;
1065 }
1066
1067 uint64_t va = r600_resource_va(&sctx->screen->b.b, dst) + offset;
1068
1069 /* Flush the caches where the resource is bound. */
1070 /* XXX only flush the caches where the buffer is bound. */
1071 sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
1072 R600_CONTEXT_INV_CONST_CACHE |
1073 R600_CONTEXT_FLUSH_AND_INV_CB |
1074 R600_CONTEXT_FLUSH_AND_INV_DB |
1075 R600_CONTEXT_FLUSH_AND_INV_CB_META |
1076 R600_CONTEXT_FLUSH_AND_INV_DB_META;
1077 sctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
1078
1079 while (size) {
1080 unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
1081 unsigned dma_flags = 0;
1082
1083 si_need_cs_space(sctx, 7 + (sctx->b.flags ? sctx->cache_flush.num_dw : 0),
1084 FALSE);
1085
1086 /* This must be done after need_cs_space. */
1087 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
1088 (struct r600_resource*)dst, RADEON_USAGE_WRITE,
1089 RADEON_PRIO_MIN);
1090
1091 /* Flush the caches for the first copy only.
1092 * Also wait for the previous CP DMA operations. */
1093 if (sctx->b.flags) {
1094 si_emit_cache_flush(&sctx->b, NULL);
1095 dma_flags |= SI_CP_DMA_RAW_WAIT; /* same as WAIT_UNTIL=CP_DMA_IDLE */
1096 }
1097
1098 /* Do the synchronization after the last copy, so that all data is written to memory. */
1099 if (size == byte_count)
1100 dma_flags |= R600_CP_DMA_SYNC;
1101
1102 /* Emit the clear packet. */
1103 si_emit_cp_dma_clear_buffer(sctx, va, byte_count, value, dma_flags);
1104
1105 size -= byte_count;
1106 va += byte_count;
1107 }
1108
1109 /* Flush the caches again in case the 3D engine has been prefetching
1110 * the resource. */
1111 /* XXX only flush the caches where the buffer is bound. */
1112 sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
1113 R600_CONTEXT_INV_CONST_CACHE |
1114 R600_CONTEXT_FLUSH_AND_INV_CB |
1115 R600_CONTEXT_FLUSH_AND_INV_DB |
1116 R600_CONTEXT_FLUSH_AND_INV_CB_META |
1117 R600_CONTEXT_FLUSH_AND_INV_DB_META;
1118 }
1119
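/* Copy between buffers with CP DMA, split into CP_DMA_MAX_BYTE_COUNT chunks.
 * The cache flush, RAW wait and CP_SYNC handling mirrors si_clear_buffer. */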
1120 void si_copy_buffer(struct si_context *sctx,
1121 struct pipe_resource *dst, struct pipe_resource *src,
1122 uint64_t dst_offset, uint64_t src_offset, unsigned size)
1123 {
1124 if (!size)
1125 return;
1126
1127 	/* Mark the buffer range of the destination as valid (initialized),
1128 * so that transfer_map knows it should wait for the GPU when mapping
1129 * that range. */
1130 util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
1131 dst_offset + size);
1132
1133 dst_offset += r600_resource_va(&sctx->screen->b.b, dst);
1134 src_offset += r600_resource_va(&sctx->screen->b.b, src);
1135
1136 /* Flush the caches where the resource is bound. */
1137 sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
1138 R600_CONTEXT_INV_CONST_CACHE |
1139 R600_CONTEXT_FLUSH_AND_INV_CB |
1140 R600_CONTEXT_FLUSH_AND_INV_DB |
1141 R600_CONTEXT_FLUSH_AND_INV_CB_META |
1142 R600_CONTEXT_FLUSH_AND_INV_DB_META |
1143 R600_CONTEXT_WAIT_3D_IDLE;
1144
1145 while (size) {
1146 unsigned sync_flags = 0;
1147 unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
1148
1149 si_need_cs_space(sctx, 7 + (sctx->b.flags ? sctx->cache_flush.num_dw : 0), FALSE);
1150
1151 /* Flush the caches for the first copy only. Also wait for old CP DMA packets to complete. */
1152 if (sctx->b.flags) {
1153 si_emit_cache_flush(&sctx->b, NULL);
1154 sync_flags |= SI_CP_DMA_RAW_WAIT;
1155 }
1156
1157 /* Do the synchronization after the last copy, so that all data is written to memory. */
1158 if (size == byte_count) {
1159 sync_flags |= R600_CP_DMA_SYNC;
1160 }
1161
1162 		/* This must be done after si_need_cs_space. */
1163 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, (struct r600_resource*)src,
1164 RADEON_USAGE_READ, RADEON_PRIO_MIN);
1165 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, (struct r600_resource*)dst,
1166 RADEON_USAGE_WRITE, RADEON_PRIO_MIN);
1167
1168 si_emit_cp_dma_copy_buffer(sctx, dst_offset, src_offset, byte_count, sync_flags);
1169
1170 size -= byte_count;
1171 src_offset += byte_count;
1172 dst_offset += byte_count;
1173 }
1174
1175 sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
1176 R600_CONTEXT_INV_CONST_CACHE |
1177 R600_CONTEXT_FLUSH_AND_INV_CB |
1178 R600_CONTEXT_FLUSH_AND_INV_DB |
1179 R600_CONTEXT_FLUSH_AND_INV_CB_META |
1180 R600_CONTEXT_FLUSH_AND_INV_DB_META;
1181 }
1182
1183 /* INIT/DEINIT */
1184
1185 void si_init_all_descriptors(struct si_context *sctx)
1186 {
1187 int i;
1188
1189 for (i = 0; i < SI_NUM_SHADERS; i++) {
1190 si_init_buffer_resources(sctx, &sctx->const_buffers[i],
1191 SI_NUM_CONST_BUFFERS, i, SI_SGPR_CONST,
1192 RADEON_USAGE_READ, RADEON_PRIO_SHADER_BUFFER_RO);
1193 si_init_buffer_resources(sctx, &sctx->rw_buffers[i],
1194 i == PIPE_SHADER_VERTEX ?
1195 SI_NUM_RW_BUFFERS : SI_NUM_RING_BUFFERS,
1196 i, SI_SGPR_RW_BUFFERS,
1197 RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RESOURCE_RW);
1198
1199 si_init_sampler_views(sctx, &sctx->samplers[i].views, i);
1200
1201 si_init_descriptors(sctx, &sctx->samplers[i].states.desc,
1202 si_get_shader_user_data_base(i) + SI_SGPR_SAMPLER * 4,
1203 4, SI_NUM_SAMPLER_STATES, si_emit_sampler_states);
1204
1205 sctx->atoms.s.const_buffers[i] = &sctx->const_buffers[i].desc.atom;
1206 sctx->atoms.s.rw_buffers[i] = &sctx->rw_buffers[i].desc.atom;
1207 sctx->atoms.s.sampler_views[i] = &sctx->samplers[i].views.desc.atom;
1208 sctx->atoms.s.sampler_states[i] = &sctx->samplers[i].states.desc.atom;
1209 }
1210
1211 si_init_descriptors(sctx, &sctx->vertex_buffers,
1212 si_get_shader_user_data_base(PIPE_SHADER_VERTEX) +
1213 SI_SGPR_VERTEX_BUFFER*4, 4, SI_NUM_VERTEX_BUFFERS,
1214 si_emit_shader_pointer);
1215 sctx->atoms.s.vertex_buffers = &sctx->vertex_buffers.atom;
1216
1217 /* Set pipe_context functions. */
1218 sctx->b.b.set_constant_buffer = si_set_constant_buffer;
1219 sctx->b.b.set_sampler_views = si_set_sampler_views;
1220 sctx->b.b.set_stream_output_targets = si_set_streamout_targets;
1221 sctx->b.clear_buffer = si_clear_buffer;
1222 sctx->b.invalidate_buffer = si_invalidate_buffer;
1223 }
1224
1225 void si_release_all_descriptors(struct si_context *sctx)
1226 {
1227 int i;
1228
1229 for (i = 0; i < SI_NUM_SHADERS; i++) {
1230 si_release_buffer_resources(&sctx->const_buffers[i]);
1231 si_release_buffer_resources(&sctx->rw_buffers[i]);
1232 si_release_sampler_views(&sctx->samplers[i].views);
1233 si_release_descriptors(&sctx->samplers[i].states.desc);
1234 }
1235 si_release_descriptors(&sctx->vertex_buffers);
1236 }
1237
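/* Called at the beginning of a new command stream: re-add all buffer
 * relocations and re-emit the user data pointers, since nothing from the
 * previous CS carries over. */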
1238 void si_all_descriptors_begin_new_cs(struct si_context *sctx)
1239 {
1240 int i;
1241
1242 for (i = 0; i < SI_NUM_SHADERS; i++) {
1243 si_buffer_resources_begin_new_cs(sctx, &sctx->const_buffers[i]);
1244 si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers[i]);
1245 si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i].views);
1246 si_sampler_states_begin_new_cs(sctx, &sctx->samplers[i].states);
1247 }
1248 si_vertex_buffers_begin_new_cs(sctx);
1249 }