gallium/radeon: implement randomized SDMA texture copy testing (v2)
src/gallium/drivers/radeon/r600_pipe_common.c
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors: Marek Olšák <maraeo@gmail.com>
 *
 */

#include "r600_pipe_common.h"
#include "r600_cs.h"
#include "tgsi/tgsi_parse.h"
#include "util/list.h"
#include "util/u_draw_quad.h"
#include "util/u_memory.h"
#include "util/u_format_s3tc.h"
#include "util/u_upload_mgr.h"
#include "os/os_time.h"
#include "vl/vl_decoder.h"
#include "vl/vl_video_buffer.h"
#include "radeon/radeon_video.h"
#include <inttypes.h>

#ifndef HAVE_LLVM
#define HAVE_LLVM 0
#endif

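/* A single flush can submit work to both the GFX and SDMA rings, so a
 * pipe_fence_handle handed to the state tracker may wrap one winsys fence
 * per ring; both must signal before the fence is considered done. */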
struct r600_multi_fence {
        struct pipe_reference reference;
        struct pipe_fence_handle *gfx;
        struct pipe_fence_handle *sdma;
};

/*
 * shader binary helpers.
 */
void radeon_shader_binary_init(struct radeon_shader_binary *b)
{
        memset(b, 0, sizeof(*b));
}

void radeon_shader_binary_clean(struct radeon_shader_binary *b)
{
        if (!b)
                return;
        FREE(b->code);
        FREE(b->config);
        FREE(b->rodata);
        FREE(b->global_symbol_offsets);
        FREE(b->relocs);
        FREE(b->disasm_string);
}

/*
 * pipe_context
 */

void r600_draw_rectangle(struct blitter_context *blitter,
                         int x1, int y1, int x2, int y2, float depth,
                         enum blitter_attrib_type type,
                         const union pipe_color_union *attrib)
{
        struct r600_common_context *rctx =
                (struct r600_common_context*)util_blitter_get_pipe(blitter);
        struct pipe_viewport_state viewport;
        struct pipe_resource *buf = NULL;
        unsigned offset = 0;
        float *vb;

        if (type == UTIL_BLITTER_ATTRIB_TEXCOORD) {
                util_blitter_draw_rectangle(blitter, x1, y1, x2, y2, depth, type, attrib);
                return;
        }

        /* Some operations (like color resolve on r6xx) don't work
         * with the conventional primitive types.
         * One that works is PT_RECTLIST, which we use here. */

        /* setup viewport */
        viewport.scale[0] = 1.0f;
        viewport.scale[1] = 1.0f;
        viewport.scale[2] = 1.0f;
        viewport.translate[0] = 0.0f;
        viewport.translate[1] = 0.0f;
        viewport.translate[2] = 0.0f;
        rctx->b.set_viewport_states(&rctx->b, 0, 1, &viewport);

        /* Upload vertices. The hw rectangle has only 3 vertices;
         * presumably the 4th one is derived from the first 3.
         * The vertex specification should match u_blitter's vertex element state. */
        u_upload_alloc(rctx->uploader, 0, sizeof(float) * 24, 256, &offset, &buf, (void**)&vb);
        if (!buf)
                return;

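        /* Layout expected by u_blitter: each vertex is 8 floats, a float4
         * position at offset 0 and a float4 attribute at offset 4 (written
         * below only when attrib is non-NULL), hence the strides of 8. */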
        vb[0] = x1;
        vb[1] = y1;
        vb[2] = depth;
        vb[3] = 1;

        vb[8] = x1;
        vb[9] = y2;
        vb[10] = depth;
        vb[11] = 1;

        vb[16] = x2;
        vb[17] = y1;
        vb[18] = depth;
        vb[19] = 1;

        if (attrib) {
                memcpy(vb+4, attrib->f, sizeof(float)*4);
                memcpy(vb+12, attrib->f, sizeof(float)*4);
                memcpy(vb+20, attrib->f, sizeof(float)*4);
        }

        /* draw */
        util_draw_vertex_buffer(&rctx->b, NULL, buf, blitter->vb_slot, offset,
                                R600_PRIM_RECTANGLE_LIST, 3, 2);
        pipe_resource_reference(&buf, NULL);
}

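/* Ensure the SDMA IB has room for at least num_dw more dwords, flushing it
 * if it doesn't. The GFX IB is flushed first when it has work queued,
 * presumably so GFX work is submitted before SDMA work that may depend
 * on it. */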
void r600_need_dma_space(struct r600_common_context *ctx, unsigned num_dw)
{
        /* Flush the GFX IB if it's not empty. */
        if (ctx->gfx.cs->cdw > ctx->initial_gfx_cs_size)
                ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);

        /* Flush if there's not enough space. */
        if ((num_dw + ctx->dma.cs->cdw) > ctx->dma.cs->max_dw) {
                ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
                assert((num_dw + ctx->dma.cs->cdw) <= ctx->dma.cs->max_dw);
        }
}

static void r600_memory_barrier(struct pipe_context *ctx, unsigned flags)
{
}

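/* Suspend active queries and stop streamout before a flush so that their
 * state can be re-emitted into the next IB. */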
void r600_preflush_suspend_features(struct r600_common_context *ctx)
{
        /* suspend queries */
        if (!LIST_IS_EMPTY(&ctx->active_queries))
                r600_suspend_queries(ctx);

        ctx->streamout.suspended = false;
        if (ctx->streamout.begin_emitted) {
                r600_emit_streamout_end(ctx);
                ctx->streamout.suspended = true;
        }
}

void r600_postflush_resume_features(struct r600_common_context *ctx)
{
        if (ctx->streamout.suspended) {
                ctx->streamout.append_bitmask = ctx->streamout.enabled_mask;
                r600_streamout_buffers_dirty(ctx);
        }

        /* resume queries */
        if (!LIST_IS_EMPTY(&ctx->active_queries))
                r600_resume_queries(ctx);
}

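/* pipe_context::flush entry point: flushes both rings and, when the caller
 * asked for a fence, wraps the per-ring fences in an r600_multi_fence. */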
static void r600_flush_from_st(struct pipe_context *ctx,
                               struct pipe_fence_handle **fence,
                               unsigned flags)
{
        struct pipe_screen *screen = ctx->screen;
        struct r600_common_context *rctx = (struct r600_common_context *)ctx;
        unsigned rflags = 0;
        struct pipe_fence_handle *gfx_fence = NULL;
        struct pipe_fence_handle *sdma_fence = NULL;

        if (flags & PIPE_FLUSH_END_OF_FRAME)
                rflags |= RADEON_FLUSH_END_OF_FRAME;

        if (rctx->dma.cs) {
                rctx->dma.flush(rctx, rflags, fence ? &sdma_fence : NULL);
        }
        rctx->gfx.flush(rctx, rflags, fence ? &gfx_fence : NULL);

        /* Both engines can signal out of order, so we need to keep both fences. */
        if (gfx_fence || sdma_fence) {
                struct r600_multi_fence *multi_fence =
                        CALLOC_STRUCT(r600_multi_fence);
                if (!multi_fence)
                        return;

                multi_fence->reference.count = 1;
                multi_fence->gfx = gfx_fence;
                multi_fence->sdma = sdma_fence;

                screen->fence_reference(screen, fence, NULL);
                *fence = (struct pipe_fence_handle*)multi_fence;
        }
}

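/* Winsys flush callback for the SDMA ring. The last SDMA fence is cached
 * so that a fence can still be handed out when the IB is empty. */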
static void r600_flush_dma_ring(void *ctx, unsigned flags,
                                struct pipe_fence_handle **fence)
{
        struct r600_common_context *rctx = (struct r600_common_context *)ctx;
        struct radeon_winsys_cs *cs = rctx->dma.cs;

        if (cs->cdw)
                rctx->ws->cs_flush(cs, flags, &rctx->last_sdma_fence);
        if (fence)
                rctx->ws->fence_reference(fence, rctx->last_sdma_fence);
}

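/* Detect a context reset by comparing the kernel's GPU reset counter with
 * the value sampled when this context was created. */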
static enum pipe_reset_status r600_get_reset_status(struct pipe_context *ctx)
{
        struct r600_common_context *rctx = (struct r600_common_context *)ctx;
        unsigned latest = rctx->ws->query_value(rctx->ws,
                                                RADEON_GPU_RESET_COUNTER);

        if (rctx->gpu_reset_counter == latest)
                return PIPE_NO_RESET;

        rctx->gpu_reset_counter = latest;
        return PIPE_UNKNOWN_CONTEXT_RESET;
}

static void r600_set_debug_callback(struct pipe_context *ctx,
                                    const struct pipe_debug_callback *cb)
{
        struct r600_common_context *rctx = (struct r600_common_context *)ctx;

        if (cb)
                rctx->debug = *cb;
        else
                memset(&rctx->debug, 0, sizeof(rctx->debug));
}

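/* Initialize the context state shared by r600g and radeonsi: the transfer
 * slab, upload manager, winsys context, and (when available) the SDMA ring. */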
bool r600_common_context_init(struct r600_common_context *rctx,
                              struct r600_common_screen *rscreen)
{
        util_slab_create(&rctx->pool_transfers,
                         sizeof(struct r600_transfer), 64,
                         UTIL_SLAB_SINGLETHREADED);

        rctx->screen = rscreen;
        rctx->ws = rscreen->ws;
        rctx->family = rscreen->family;
        rctx->chip_class = rscreen->chip_class;

        if (rscreen->chip_class >= CIK)
                rctx->max_db = MAX2(8, rscreen->info.num_render_backends);
        else if (rscreen->chip_class >= EVERGREEN)
                rctx->max_db = 8;
        else
                rctx->max_db = 4;

        rctx->b.invalidate_resource = r600_invalidate_resource;
        rctx->b.transfer_map = u_transfer_map_vtbl;
        rctx->b.transfer_flush_region = u_transfer_flush_region_vtbl;
        rctx->b.transfer_unmap = u_transfer_unmap_vtbl;
        rctx->b.transfer_inline_write = u_default_transfer_inline_write;
        rctx->b.memory_barrier = r600_memory_barrier;
        rctx->b.flush = r600_flush_from_st;
        rctx->b.set_debug_callback = r600_set_debug_callback;

        if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 43) {
                rctx->b.get_device_reset_status = r600_get_reset_status;
                rctx->gpu_reset_counter =
                        rctx->ws->query_value(rctx->ws,
                                              RADEON_GPU_RESET_COUNTER);
        }

        LIST_INITHEAD(&rctx->texture_buffers);

        r600_init_context_texture_functions(rctx);
        r600_init_viewport_functions(rctx);
        r600_streamout_init(rctx);
        r600_query_init(rctx);
        cayman_init_msaa(&rctx->b);

        rctx->allocator_so_filled_size =
                u_suballocator_create(&rctx->b, rscreen->info.gart_page_size,
                                      4, 0, PIPE_USAGE_DEFAULT, TRUE);
        if (!rctx->allocator_so_filled_size)
                return false;

        rctx->uploader = u_upload_create(&rctx->b, 1024 * 1024,
                                         PIPE_BIND_INDEX_BUFFER |
                                         PIPE_BIND_CONSTANT_BUFFER, PIPE_USAGE_STREAM);
        if (!rctx->uploader)
                return false;

        rctx->ctx = rctx->ws->ctx_create(rctx->ws);
        if (!rctx->ctx)
                return false;

        if (rscreen->info.has_sdma && !(rscreen->debug_flags & DBG_NO_ASYNC_DMA)) {
                rctx->dma.cs = rctx->ws->cs_create(rctx->ctx, RING_DMA,
                                                   r600_flush_dma_ring,
                                                   rctx);
                rctx->dma.flush = r600_flush_dma_ring;
        }

        return true;
}

void r600_common_context_cleanup(struct r600_common_context *rctx)
{
        if (rctx->gfx.cs)
                rctx->ws->cs_destroy(rctx->gfx.cs);
        if (rctx->dma.cs)
                rctx->ws->cs_destroy(rctx->dma.cs);
        if (rctx->ctx)
                rctx->ws->ctx_destroy(rctx->ctx);

        if (rctx->uploader) {
                u_upload_destroy(rctx->uploader);
        }

        util_slab_destroy(&rctx->pool_transfers);

        if (rctx->allocator_so_filled_size) {
                u_suballocator_destroy(rctx->allocator_so_filled_size);
        }
        rctx->ws->fence_reference(&rctx->last_sdma_fence, NULL);
}

void r600_context_add_resource_size(struct pipe_context *ctx, struct pipe_resource *r)
{
        struct r600_common_context *rctx = (struct r600_common_context *)ctx;
        struct r600_resource *rr = (struct r600_resource *)r;

        if (!r) {
                return;
        }

        /*
         * The idea is to compute a gross estimate of the memory requirements
         * of each draw call. After each draw call, memory is accounted
         * precisely, so the uncertainty only covers the current draw call.
         * In practice this gives a very good estimate (+/- 10% of the target
         * memory limit).
         */
        if (rr->domains & RADEON_DOMAIN_VRAM)
                rctx->vram += rr->buf->size;
        else if (rr->domains & RADEON_DOMAIN_GTT)
                rctx->gtt += rr->buf->size;
}

/*
 * pipe_screen
 */

static const struct debug_named_value common_debug_options[] = {
        /* logging */
        { "tex", DBG_TEX, "Print texture info" },
        { "compute", DBG_COMPUTE, "Print compute info" },
        { "vm", DBG_VM, "Print virtual addresses when creating resources" },
        { "info", DBG_INFO, "Print driver information" },

        /* shaders */
        { "fs", DBG_FS, "Print fetch shaders" },
        { "vs", DBG_VS, "Print vertex shaders" },
        { "gs", DBG_GS, "Print geometry shaders" },
        { "ps", DBG_PS, "Print pixel shaders" },
        { "cs", DBG_CS, "Print compute shaders" },
        { "tcs", DBG_TCS, "Print tessellation control shaders" },
        { "tes", DBG_TES, "Print tessellation evaluation shaders" },
        { "noir", DBG_NO_IR, "Don't print the LLVM IR"},
        { "notgsi", DBG_NO_TGSI, "Don't print the TGSI"},
        { "noasm", DBG_NO_ASM, "Don't print disassembled shaders"},
        { "preoptir", DBG_PREOPT_IR, "Print the LLVM IR before initial optimizations" },

        { "testdma", DBG_TEST_DMA, "Invoke SDMA tests and exit." },

        /* features */
        { "nodma", DBG_NO_ASYNC_DMA, "Disable asynchronous DMA" },
        { "nohyperz", DBG_NO_HYPERZ, "Disable Hyper-Z" },
        /* GL uses the word INVALIDATE, gallium uses the word DISCARD */
        { "noinvalrange", DBG_NO_DISCARD_RANGE, "Disable handling of INVALIDATE_RANGE map flags" },
        { "no2d", DBG_NO_2D_TILING, "Disable 2D tiling" },
        { "notiling", DBG_NO_TILING, "Disable tiling" },
        { "switch_on_eop", DBG_SWITCH_ON_EOP, "Program WD/IA to switch on end-of-packet." },
        { "forcedma", DBG_FORCE_DMA, "Use asynchronous DMA for all operations when possible." },
        { "precompile", DBG_PRECOMPILE, "Compile one shader variant at shader creation." },
        { "nowc", DBG_NO_WC, "Disable GTT write combining" },
        { "check_vm", DBG_CHECK_VM, "Check VM faults and dump debug info." },
        { "nodcc", DBG_NO_DCC, "Disable DCC." },
        { "nodccclear", DBG_NO_DCC_CLEAR, "Disable DCC fast clear." },
        { "norbplus", DBG_NO_RB_PLUS, "Disable RB+ on Stoney." },
        { "sisched", DBG_SI_SCHED, "Enable LLVM SI Machine Instruction Scheduler." },
        { "mono", DBG_MONOLITHIC_SHADERS, "Use old-style monolithic shaders compiled on demand" },
        { "noce", DBG_NO_CE, "Disable the constant engine"},

        DEBUG_NAMED_VALUE_END /* must be last */
};

static const char* r600_get_vendor(struct pipe_screen* pscreen)
{
        return "X.Org";
}

static const char* r600_get_device_vendor(struct pipe_screen* pscreen)
{
        return "AMD";
}

static const char* r600_get_chip_name(struct r600_common_screen *rscreen)
{
        switch (rscreen->info.family) {
        case CHIP_R600: return "AMD R600";
        case CHIP_RV610: return "AMD RV610";
        case CHIP_RV630: return "AMD RV630";
        case CHIP_RV670: return "AMD RV670";
        case CHIP_RV620: return "AMD RV620";
        case CHIP_RV635: return "AMD RV635";
        case CHIP_RS780: return "AMD RS780";
        case CHIP_RS880: return "AMD RS880";
        case CHIP_RV770: return "AMD RV770";
        case CHIP_RV730: return "AMD RV730";
        case CHIP_RV710: return "AMD RV710";
        case CHIP_RV740: return "AMD RV740";
        case CHIP_CEDAR: return "AMD CEDAR";
        case CHIP_REDWOOD: return "AMD REDWOOD";
        case CHIP_JUNIPER: return "AMD JUNIPER";
        case CHIP_CYPRESS: return "AMD CYPRESS";
        case CHIP_HEMLOCK: return "AMD HEMLOCK";
        case CHIP_PALM: return "AMD PALM";
        case CHIP_SUMO: return "AMD SUMO";
        case CHIP_SUMO2: return "AMD SUMO2";
        case CHIP_BARTS: return "AMD BARTS";
        case CHIP_TURKS: return "AMD TURKS";
        case CHIP_CAICOS: return "AMD CAICOS";
        case CHIP_CAYMAN: return "AMD CAYMAN";
        case CHIP_ARUBA: return "AMD ARUBA";
        case CHIP_TAHITI: return "AMD TAHITI";
        case CHIP_PITCAIRN: return "AMD PITCAIRN";
        case CHIP_VERDE: return "AMD CAPE VERDE";
        case CHIP_OLAND: return "AMD OLAND";
        case CHIP_HAINAN: return "AMD HAINAN";
        case CHIP_BONAIRE: return "AMD BONAIRE";
        case CHIP_KAVERI: return "AMD KAVERI";
        case CHIP_KABINI: return "AMD KABINI";
        case CHIP_HAWAII: return "AMD HAWAII";
        case CHIP_MULLINS: return "AMD MULLINS";
        case CHIP_TONGA: return "AMD TONGA";
        case CHIP_ICELAND: return "AMD ICELAND";
        case CHIP_CARRIZO: return "AMD CARRIZO";
        case CHIP_FIJI: return "AMD FIJI";
        case CHIP_POLARIS10: return "AMD POLARIS10";
        case CHIP_POLARIS11: return "AMD POLARIS11";
        case CHIP_STONEY: return "AMD STONEY";
        default: return "AMD unknown";
        }
}

static const char* r600_get_name(struct pipe_screen* pscreen)
{
        struct r600_common_screen *rscreen = (struct r600_common_screen*)pscreen;

        return rscreen->renderer_string;
}

static float r600_get_paramf(struct pipe_screen* pscreen,
                             enum pipe_capf param)
{
        struct r600_common_screen *rscreen = (struct r600_common_screen *)pscreen;

        switch (param) {
        case PIPE_CAPF_MAX_LINE_WIDTH:
        case PIPE_CAPF_MAX_LINE_WIDTH_AA:
        case PIPE_CAPF_MAX_POINT_WIDTH:
        case PIPE_CAPF_MAX_POINT_WIDTH_AA:
                if (rscreen->family >= CHIP_CEDAR)
                        return 16384.0f;
                else
                        return 8192.0f;
        case PIPE_CAPF_MAX_TEXTURE_ANISOTROPY:
                return 16.0f;
        case PIPE_CAPF_MAX_TEXTURE_LOD_BIAS:
                return 16.0f;
        case PIPE_CAPF_GUARD_BAND_LEFT:
        case PIPE_CAPF_GUARD_BAND_TOP:
        case PIPE_CAPF_GUARD_BAND_RIGHT:
        case PIPE_CAPF_GUARD_BAND_BOTTOM:
                return 0.0f;
        }
        return 0.0f;
}

static int r600_get_video_param(struct pipe_screen *screen,
                                enum pipe_video_profile profile,
                                enum pipe_video_entrypoint entrypoint,
                                enum pipe_video_cap param)
{
        switch (param) {
        case PIPE_VIDEO_CAP_SUPPORTED:
                return vl_profile_supported(screen, profile, entrypoint);
        case PIPE_VIDEO_CAP_NPOT_TEXTURES:
                return 1;
        case PIPE_VIDEO_CAP_MAX_WIDTH:
        case PIPE_VIDEO_CAP_MAX_HEIGHT:
                return vl_video_buffer_max_size(screen);
        case PIPE_VIDEO_CAP_PREFERED_FORMAT:
                return PIPE_FORMAT_NV12;
        case PIPE_VIDEO_CAP_PREFERS_INTERLACED:
                return false;
        case PIPE_VIDEO_CAP_SUPPORTS_INTERLACED:
                return false;
        case PIPE_VIDEO_CAP_SUPPORTS_PROGRESSIVE:
                return true;
        case PIPE_VIDEO_CAP_MAX_LEVEL:
                return vl_level_supported(screen, profile);
        default:
                return 0;
        }
}

const char *r600_get_llvm_processor_name(enum radeon_family family)
{
        switch (family) {
        case CHIP_R600:
        case CHIP_RV630:
        case CHIP_RV635:
        case CHIP_RV670:
                return "r600";
        case CHIP_RV610:
        case CHIP_RV620:
        case CHIP_RS780:
        case CHIP_RS880:
                return "rs880";
        case CHIP_RV710:
                return "rv710";
        case CHIP_RV730:
                return "rv730";
        case CHIP_RV740:
        case CHIP_RV770:
                return "rv770";
        case CHIP_PALM:
        case CHIP_CEDAR:
                return "cedar";
        case CHIP_SUMO:
        case CHIP_SUMO2:
                return "sumo";
        case CHIP_REDWOOD:
                return "redwood";
        case CHIP_JUNIPER:
                return "juniper";
        case CHIP_HEMLOCK:
        case CHIP_CYPRESS:
                return "cypress";
        case CHIP_BARTS:
                return "barts";
        case CHIP_TURKS:
                return "turks";
        case CHIP_CAICOS:
                return "caicos";
        case CHIP_CAYMAN:
        case CHIP_ARUBA:
                return "cayman";

        case CHIP_TAHITI: return "tahiti";
        case CHIP_PITCAIRN: return "pitcairn";
        case CHIP_VERDE: return "verde";
        case CHIP_OLAND: return "oland";
        case CHIP_HAINAN: return "hainan";
        case CHIP_BONAIRE: return "bonaire";
        case CHIP_KABINI: return "kabini";
        case CHIP_KAVERI: return "kaveri";
        case CHIP_HAWAII: return "hawaii";
        case CHIP_MULLINS: return "mullins";
        case CHIP_TONGA: return "tonga";
        case CHIP_ICELAND: return "iceland";
        case CHIP_CARRIZO: return "carrizo";
#if HAVE_LLVM <= 0x0307
        case CHIP_FIJI: return "tonga";
        case CHIP_STONEY: return "carrizo";
#else
        case CHIP_FIJI: return "fiji";
        case CHIP_STONEY: return "stoney";
#endif
#if HAVE_LLVM <= 0x0308
        case CHIP_POLARIS10: return "tonga";
        case CHIP_POLARIS11: return "tonga";
#else
        case CHIP_POLARIS10: return "polaris10";
        case CHIP_POLARIS11: return "polaris11";
#endif
        default: return "";
        }
}

static int r600_get_compute_param(struct pipe_screen *screen,
                                  enum pipe_shader_ir ir_type,
                                  enum pipe_compute_cap param,
                                  void *ret)
{
        struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;

        //TODO: select these params by asic
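        /* Convention for pipe_screen::get_compute_param: each case returns
         * the size in bytes of the value and writes the value only when
         * ret is non-NULL, so callers can query the size first. */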
        switch (param) {
        case PIPE_COMPUTE_CAP_IR_TARGET: {
                const char *gpu;
                const char *triple;
                if (rscreen->family <= CHIP_ARUBA) {
                        triple = "r600--";
                } else {
                        triple = "amdgcn--";
                }
                switch(rscreen->family) {
                /* Clang < 3.6 is missing Hainan in its list of
                 * GPUs, so we need to use the name of a similar GPU.
                 */
                default:
                        gpu = r600_get_llvm_processor_name(rscreen->family);
                        break;
                }
                if (ret) {
                        sprintf(ret, "%s-%s", gpu, triple);
                }
                /* +2 for dash and terminating NUL byte */
                return (strlen(triple) + strlen(gpu) + 2) * sizeof(char);
        }
        case PIPE_COMPUTE_CAP_GRID_DIMENSION:
                if (ret) {
                        uint64_t *grid_dimension = ret;
                        grid_dimension[0] = 3;
                }
                return 1 * sizeof(uint64_t);

        case PIPE_COMPUTE_CAP_MAX_GRID_SIZE:
                if (ret) {
                        uint64_t *grid_size = ret;
                        grid_size[0] = 65535;
                        grid_size[1] = 65535;
                        grid_size[2] = 65535;
                }
                return 3 * sizeof(uint64_t);

        case PIPE_COMPUTE_CAP_MAX_BLOCK_SIZE:
                if (ret) {
                        uint64_t *block_size = ret;
                        if (rscreen->chip_class >= SI && HAVE_LLVM >= 0x309 &&
                            ir_type == PIPE_SHADER_IR_TGSI) {
                                block_size[0] = 2048;
                                block_size[1] = 2048;
                                block_size[2] = 2048;
                        } else {
                                block_size[0] = 256;
                                block_size[1] = 256;
                                block_size[2] = 256;
                        }
                }
                return 3 * sizeof(uint64_t);

        case PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK:
                if (ret) {
                        uint64_t *max_threads_per_block = ret;
                        if (rscreen->chip_class >= SI && HAVE_LLVM >= 0x309 &&
                            ir_type == PIPE_SHADER_IR_TGSI)
                                *max_threads_per_block = 2048;
                        else
                                *max_threads_per_block = 256;
                }
                return sizeof(uint64_t);

        case PIPE_COMPUTE_CAP_MAX_GLOBAL_SIZE:
                if (ret) {
                        uint64_t *max_global_size = ret;
                        uint64_t max_mem_alloc_size;

                        r600_get_compute_param(screen, ir_type,
                                               PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE,
                                               &max_mem_alloc_size);

                        /* In OpenCL, the MAX_MEM_ALLOC_SIZE must be at least
                         * 1/4 of the MAX_GLOBAL_SIZE. Since the
                         * MAX_MEM_ALLOC_SIZE is fixed for older kernels,
                         * make sure we never report more than
                         * 4 * MAX_MEM_ALLOC_SIZE.
                         */
                        *max_global_size = MIN2(4 * max_mem_alloc_size,
                                                rscreen->info.gart_size +
                                                rscreen->info.vram_size);
                }
                return sizeof(uint64_t);

        case PIPE_COMPUTE_CAP_MAX_LOCAL_SIZE:
                if (ret) {
                        uint64_t *max_local_size = ret;
                        /* Value reported by the closed source driver. */
                        *max_local_size = 32768;
                }
                return sizeof(uint64_t);

        case PIPE_COMPUTE_CAP_MAX_INPUT_SIZE:
                if (ret) {
                        uint64_t *max_input_size = ret;
                        /* Value reported by the closed source driver. */
                        *max_input_size = 1024;
                }
                return sizeof(uint64_t);

        case PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE:
                if (ret) {
                        uint64_t *max_mem_alloc_size = ret;

                        /* XXX: The limit in older kernels is 256 MB. We
                         * should add a query here for newer kernels.
                         */
                        *max_mem_alloc_size = 256 * 1024 * 1024;
                }
                return sizeof(uint64_t);

        case PIPE_COMPUTE_CAP_MAX_CLOCK_FREQUENCY:
                if (ret) {
                        uint32_t *max_clock_frequency = ret;
                        *max_clock_frequency = rscreen->info.max_shader_clock;
                }
                return sizeof(uint32_t);

        case PIPE_COMPUTE_CAP_MAX_COMPUTE_UNITS:
                if (ret) {
                        uint32_t *max_compute_units = ret;
                        *max_compute_units = rscreen->info.num_good_compute_units;
                }
                return sizeof(uint32_t);

        case PIPE_COMPUTE_CAP_IMAGES_SUPPORTED:
                if (ret) {
                        uint32_t *images_supported = ret;
                        *images_supported = 0;
                }
                return sizeof(uint32_t);
        case PIPE_COMPUTE_CAP_MAX_PRIVATE_SIZE:
                break; /* unused */
        case PIPE_COMPUTE_CAP_SUBGROUP_SIZE:
                if (ret) {
                        uint32_t *subgroup_size = ret;
                        *subgroup_size = r600_wavefront_size(rscreen->family);
                }
                return sizeof(uint32_t);
        }

        fprintf(stderr, "unknown PIPE_COMPUTE_CAP %d\n", param);
        return 0;
}

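/* clock_crystal_freq is in kHz, so ticks * 1000000 / freq_in_kHz converts
 * the raw counter value to nanoseconds. */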
static uint64_t r600_get_timestamp(struct pipe_screen *screen)
{
        struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;

        return 1000000 * rscreen->ws->query_value(rscreen->ws, RADEON_TIMESTAMP) /
               rscreen->info.clock_crystal_freq;
}

static void r600_fence_reference(struct pipe_screen *screen,
                                 struct pipe_fence_handle **dst,
                                 struct pipe_fence_handle *src)
{
        struct radeon_winsys *ws = ((struct r600_common_screen*)screen)->ws;
        struct r600_multi_fence **rdst = (struct r600_multi_fence **)dst;
        struct r600_multi_fence *rsrc = (struct r600_multi_fence *)src;

        if (pipe_reference(&(*rdst)->reference, &rsrc->reference)) {
                ws->fence_reference(&(*rdst)->gfx, NULL);
                ws->fence_reference(&(*rdst)->sdma, NULL);
                FREE(*rdst);
        }
        *rdst = rsrc;
}

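/* Wait for the SDMA fence first (recomputing the remaining timeout
 * afterwards), then for the GFX fence; the multi fence has signaled only
 * when both have. */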
static boolean r600_fence_finish(struct pipe_screen *screen,
                                 struct pipe_fence_handle *fence,
                                 uint64_t timeout)
{
        struct radeon_winsys *rws = ((struct r600_common_screen*)screen)->ws;
        struct r600_multi_fence *rfence = (struct r600_multi_fence *)fence;
        int64_t abs_timeout = os_time_get_absolute_timeout(timeout);

        if (rfence->sdma) {
                if (!rws->fence_wait(rws, rfence->sdma, timeout))
                        return false;

                /* Recompute the timeout after waiting. */
                if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
                        int64_t time = os_time_get_nano();
                        timeout = abs_timeout > time ? abs_timeout - time : 0;
                }
        }

        if (!rfence->gfx)
                return true;

        return rws->fence_wait(rws, rfence->gfx, timeout);
}

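/* Note: all pipe_memory_info values are reported in kilobytes. */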
static void r600_query_memory_info(struct pipe_screen *screen,
                                   struct pipe_memory_info *info)
{
        struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
        struct radeon_winsys *ws = rscreen->ws;
        unsigned vram_usage, gtt_usage;

        info->total_device_memory = rscreen->info.vram_size / 1024;
        info->total_staging_memory = rscreen->info.gart_size / 1024;

        /* The real TTM memory usage is somewhat random, because:
         *
         * 1) TTM delays freeing memory, because it can only free it after
         *    fences expire.
         *
         * 2) The memory usage can be really low if big VRAM evictions are
         *    taking place, but the real usage is well above the size of VRAM.
         *
         * Instead, return statistics of this process.
         */
        vram_usage = ws->query_value(ws, RADEON_REQUESTED_VRAM_MEMORY) / 1024;
        gtt_usage = ws->query_value(ws, RADEON_REQUESTED_GTT_MEMORY) / 1024;

        info->avail_device_memory =
                vram_usage <= info->total_device_memory ?
                        info->total_device_memory - vram_usage : 0;
        info->avail_staging_memory =
                gtt_usage <= info->total_staging_memory ?
                        info->total_staging_memory - gtt_usage : 0;

        info->device_memory_evicted =
                ws->query_value(ws, RADEON_NUM_BYTES_MOVED) / 1024;
        /* Just return the number of evicted 64KB pages. */
        info->nr_device_memory_evictions = info->device_memory_evicted / 64;
}

struct pipe_resource *r600_resource_create_common(struct pipe_screen *screen,
                                                  const struct pipe_resource *templ)
{
        struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;

        if (templ->target == PIPE_BUFFER) {
                return r600_buffer_create(screen, templ,
                                          rscreen->info.gart_page_size);
        } else {
                return r600_texture_create(screen, templ);
        }
}

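/* Shared screen initialization for r600g and radeonsi: builds the renderer
 * string, installs the common pipe_screen hooks, and parses R600_DEBUG. */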
bool r600_common_screen_init(struct r600_common_screen *rscreen,
                             struct radeon_winsys *ws)
{
        char llvm_string[32] = {};

        ws->query_info(ws, &rscreen->info);

#if HAVE_LLVM
        snprintf(llvm_string, sizeof(llvm_string),
                 ", LLVM %i.%i.%i", (HAVE_LLVM >> 8) & 0xff,
                 HAVE_LLVM & 0xff, MESA_LLVM_VERSION_PATCH);
#endif

        snprintf(rscreen->renderer_string, sizeof(rscreen->renderer_string),
                 "%s (DRM %i.%i.%i%s)",
                 r600_get_chip_name(rscreen), rscreen->info.drm_major,
                 rscreen->info.drm_minor, rscreen->info.drm_patchlevel,
                 llvm_string);

        rscreen->b.get_name = r600_get_name;
        rscreen->b.get_vendor = r600_get_vendor;
        rscreen->b.get_device_vendor = r600_get_device_vendor;
        rscreen->b.get_compute_param = r600_get_compute_param;
        rscreen->b.get_paramf = r600_get_paramf;
        rscreen->b.get_timestamp = r600_get_timestamp;
        rscreen->b.fence_finish = r600_fence_finish;
        rscreen->b.fence_reference = r600_fence_reference;
        rscreen->b.resource_destroy = u_resource_destroy_vtbl;
        rscreen->b.resource_from_user_memory = r600_buffer_from_user_memory;
        rscreen->b.query_memory_info = r600_query_memory_info;

        if (rscreen->info.has_uvd) {
                rscreen->b.get_video_param = rvid_get_video_param;
                rscreen->b.is_video_format_supported = rvid_is_format_supported;
        } else {
                rscreen->b.get_video_param = r600_get_video_param;
                rscreen->b.is_video_format_supported = vl_video_buffer_is_format_supported;
        }

        r600_init_screen_texture_functions(rscreen);
        r600_init_screen_query_functions(rscreen);

        rscreen->ws = ws;
        rscreen->family = rscreen->info.family;
        rscreen->chip_class = rscreen->info.chip_class;
        rscreen->debug_flags = debug_get_flags_option("R600_DEBUG", common_debug_options, 0);

        rscreen->force_aniso = MIN2(16, debug_get_num_option("R600_TEX_ANISO", -1));
        if (rscreen->force_aniso >= 0) {
                printf("radeon: Forcing anisotropy filter to %ix\n",
                       /* round down to a power of two */
                       1 << util_logbase2(rscreen->force_aniso));
        }

        util_format_s3tc_init();
        pipe_mutex_init(rscreen->aux_context_lock);
        pipe_mutex_init(rscreen->gpu_load_mutex);

        if (rscreen->debug_flags & DBG_INFO) {
                printf("pci_id = 0x%x\n", rscreen->info.pci_id);
                printf("family = %i (%s)\n", rscreen->info.family,
                       r600_get_chip_name(rscreen));
                printf("chip_class = %i\n", rscreen->info.chip_class);
                printf("gart_size = %i MB\n", (int)DIV_ROUND_UP(rscreen->info.gart_size, 1024*1024));
                printf("vram_size = %i MB\n", (int)DIV_ROUND_UP(rscreen->info.vram_size, 1024*1024));
                printf("has_virtual_memory = %i\n", rscreen->info.has_virtual_memory);
                printf("gfx_ib_pad_with_type2 = %i\n", rscreen->info.gfx_ib_pad_with_type2);
                printf("has_sdma = %i\n", rscreen->info.has_sdma);
                printf("has_uvd = %i\n", rscreen->info.has_uvd);
                printf("vce_fw_version = %i\n", rscreen->info.vce_fw_version);
                printf("vce_harvest_config = %i\n", rscreen->info.vce_harvest_config);
                printf("clock_crystal_freq = %i\n", rscreen->info.clock_crystal_freq);
                printf("drm = %i.%i.%i\n", rscreen->info.drm_major,
                       rscreen->info.drm_minor, rscreen->info.drm_patchlevel);
                printf("has_userptr = %i\n", rscreen->info.has_userptr);

                printf("r600_max_quad_pipes = %i\n", rscreen->info.r600_max_quad_pipes);
                printf("max_shader_clock = %i\n", rscreen->info.max_shader_clock);
                printf("num_good_compute_units = %i\n", rscreen->info.num_good_compute_units);
                printf("max_se = %i\n", rscreen->info.max_se);
                printf("max_sh_per_se = %i\n", rscreen->info.max_sh_per_se);

                printf("r600_gb_backend_map = %i\n", rscreen->info.r600_gb_backend_map);
                printf("r600_gb_backend_map_valid = %i\n", rscreen->info.r600_gb_backend_map_valid);
                printf("r600_num_banks = %i\n", rscreen->info.r600_num_banks);
                printf("num_render_backends = %i\n", rscreen->info.num_render_backends);
                printf("num_tile_pipes = %i\n", rscreen->info.num_tile_pipes);
                printf("pipe_interleave_bytes = %i\n", rscreen->info.pipe_interleave_bytes);
        }
        return true;
}

void r600_destroy_common_screen(struct r600_common_screen *rscreen)
{
        r600_perfcounters_destroy(rscreen);
        r600_gpu_load_kill_thread(rscreen);

        pipe_mutex_destroy(rscreen->gpu_load_mutex);
        pipe_mutex_destroy(rscreen->aux_context_lock);
        rscreen->aux_context->destroy(rscreen->aux_context);

        rscreen->ws->destroy(rscreen->ws);
        FREE(rscreen);
}

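/* Returns true if R600_DEBUG asked for shader dumps of the given
 * processor type. */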
bool r600_can_dump_shader(struct r600_common_screen *rscreen,
                          unsigned processor)
{
        switch (processor) {
        case PIPE_SHADER_VERTEX:
                return (rscreen->debug_flags & DBG_VS) != 0;
        case PIPE_SHADER_TESS_CTRL:
                return (rscreen->debug_flags & DBG_TCS) != 0;
        case PIPE_SHADER_TESS_EVAL:
                return (rscreen->debug_flags & DBG_TES) != 0;
        case PIPE_SHADER_GEOMETRY:
                return (rscreen->debug_flags & DBG_GS) != 0;
        case PIPE_SHADER_FRAGMENT:
                return (rscreen->debug_flags & DBG_PS) != 0;
        case PIPE_SHADER_COMPUTE:
                return (rscreen->debug_flags & DBG_CS) != 0;
        default:
                return false;
        }
}

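/* Clear a buffer range via the screen's aux context, serialized by
 * aux_context_lock so it can be used without a caller-owned context
 * (e.g. during resource initialization). */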
void r600_screen_clear_buffer(struct r600_common_screen *rscreen, struct pipe_resource *dst,
                              uint64_t offset, uint64_t size, unsigned value,
                              enum r600_coherency coher)
{
        struct r600_common_context *rctx = (struct r600_common_context*)rscreen->aux_context;

        pipe_mutex_lock(rscreen->aux_context_lock);
        rctx->clear_buffer(&rctx->b, dst, offset, size, value, coher);
        rscreen->aux_context->flush(rscreen->aux_context, NULL, 0);
        pipe_mutex_unlock(rscreen->aux_context_lock);
}