radeon: make texture logging more useful
[mesa.git] / src / gallium / drivers / r600 / r600_pipe.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include "r600_pipe.h"
24 #include "r600_public.h"
25 #include "r600_isa.h"
26 #include "evergreen_compute.h"
27 #include "r600d.h"
28
29 #include "sb/sb_public.h"
30
31 #include <errno.h>
32 #include "pipe/p_shader_tokens.h"
33 #include "util/u_blitter.h"
34 #include "util/u_debug.h"
35 #include "util/u_memory.h"
36 #include "util/u_simple_shaders.h"
37 #include "util/u_upload_mgr.h"
38 #include "util/u_math.h"
39 #include "vl/vl_decoder.h"
40 #include "vl/vl_video_buffer.h"
41 #include "radeon/radeon_uvd.h"
42 #include "os/os_time.h"
43
/* Named flags understood by the R600_DEBUG environment variable; the table
 * is parsed with debug_get_flags_option() in r600_screen_create(). */
static const struct debug_named_value r600_debug_options[] = {
	/* features */
	{ "nohyperz", DBG_NO_HYPERZ, "Disable Hyper-Z" },
#if defined(R600_USE_LLVM)
	{ "nollvm", DBG_NO_LLVM, "Disable the LLVM shader compiler" },
#endif
	{ "nocpdma", DBG_NO_CP_DMA, "Disable CP DMA" },
	{ "nodma", DBG_NO_ASYNC_DMA, "Disable asynchronous DMA" },
	/* GL uses the word INVALIDATE, gallium uses the word DISCARD */
	{ "noinvalrange", DBG_NO_DISCARD_RANGE, "Disable handling of INVALIDATE_RANGE map flags" },

	/* shader backend */
	{ "nosb", DBG_NO_SB, "Disable sb backend for graphics shaders" },
	{ "sbcl", DBG_SB_CS, "Enable sb backend for compute shaders" },
	{ "sbdry", DBG_SB_DRY_RUN, "Don't use optimized bytecode (just print the dumps)" },
	{ "sbstat", DBG_SB_STAT, "Print optimization statistics for shaders" },
	{ "sbdump", DBG_SB_DUMP, "Print IR dumps after some optimization passes" },
	{ "sbnofallback", DBG_SB_NO_FALLBACK, "Abort on errors instead of fallback" },
	{ "sbdisasm", DBG_SB_DISASM, "Use sb disassembler for shader dumps" },
	{ "sbsafemath", DBG_SB_SAFEMATH, "Disable unsafe math optimizations" },

	DEBUG_NAMED_VALUE_END /* must be last */
};
67
68 /*
69 * pipe_context
70 */
/**
 * Allocate (or recycle) a fence object and emit it on the gfx ring.
 *
 * Fences are 32-bit slots in one shared 4096-byte buffer owned by the
 * screen; a slot is considered signalled once it holds a non-zero value
 * (see r600_fence_signalled / r600_fence_finish).
 *
 * Takes the screen's fence mutex internally.
 *
 * \return a fence with one reference, or NULL on failure.
 */
static struct r600_fence *r600_create_fence(struct r600_context *rctx)
{
	struct r600_screen *rscreen = rctx->screen;
	struct r600_fence *fence = NULL;

	pipe_mutex_lock(rscreen->fences.mutex);

	/* Lazily create the buffer holding all fence slots for this screen. */
	if (!rscreen->fences.bo) {
		/* Create the shared buffer object */
		rscreen->fences.bo = (struct r600_resource*)
			pipe_buffer_create(&rscreen->b.b, PIPE_BIND_CUSTOM,
					   PIPE_USAGE_STAGING, 4096);
		if (!rscreen->fences.bo) {
			R600_ERR("r600: failed to create bo for fence objects\n");
			goto out;
		}
		/* Kept mapped for the lifetime of the screen. */
		rscreen->fences.data = r600_buffer_map_sync_with_rings(&rctx->b, rscreen->fences.bo, PIPE_TRANSFER_READ_WRITE);
	}

	if (!LIST_IS_EMPTY(&rscreen->fences.pool)) {
		struct r600_fence *entry;

		/* Try to find a freed fence that has been signalled */
		LIST_FOR_EACH_ENTRY(entry, &rscreen->fences.pool, head) {
			if (rscreen->fences.data[entry->index] != 0) {
				LIST_DELINIT(&entry->head);
				fence = entry;
				break;
			}
		}
	}

	if (!fence) {
		/* Allocate a new fence */
		struct r600_fence_block *block;
		unsigned index;

		/* 4096 bytes / 4 per slot = 1024 slots.
		 * NOTE(review): the "+ 1" leaves the last slot (1023) unused;
		 * looks like an off-by-one safety margin - confirm before changing. */
		if ((rscreen->fences.next_index + 1) >= 1024) {
			R600_ERR("r600: too many concurrent fences\n");
			goto out;
		}

		index = rscreen->fences.next_index++;

		/* Fence structs are carved out of FENCE_BLOCK_SIZE-sized blocks;
		 * a fresh block is allocated whenever a block boundary is crossed. */
		if (!(index % FENCE_BLOCK_SIZE)) {
			/* Allocate a new block */
			block = CALLOC_STRUCT(r600_fence_block);
			if (block == NULL)
				goto out;

			LIST_ADD(&block->head, &rscreen->fences.blocks);
		} else {
			block = LIST_ENTRY(struct r600_fence_block, rscreen->fences.blocks.next, head);
		}

		fence = &block->fences[index % FENCE_BLOCK_SIZE];
		fence->index = index;
	}

	pipe_reference_init(&fence->reference, 1);

	/* Clear the slot, then emit a packet that writes to it on completion
	 * (presumably the final argument is the value written - verify). */
	rscreen->fences.data[fence->index] = 0;
	r600_context_emit_fence(rctx, rscreen->fences.bo, fence->index, 1);

	/* Create a dummy BO so that fence_finish without a timeout can sleep waiting for completion */
	fence->sleep_bo = (struct r600_resource*)
		pipe_buffer_create(&rctx->screen->b.b, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, 1);
	/* Add the fence as a dummy relocation. */
	r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, fence->sleep_bo, RADEON_USAGE_READWRITE);

out:
	pipe_mutex_unlock(rscreen->fences.mutex);
	return fence;
}
146
/**
 * Flush the gfx command stream and begin a new one.
 *
 * Does nothing if the CS holds no commands beyond the initial state
 * (cdw still equals initial_gfx_cs_size).  Any active render condition
 * is cleared before the flush and re-applied afterwards.
 */
static void r600_flush(struct pipe_context *ctx, unsigned flags)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct pipe_query *render_cond = NULL;
	unsigned render_cond_mode = 0;
	boolean render_cond_cond = FALSE;

	/* Nothing emitted since the last flush - skip. */
	if (rctx->b.rings.gfx.cs->cdw == rctx->initial_gfx_cs_size)
		return;

	rctx->b.rings.gfx.flushing = true;
	/* Disable render condition. */
	if (rctx->current_render_cond) {
		render_cond = rctx->current_render_cond;
		render_cond_cond = rctx->current_render_cond_cond;
		render_cond_mode = rctx->current_render_cond_mode;
		ctx->render_condition(ctx, NULL, FALSE, 0);
	}

	r600_context_flush(rctx, flags);
	rctx->b.rings.gfx.flushing = false;
	r600_begin_new_cs(rctx);

	/* Re-enable render condition. */
	if (render_cond) {
		ctx->render_condition(ctx, render_cond, render_cond_cond, render_cond_mode);
	}

	/* Remember the size of the freshly-started CS for the empty check above. */
	rctx->initial_gfx_cs_size = rctx->b.rings.gfx.cs->cdw;
}
177
178 static void r600_flush_from_st(struct pipe_context *ctx,
179 struct pipe_fence_handle **fence,
180 unsigned flags)
181 {
182 struct r600_context *rctx = (struct r600_context *)ctx;
183 struct r600_fence **rfence = (struct r600_fence**)fence;
184 unsigned fflags;
185
186 fflags = flags & PIPE_FLUSH_END_OF_FRAME ? RADEON_FLUSH_END_OF_FRAME : 0;
187 if (rfence) {
188 *rfence = r600_create_fence(rctx);
189 }
190 /* flush gfx & dma ring, order does not matter as only one can be live */
191 if (rctx->b.rings.dma.cs) {
192 rctx->b.rings.dma.flush(rctx, fflags);
193 }
194 rctx->b.rings.gfx.flush(rctx, fflags);
195 }
196
/* Winsys-facing gfx flush callback; forwards to r600_flush(). */
static void r600_flush_gfx_ring(void *ctx, unsigned flags)
{
	struct pipe_context *pctx = (struct pipe_context *)ctx;

	r600_flush(pctx, flags);
}
201
202 static void r600_flush_dma_ring(void *ctx, unsigned flags)
203 {
204 struct r600_context *rctx = (struct r600_context *)ctx;
205 struct radeon_winsys_cs *cs = rctx->b.rings.dma.cs;
206
207 if (!cs->cdw) {
208 return;
209 }
210
211 rctx->b.rings.dma.flushing = true;
212 rctx->b.ws->cs_flush(cs, flags, 0);
213 rctx->b.rings.dma.flushing = false;
214 }
215
216 static void r600_flush_from_winsys(void *ctx, unsigned flags)
217 {
218 struct r600_context *rctx = (struct r600_context *)ctx;
219
220 rctx->b.rings.gfx.flush(rctx, flags);
221 }
222
223 static void r600_flush_dma_from_winsys(void *ctx, unsigned flags)
224 {
225 struct r600_context *rctx = (struct r600_context *)ctx;
226
227 rctx->b.rings.dma.flush(rctx, flags);
228 }
229
/**
 * pipe_context::destroy - free every resource owned by the context.
 *
 * Also used as the error path of r600_create_context, so every branch
 * tolerates members that were never initialized (CALLOC zero-fill).
 */
static void r600_destroy_context(struct pipe_context *context)
{
	struct r600_context *rctx = (struct r600_context *)context;

	r600_isa_destroy(rctx->isa);

	r600_sb_context_destroy(rctx->sb_context);

	pipe_resource_reference((struct pipe_resource**)&rctx->dummy_cmask, NULL);
	pipe_resource_reference((struct pipe_resource**)&rctx->dummy_fmask, NULL);

	/* Release the internal shader/state objects created at context init. */
	if (rctx->dummy_pixel_shader) {
		rctx->b.b.delete_fs_state(&rctx->b.b, rctx->dummy_pixel_shader);
	}
	if (rctx->custom_dsa_flush) {
		rctx->b.b.delete_depth_stencil_alpha_state(&rctx->b.b, rctx->custom_dsa_flush);
	}
	if (rctx->custom_blend_resolve) {
		rctx->b.b.delete_blend_state(&rctx->b.b, rctx->custom_blend_resolve);
	}
	if (rctx->custom_blend_decompress) {
		rctx->b.b.delete_blend_state(&rctx->b.b, rctx->custom_blend_decompress);
	}
	if (rctx->custom_blend_fastclear) {
		rctx->b.b.delete_blend_state(&rctx->b.b, rctx->custom_blend_fastclear);
	}
	util_unreference_framebuffer_state(&rctx->framebuffer.state);

	if (rctx->blitter) {
		util_blitter_destroy(rctx->blitter);
	}
	if (rctx->uploader) {
		u_upload_destroy(rctx->uploader);
	}
	if (rctx->allocator_fetch_shader) {
		u_suballocator_destroy(rctx->allocator_fetch_shader);
	}
	util_slab_destroy(&rctx->pool_transfers);

	r600_release_command_buffer(&rctx->start_cs_cmd);

	/* Destroy the command streams after everything that references them. */
	if (rctx->b.rings.gfx.cs) {
		rctx->b.ws->cs_destroy(rctx->b.rings.gfx.cs);
	}
	if (rctx->b.rings.dma.cs) {
		rctx->b.ws->cs_destroy(rctx->b.rings.dma.cs);
	}

	r600_common_context_cleanup(&rctx->b);
	FREE(rctx);
}
281
/**
 * pipe_screen::context_create - create and initialize an r600 rendering
 * context.
 *
 * Sets up chip-class specific state handlers (R600/R700 vs
 * EVERGREEN/CAYMAN), the gfx and optional async DMA command streams, the
 * upload manager, the fetch-shader suballocator, the blitter and a dummy
 * pixel shader.  On failure everything is torn down via
 * r600_destroy_context.
 *
 * \return the new pipe_context, or NULL on failure.
 */
static struct pipe_context *r600_create_context(struct pipe_screen *screen, void *priv)
{
	struct r600_context *rctx = CALLOC_STRUCT(r600_context);
	struct r600_screen* rscreen = (struct r600_screen *)screen;

	if (rctx == NULL)
		return NULL;

	/* Per-context slab allocator for transfer objects. */
	util_slab_create(&rctx->pool_transfers,
			 sizeof(struct r600_transfer), 64,
			 UTIL_SLAB_SINGLETHREADED);

	rctx->b.b.screen = screen;
	rctx->b.b.priv = priv;
	rctx->b.b.destroy = r600_destroy_context;
	rctx->b.b.flush = r600_flush_from_st;

	if (!r600_common_context_init(&rctx->b, &rscreen->b))
		goto fail;

	rctx->screen = rscreen;
	/* NOTE(review): presumably DRM minor >= 12 is the first kernel that
	 * accepts tiling flags on every CS - confirm against the kernel API. */
	rctx->keep_tiling_flags = rscreen->b.info.drm_minor >= 12;

	LIST_INITHEAD(&rctx->active_nontimer_queries);

	r600_init_blit_functions(rctx);
	r600_init_query_functions(rctx);
	r600_init_context_resource_functions(rctx);

	/* Use UVD for video decoding when present, otherwise fall back to
	 * shader-based decoding (vl). */
	if (rscreen->b.info.has_uvd) {
		rctx->b.b.create_video_codec = r600_uvd_create_decoder;
		rctx->b.b.create_video_buffer = r600_video_buffer_create;
	} else {
		rctx->b.b.create_video_codec = vl_create_decoder;
		rctx->b.b.create_video_buffer = vl_video_buffer_create;
	}

	r600_init_common_state_functions(rctx);

	/* Chip-class specific state functions and internal helper states. */
	switch (rctx->b.chip_class) {
	case R600:
	case R700:
		r600_init_state_functions(rctx);
		r600_init_atom_start_cs(rctx);
		rctx->max_db = 4;
		rctx->custom_dsa_flush = r600_create_db_flush_dsa(rctx);
		rctx->custom_blend_resolve = rctx->b.chip_class == R700 ? r700_create_resolve_blend(rctx)
									: r600_create_resolve_blend(rctx);
		rctx->custom_blend_decompress = r600_create_decompress_blend(rctx);
		rctx->has_vertex_cache = !(rctx->b.family == CHIP_RV610 ||
					   rctx->b.family == CHIP_RV620 ||
					   rctx->b.family == CHIP_RS780 ||
					   rctx->b.family == CHIP_RS880 ||
					   rctx->b.family == CHIP_RV710);
		break;
	case EVERGREEN:
	case CAYMAN:
		evergreen_init_state_functions(rctx);
		evergreen_init_atom_start_cs(rctx);
		evergreen_init_atom_start_compute_cs(rctx);
		rctx->max_db = 8;
		rctx->custom_dsa_flush = evergreen_create_db_flush_dsa(rctx);
		rctx->custom_blend_resolve = evergreen_create_resolve_blend(rctx);
		rctx->custom_blend_decompress = evergreen_create_decompress_blend(rctx);
		rctx->custom_blend_fastclear = evergreen_create_fastclear_blend(rctx);
		rctx->has_vertex_cache = !(rctx->b.family == CHIP_CEDAR ||
					   rctx->b.family == CHIP_PALM ||
					   rctx->b.family == CHIP_SUMO ||
					   rctx->b.family == CHIP_SUMO2 ||
					   rctx->b.family == CHIP_CAICOS ||
					   rctx->b.family == CHIP_CAYMAN ||
					   rctx->b.family == CHIP_ARUBA);
		break;
	default:
		R600_ERR("Unsupported chip class %d.\n", rctx->b.chip_class);
		goto fail;
	}

	/* Create the gfx CS; the trace BO is passed through when present
	 * (presumably for post-hang CS dumping - see rscreen->trace_bo). */
	if (rscreen->trace_bo) {
		rctx->b.rings.gfx.cs = rctx->b.ws->cs_create(rctx->b.ws, RING_GFX, rscreen->trace_bo->cs_buf);
	} else {
		rctx->b.rings.gfx.cs = rctx->b.ws->cs_create(rctx->b.ws, RING_GFX, NULL);
	}
	rctx->b.rings.gfx.flush = r600_flush_gfx_ring;
	rctx->b.ws->cs_set_flush_callback(rctx->b.rings.gfx.cs, r600_flush_from_winsys, rctx);
	rctx->b.rings.gfx.flushing = false;

	/* Async DMA ring, when the kernel supports it and it isn't disabled. */
	rctx->b.rings.dma.cs = NULL;
	if (rscreen->b.info.r600_has_dma && !(rscreen->b.debug_flags & DBG_NO_ASYNC_DMA)) {
		rctx->b.rings.dma.cs = rctx->b.ws->cs_create(rctx->b.ws, RING_DMA, NULL);
		rctx->b.rings.dma.flush = r600_flush_dma_ring;
		rctx->b.ws->cs_set_flush_callback(rctx->b.rings.dma.cs, r600_flush_dma_from_winsys, rctx);
		rctx->b.rings.dma.flushing = false;
	}

	rctx->uploader = u_upload_create(&rctx->b.b, 1024 * 1024, 256,
					 PIPE_BIND_INDEX_BUFFER |
					 PIPE_BIND_CONSTANT_BUFFER);
	if (!rctx->uploader)
		goto fail;

	rctx->allocator_fetch_shader = u_suballocator_create(&rctx->b.b, 64 * 1024, 256,
							     0, PIPE_USAGE_STATIC, FALSE);
	if (!rctx->allocator_fetch_shader)
		goto fail;

	rctx->isa = calloc(1, sizeof(struct r600_isa));
	if (!rctx->isa || r600_isa_init(rctx, rctx->isa))
		goto fail;

	rctx->blitter = util_blitter_create(&rctx->b.b);
	if (rctx->blitter == NULL)
		goto fail;
	util_blitter_set_texture_multisample(rctx->blitter, rscreen->has_msaa);
	rctx->blitter->draw_rectangle = r600_draw_rectangle;

	r600_begin_new_cs(rctx);
	r600_get_backend_mask(rctx); /* this emits commands and must be last */

	/* Bind a trivial fragment shader as the initial FS state. */
	rctx->dummy_pixel_shader =
		util_make_fragment_cloneinput_shader(&rctx->b.b, 0,
						     TGSI_SEMANTIC_GENERIC,
						     TGSI_INTERPOLATE_CONSTANT);
	rctx->b.b.bind_fs_state(&rctx->b.b, rctx->dummy_pixel_shader);

	return &rctx->b.b;

fail:
	r600_destroy_context(&rctx->b.b);
	return NULL;
}
413
414 /*
415 * pipe_screen
416 */
/* pipe_screen::get_vendor - vendor string reported to applications. */
static const char* r600_get_vendor(struct pipe_screen* pscreen)
{
	(void)pscreen; /* unused */
	return "X.Org";
}
421
422 static const char *r600_get_family_name(enum radeon_family family)
423 {
424 switch(family) {
425 case CHIP_R600: return "AMD R600";
426 case CHIP_RV610: return "AMD RV610";
427 case CHIP_RV630: return "AMD RV630";
428 case CHIP_RV670: return "AMD RV670";
429 case CHIP_RV620: return "AMD RV620";
430 case CHIP_RV635: return "AMD RV635";
431 case CHIP_RS780: return "AMD RS780";
432 case CHIP_RS880: return "AMD RS880";
433 case CHIP_RV770: return "AMD RV770";
434 case CHIP_RV730: return "AMD RV730";
435 case CHIP_RV710: return "AMD RV710";
436 case CHIP_RV740: return "AMD RV740";
437 case CHIP_CEDAR: return "AMD CEDAR";
438 case CHIP_REDWOOD: return "AMD REDWOOD";
439 case CHIP_JUNIPER: return "AMD JUNIPER";
440 case CHIP_CYPRESS: return "AMD CYPRESS";
441 case CHIP_HEMLOCK: return "AMD HEMLOCK";
442 case CHIP_PALM: return "AMD PALM";
443 case CHIP_SUMO: return "AMD SUMO";
444 case CHIP_SUMO2: return "AMD SUMO2";
445 case CHIP_BARTS: return "AMD BARTS";
446 case CHIP_TURKS: return "AMD TURKS";
447 case CHIP_CAICOS: return "AMD CAICOS";
448 case CHIP_CAYMAN: return "AMD CAYMAN";
449 case CHIP_ARUBA: return "AMD ARUBA";
450 default: return "AMD unknown";
451 }
452 }
453
454 static const char* r600_get_name(struct pipe_screen* pscreen)
455 {
456 struct r600_screen *rscreen = (struct r600_screen *)pscreen;
457
458 return r600_get_family_name(rscreen->b.family);
459 }
460
/**
 * pipe_screen::get_param - report the integer capabilities of this screen.
 *
 * Values depend on the ASIC family (notably R600 vs R700 vs Evergreen+)
 * and on the DRM interface version.  Unknown caps fall through to 0.
 */
static int r600_get_param(struct pipe_screen* pscreen, enum pipe_cap param)
{
	struct r600_screen *rscreen = (struct r600_screen *)pscreen;
	enum radeon_family family = rscreen->b.family;

	switch (param) {
	/* Supported features (boolean caps). */
	case PIPE_CAP_NPOT_TEXTURES:
	case PIPE_CAP_TWO_SIDED_STENCIL:
	case PIPE_CAP_ANISOTROPIC_FILTER:
	case PIPE_CAP_POINT_SPRITE:
	case PIPE_CAP_OCCLUSION_QUERY:
	case PIPE_CAP_TEXTURE_SHADOW_MAP:
	case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
	case PIPE_CAP_BLEND_EQUATION_SEPARATE:
	case PIPE_CAP_TEXTURE_SWIZZLE:
	case PIPE_CAP_DEPTH_CLIP_DISABLE:
	case PIPE_CAP_SHADER_STENCIL_EXPORT:
	case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR:
	case PIPE_CAP_MIXED_COLORBUFFER_FORMATS:
	case PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT:
	case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
	case PIPE_CAP_SM3:
	case PIPE_CAP_SEAMLESS_CUBE_MAP:
	case PIPE_CAP_PRIMITIVE_RESTART:
	case PIPE_CAP_CONDITIONAL_RENDER:
	case PIPE_CAP_TEXTURE_BARRIER:
	case PIPE_CAP_VERTEX_COLOR_UNCLAMPED:
	case PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION:
	case PIPE_CAP_TGSI_INSTANCEID:
	case PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY:
	case PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY:
	case PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY:
	case PIPE_CAP_USER_INDEX_BUFFERS:
	case PIPE_CAP_USER_CONSTANT_BUFFERS:
	case PIPE_CAP_COMPUTE:
	case PIPE_CAP_START_INSTANCE:
	case PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS:
	case PIPE_CAP_TEXTURE_BUFFER_OBJECTS:
	case PIPE_CAP_PREFER_BLIT_BASED_TEXTURE_TRANSFER:
	case PIPE_CAP_QUERY_PIPELINE_STATISTICS:
	case PIPE_CAP_TEXTURE_MULTISAMPLE:
		return 1;

	case PIPE_CAP_TGSI_TEXCOORD:
		return 0;

	/* Clamp to 32 bits; vram_size is a 64-bit byte count. */
	case PIPE_CAP_MAX_TEXTURE_BUFFER_SIZE:
		return MIN2(rscreen->b.info.vram_size, 0xFFFFFFFF);

	case PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT:
		return R600_MAP_BUFFER_ALIGNMENT;

	case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
		return 256;

	case PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT:
		return 1;

	case PIPE_CAP_GLSL_FEATURE_LEVEL:
		return 140;

	/* Supported except the original R600. */
	case PIPE_CAP_INDEP_BLEND_ENABLE:
	case PIPE_CAP_INDEP_BLEND_FUNC:
		/* R600 doesn't support per-MRT blends */
		return family == CHIP_R600 ? 0 : 1;

	/* Supported on Evergreen. */
	case PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE:
	case PIPE_CAP_CUBE_MAP_ARRAY:
		return family >= CHIP_CEDAR ? 1 : 0;

	/* Unsupported features. */
	case PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT:
	case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER:
	case PIPE_CAP_SCALED_RESOLVE:
	case PIPE_CAP_TGSI_CAN_COMPACT_CONSTANTS:
	case PIPE_CAP_FRAGMENT_COLOR_CLAMPED:
	case PIPE_CAP_VERTEX_COLOR_CLAMPED:
	case PIPE_CAP_USER_VERTEX_BUFFERS:
		return 0;

	/* Stream output. */
	case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS:
		return rscreen->has_streamout ? 4 : 0;
	case PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME:
		return rscreen->has_streamout ? 1 : 0;
	case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS:
	case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS:
		return 32*4;

	/* Texturing. */
	case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
	case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
	case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
		/* Evergreen+: 16K textures (15 mip levels), older: 8K (14). */
		if (family >= CHIP_CEDAR)
			return 15;
		else
			return 14;
	case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
		/* Array textures need DRM minor >= 9. */
		return rscreen->b.info.drm_minor >= 9 ?
			(family >= CHIP_CEDAR ? 16384 : 8192) : 0;
	case PIPE_CAP_MAX_COMBINED_SAMPLERS:
		return 32;

	/* Render targets. */
	case PIPE_CAP_MAX_RENDER_TARGETS:
		/* XXX some r6xx are buggy and can only do 4 */
		return 8;

	case PIPE_CAP_MAX_VIEWPORTS:
		return 1;

	/* Timer queries, present when the clock frequency is non zero. */
	case PIPE_CAP_QUERY_TIME_ELAPSED:
		return rscreen->b.info.r600_clock_crystal_freq != 0;
	case PIPE_CAP_QUERY_TIMESTAMP:
		return rscreen->b.info.drm_minor >= 20 &&
		       rscreen->b.info.r600_clock_crystal_freq != 0;

	case PIPE_CAP_MIN_TEXEL_OFFSET:
		return -8;

	case PIPE_CAP_MAX_TEXEL_OFFSET:
		return 7;

	case PIPE_CAP_TEXTURE_BORDER_COLOR_QUIRK:
		return PIPE_QUIRK_TEXTURE_BORDER_COLOR_SWIZZLE_R600;
	case PIPE_CAP_ENDIANNESS:
		return PIPE_ENDIAN_LITTLE;
	}
	return 0;
}
595
596 static float r600_get_paramf(struct pipe_screen* pscreen,
597 enum pipe_capf param)
598 {
599 struct r600_screen *rscreen = (struct r600_screen *)pscreen;
600 enum radeon_family family = rscreen->b.family;
601
602 switch (param) {
603 case PIPE_CAPF_MAX_LINE_WIDTH:
604 case PIPE_CAPF_MAX_LINE_WIDTH_AA:
605 case PIPE_CAPF_MAX_POINT_WIDTH:
606 case PIPE_CAPF_MAX_POINT_WIDTH_AA:
607 if (family >= CHIP_CEDAR)
608 return 16384.0f;
609 else
610 return 8192.0f;
611 case PIPE_CAPF_MAX_TEXTURE_ANISOTROPY:
612 return 16.0f;
613 case PIPE_CAPF_MAX_TEXTURE_LOD_BIAS:
614 return 16.0f;
615 case PIPE_CAPF_GUARD_BAND_LEFT:
616 case PIPE_CAPF_GUARD_BAND_TOP:
617 case PIPE_CAPF_GUARD_BAND_RIGHT:
618 case PIPE_CAPF_GUARD_BAND_BOTTOM:
619 return 0.0f;
620 }
621 return 0.0f;
622 }
623
624 static int r600_get_shader_param(struct pipe_screen* pscreen, unsigned shader, enum pipe_shader_cap param)
625 {
626 switch(shader)
627 {
628 case PIPE_SHADER_FRAGMENT:
629 case PIPE_SHADER_VERTEX:
630 case PIPE_SHADER_COMPUTE:
631 break;
632 case PIPE_SHADER_GEOMETRY:
633 /* XXX: support and enable geometry programs */
634 return 0;
635 default:
636 /* XXX: support tessellation on Evergreen */
637 return 0;
638 }
639
640 switch (param) {
641 case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
642 case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
643 case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
644 case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
645 return 16384;
646 case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
647 return 32;
648 case PIPE_SHADER_CAP_MAX_INPUTS:
649 return 32;
650 case PIPE_SHADER_CAP_MAX_TEMPS:
651 return 256; /* Max native temporaries. */
652 case PIPE_SHADER_CAP_MAX_ADDRS:
653 /* XXX Isn't this equal to TEMPS? */
654 return 1; /* Max native address registers */
655 case PIPE_SHADER_CAP_MAX_CONSTS:
656 return R600_MAX_CONST_BUFFER_SIZE;
657 case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
658 return R600_MAX_USER_CONST_BUFFERS;
659 case PIPE_SHADER_CAP_MAX_PREDS:
660 return 0; /* nothing uses this */
661 case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED:
662 return 1;
663 case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:
664 return 0;
665 case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:
666 case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
667 case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
668 case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
669 return 1;
670 case PIPE_SHADER_CAP_SUBROUTINES:
671 return 0;
672 case PIPE_SHADER_CAP_INTEGERS:
673 return 1;
674 case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
675 return 16;
676 case PIPE_SHADER_CAP_PREFERRED_IR:
677 if (shader == PIPE_SHADER_COMPUTE) {
678 return PIPE_SHADER_IR_LLVM;
679 } else {
680 return PIPE_SHADER_IR_TGSI;
681 }
682 }
683 return 0;
684 }
685
686 static int r600_get_video_param(struct pipe_screen *screen,
687 enum pipe_video_profile profile,
688 enum pipe_video_entrypoint entrypoint,
689 enum pipe_video_cap param)
690 {
691 switch (param) {
692 case PIPE_VIDEO_CAP_SUPPORTED:
693 return vl_profile_supported(screen, profile, entrypoint);
694 case PIPE_VIDEO_CAP_NPOT_TEXTURES:
695 return 1;
696 case PIPE_VIDEO_CAP_MAX_WIDTH:
697 case PIPE_VIDEO_CAP_MAX_HEIGHT:
698 return vl_video_buffer_max_size(screen);
699 case PIPE_VIDEO_CAP_PREFERED_FORMAT:
700 return PIPE_FORMAT_NV12;
701 case PIPE_VIDEO_CAP_PREFERS_INTERLACED:
702 return false;
703 case PIPE_VIDEO_CAP_SUPPORTS_INTERLACED:
704 return false;
705 case PIPE_VIDEO_CAP_SUPPORTS_PROGRESSIVE:
706 return true;
707 case PIPE_VIDEO_CAP_MAX_LEVEL:
708 return vl_level_supported(screen, profile);
709 default:
710 return 0;
711 }
712 }
713
714 const char * r600_llvm_gpu_string(enum radeon_family family)
715 {
716 const char * gpu_family;
717
718 switch (family) {
719 case CHIP_R600:
720 case CHIP_RV630:
721 case CHIP_RV635:
722 case CHIP_RV670:
723 gpu_family = "r600";
724 break;
725 case CHIP_RV610:
726 case CHIP_RV620:
727 case CHIP_RS780:
728 case CHIP_RS880:
729 gpu_family = "rs880";
730 break;
731 case CHIP_RV710:
732 gpu_family = "rv710";
733 break;
734 case CHIP_RV730:
735 gpu_family = "rv730";
736 break;
737 case CHIP_RV740:
738 case CHIP_RV770:
739 gpu_family = "rv770";
740 break;
741 case CHIP_PALM:
742 case CHIP_CEDAR:
743 gpu_family = "cedar";
744 break;
745 case CHIP_SUMO:
746 case CHIP_SUMO2:
747 gpu_family = "sumo";
748 break;
749 case CHIP_REDWOOD:
750 gpu_family = "redwood";
751 break;
752 case CHIP_JUNIPER:
753 gpu_family = "juniper";
754 break;
755 case CHIP_HEMLOCK:
756 case CHIP_CYPRESS:
757 gpu_family = "cypress";
758 break;
759 case CHIP_BARTS:
760 gpu_family = "barts";
761 break;
762 case CHIP_TURKS:
763 gpu_family = "turks";
764 break;
765 case CHIP_CAICOS:
766 gpu_family = "caicos";
767 break;
768 case CHIP_CAYMAN:
769 case CHIP_ARUBA:
770 gpu_family = "cayman";
771 break;
772 default:
773 gpu_family = "";
774 fprintf(stderr, "Chip not supported by r600 llvm "
775 "backend, please file a bug at " PACKAGE_BUGREPORT "\n");
776 break;
777 }
778 return gpu_family;
779 }
780
781
/**
 * pipe_screen::get_compute_param - query OpenCL-style compute limits.
 *
 * Size-query protocol: each case writes the value into *ret only when
 * ret is non-NULL, and always returns the number of bytes the value
 * occupies, so callers can first pass ret = NULL to size their buffer.
 */
static int r600_get_compute_param(struct pipe_screen *screen,
        enum pipe_compute_cap param,
        void *ret)
{
	struct r600_screen *rscreen = (struct r600_screen *)screen;
	//TODO: select these params by asic
	switch (param) {
	case PIPE_COMPUTE_CAP_IR_TARGET: {
		const char *gpu = r600_llvm_gpu_string(rscreen->b.family);
		if (ret) {
			/* LLVM target triple of the form "<gpu>-r600--". */
			sprintf(ret, "%s-r600--", gpu);
		}
		/* "-r600--" (7 chars) plus the NUL terminator = 8 extra bytes. */
		return (8 + strlen(gpu)) * sizeof(char);
	}
	case PIPE_COMPUTE_CAP_GRID_DIMENSION:
		if (ret) {
			uint64_t * grid_dimension = ret;
			grid_dimension[0] = 3;
		}
		return 1 * sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_GRID_SIZE:
		if (ret) {
			uint64_t * grid_size = ret;
			grid_size[0] = 65535;
			grid_size[1] = 65535;
			grid_size[2] = 1;
		}
		return 3 * sizeof(uint64_t) ;

	case PIPE_COMPUTE_CAP_MAX_BLOCK_SIZE:
		if (ret) {
			uint64_t * block_size = ret;
			block_size[0] = 256;
			block_size[1] = 256;
			block_size[2] = 256;
		}
		return 3 * sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK:
		if (ret) {
			uint64_t * max_threads_per_block = ret;
			*max_threads_per_block = 256;
		}
		return sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_GLOBAL_SIZE:
		if (ret) {
			uint64_t * max_global_size = ret;
			/* XXX: This is what the proprietary driver reports, we
			 * may want to use a different value. */
			*max_global_size = 201326592;
		}
		return sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_INPUT_SIZE:
		if (ret) {
			uint64_t * max_input_size = ret;
			*max_input_size = 1024;
		}
		return sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_LOCAL_SIZE:
		if (ret) {
			uint64_t * max_local_size = ret;
			/* XXX: This is what the proprietary driver reports, we
			 * may want to use a different value. */
			*max_local_size = 32768;
		}
		return sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE:
		if (ret) {
			uint64_t max_global_size;
			uint64_t * max_mem_alloc_size = ret;
			/* Recursive query: derive the limit from MAX_GLOBAL_SIZE. */
			r600_get_compute_param(screen,
				PIPE_COMPUTE_CAP_MAX_GLOBAL_SIZE,
				&max_global_size);
			/* OpenCL requres this value be at least
			 * max(MAX_GLOBAL_SIZE / 4, 128 * 1024 *1024)
			 * I'm really not sure what value to report here, but
			 * MAX_GLOBAL_SIZE / 4 seems resonable.
			 */
			*max_mem_alloc_size = max_global_size / 4;
		}
		return sizeof(uint64_t);

	default:
		fprintf(stderr, "unknown PIPE_COMPUTE_CAP %d\n", param);
		return 0;
	}
}
874
/**
 * pipe_screen::destroy - tear down the screen.
 *
 * The winsys is shared between screens and reference-counted; teardown
 * only proceeds once the last reference is dropped.
 */
static void r600_destroy_screen(struct pipe_screen* pscreen)
{
	struct r600_screen *rscreen = (struct r600_screen *)pscreen;

	if (rscreen == NULL)
		return;

	if (!radeon_winsys_unref(rscreen->b.ws))
		return;

	r600_common_screen_cleanup(&rscreen->b);

	if (rscreen->global_pool) {
		compute_memory_pool_delete(rscreen->global_pool);
	}

	/* Free all fence blocks, then unmap and release the shared fence BO. */
	if (rscreen->fences.bo) {
		struct r600_fence_block *entry, *tmp;

		LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, &rscreen->fences.blocks, head) {
			LIST_DEL(&entry->head);
			FREE(entry);
		}

		rscreen->b.ws->buffer_unmap(rscreen->fences.bo->cs_buf);
		pipe_resource_reference((struct pipe_resource**)&rscreen->fences.bo, NULL);
	}
	if (rscreen->trace_bo) {
		rscreen->b.ws->buffer_unmap(rscreen->trace_bo->cs_buf);
		pipe_resource_reference((struct pipe_resource**)&rscreen->trace_bo, NULL);
	}
	pipe_mutex_destroy(rscreen->fences.mutex);

	/* Destroy the winsys last; everything above may still use it. */
	rscreen->b.ws->destroy(rscreen->b.ws);
	FREE(rscreen);
}
911
/**
 * pipe_screen::fence_reference - retarget *ptr at 'fence', adjusting
 * reference counts.
 *
 * A fence whose refcount drops to zero is not freed: its sleep BO is
 * released and the struct is appended to the screen's pool, where
 * r600_create_fence recycles it once its slot has signalled.
 *
 * NOTE(review): *ptr appears to be assumed non-NULL (its refcount is
 * always dereferenced) - verify against callers.
 */
static void r600_fence_reference(struct pipe_screen *pscreen,
                                 struct pipe_fence_handle **ptr,
                                 struct pipe_fence_handle *fence)
{
	struct r600_fence **oldf = (struct r600_fence**)ptr;
	struct r600_fence *newf = (struct r600_fence*)fence;

	/* pipe_reference returns true when the old object must be destroyed. */
	if (pipe_reference(&(*oldf)->reference, &newf->reference)) {
		struct r600_screen *rscreen = (struct r600_screen *)pscreen;
		pipe_mutex_lock(rscreen->fences.mutex);
		pipe_resource_reference((struct pipe_resource**)&(*oldf)->sleep_bo, NULL);
		LIST_ADDTAIL(&(*oldf)->head, &rscreen->fences.pool);
		pipe_mutex_unlock(rscreen->fences.mutex);
	}

	*ptr = fence;
}
929
930 static boolean r600_fence_signalled(struct pipe_screen *pscreen,
931 struct pipe_fence_handle *fence)
932 {
933 struct r600_screen *rscreen = (struct r600_screen *)pscreen;
934 struct r600_fence *rfence = (struct r600_fence*)fence;
935
936 return rscreen->fences.data[rfence->index] != 0;
937 }
938
/**
 * pipe_screen::fence_finish - wait for a fence, with timeout.
 *
 * \param timeout  wait budget, or PIPE_TIMEOUT_INFINITE (converted to
 *                 microseconds below to match os_time_get()).
 * \return TRUE when the fence slot has been signalled.
 *
 * Infinite waits block on the fence's dummy sleep BO becoming idle;
 * finite waits busy-spin, yielding/sleeping every 256 iterations.
 */
static boolean r600_fence_finish(struct pipe_screen *pscreen,
                                 struct pipe_fence_handle *fence,
                                 uint64_t timeout)
{
	struct r600_screen *rscreen = (struct r600_screen *)pscreen;
	struct r600_fence *rfence = (struct r600_fence*)fence;
	int64_t start_time = 0;
	unsigned spins = 0;

	if (timeout != PIPE_TIMEOUT_INFINITE) {
		start_time = os_time_get();

		/* Convert to microseconds. */
		timeout /= 1000;
	}

	while (rscreen->fences.data[rfence->index] == 0) {
		/* Special-case infinite timeout - wait for the dummy BO to become idle */
		if (timeout == PIPE_TIMEOUT_INFINITE) {
			rscreen->b.ws->buffer_wait(rfence->sleep_bo->buf, RADEON_USAGE_READWRITE);
			break;
		}

		/* The dummy BO will be busy until the CS including the fence has completed, or
		 * the GPU is reset. Don't bother continuing to spin when the BO is idle. */
		if (!rscreen->b.ws->buffer_is_busy(rfence->sleep_bo->buf, RADEON_USAGE_READWRITE))
			break;

		/* Spin 256 times between yields to keep latency low. */
		if (++spins % 256)
			continue;
#ifdef PIPE_OS_UNIX
		sched_yield();
#else
		os_time_sleep(10);
#endif
		if (timeout != PIPE_TIMEOUT_INFINITE &&
		    os_time_get() - start_time >= timeout) {
			break;
		}
	}

	/* The loop can also exit on timeout or an idle BO, so re-check. */
	return rscreen->fences.data[rfence->index] != 0;
}
982
983 static uint64_t r600_get_timestamp(struct pipe_screen *screen)
984 {
985 struct r600_screen *rscreen = (struct r600_screen*)screen;
986
987 return 1000000 * rscreen->b.ws->query_value(rscreen->b.ws, RADEON_TIMESTAMP) /
988 rscreen->b.info.r600_clock_crystal_freq;
989 }
990
/**
 * pipe_screen::get_driver_query_info - enumerate driver-specific queries.
 *
 * Called with info == NULL to obtain the query count, then once per index
 * to fill in each descriptor; returns 0 for an out-of-range index.
 */
static int r600_get_driver_query_info(struct pipe_screen *screen,
                                      unsigned index,
                                      struct pipe_driver_query_info *info)
{
	struct r600_screen *rscreen = (struct r600_screen*)screen;
	struct pipe_driver_query_info list[] = {
		{"draw-calls", R600_QUERY_DRAW_CALLS, 0},
		{"requested-VRAM", R600_QUERY_REQUESTED_VRAM, rscreen->b.info.vram_size, TRUE},
		{"requested-GTT", R600_QUERY_REQUESTED_GTT, rscreen->b.info.gart_size, TRUE},
		{"buffer-wait-time", R600_QUERY_BUFFER_WAIT_TIME, 0, FALSE}
	};

	if (!info)
		return Elements(list);

	if (index >= Elements(list))
		return 0;

	*info = list[index];
	return 1;
}
1012
/**
 * Create and initialize an r600 pipe_screen on top of the given winsys.
 *
 * Fills in the pipe_screen vtable (selecting Evergreen- vs. R600-class
 * format support and UVD vs. software video paths based on the probed
 * chip), runs the shared radeon screen init, parses the R600_* debug
 * environment variables, and derives per-chip feature availability
 * (streamout, MSAA, CP DMA) from the chip class and the kernel DRM
 * minor version.
 *
 * Returns NULL on allocation failure, on common-init failure, or when
 * the PCI id maps to no known chip family. The returned screen is owned
 * by the caller and released through pipe_screen::destroy.
 */
struct pipe_screen *r600_screen_create(struct radeon_winsys *ws)
{
	struct r600_screen *rscreen = CALLOC_STRUCT(r600_screen);

	if (rscreen == NULL) {
		return NULL;
	}

	/* Set functions first. The vtable is populated before
	 * r600_common_screen_init runs — presumably the common init relies on
	 * some of these pointers being in place; confirm before reordering. */
	rscreen->b.b.context_create = r600_create_context;
	rscreen->b.b.destroy = r600_destroy_screen;
	rscreen->b.b.get_name = r600_get_name;
	rscreen->b.b.get_vendor = r600_get_vendor;
	rscreen->b.b.get_param = r600_get_param;
	rscreen->b.b.get_shader_param = r600_get_shader_param;
	rscreen->b.b.get_paramf = r600_get_paramf;
	rscreen->b.b.get_compute_param = r600_get_compute_param;
	rscreen->b.b.get_timestamp = r600_get_timestamp;
	/* Evergreen and newer chips use their own format-support table. */
	if (rscreen->b.chip_class >= EVERGREEN) {
		rscreen->b.b.is_format_supported = evergreen_is_format_supported;
	} else {
		rscreen->b.b.is_format_supported = r600_is_format_supported;
	}
	rscreen->b.b.fence_reference = r600_fence_reference;
	rscreen->b.b.fence_signalled = r600_fence_signalled;
	rscreen->b.b.fence_finish = r600_fence_finish;
	rscreen->b.b.get_driver_query_info = r600_get_driver_query_info;
	/* Hardware video decode via UVD when present, otherwise the generic
	 * software video-buffer path. */
	if (rscreen->b.info.has_uvd) {
		rscreen->b.b.get_video_param = ruvd_get_video_param;
		rscreen->b.b.is_video_format_supported = ruvd_is_format_supported;
	} else {
		rscreen->b.b.get_video_param = r600_get_video_param;
		rscreen->b.b.is_video_format_supported = vl_video_buffer_is_format_supported;
	}
	r600_init_screen_resource_functions(&rscreen->b.b);

	if (!r600_common_screen_init(&rscreen->b, ws)) {
		FREE(rscreen);
		return NULL;
	}

	/* Debug switches from the environment. R600_DEBUG is a comma-separated
	 * flag list; the remaining variables are legacy boolean shortcuts that
	 * map onto the same flag word. */
	rscreen->b.debug_flags |= debug_get_flags_option("R600_DEBUG", r600_debug_options, 0);
	if (debug_get_bool_option("R600_DEBUG_COMPUTE", FALSE))
		rscreen->b.debug_flags |= DBG_COMPUTE;
	if (debug_get_bool_option("R600_DUMP_SHADERS", FALSE))
		rscreen->b.debug_flags |= DBG_FS | DBG_VS | DBG_GS | DBG_PS | DBG_CS;
	if (!debug_get_bool_option("R600_HYPERZ", TRUE))
		rscreen->b.debug_flags |= DBG_NO_HYPERZ;
	if (!debug_get_bool_option("R600_LLVM", TRUE))
		rscreen->b.debug_flags |= DBG_NO_LLVM;

	/* Common init probed the PCI id; bail out if it matched no family. */
	if (rscreen->b.family == CHIP_UNKNOWN) {
		fprintf(stderr, "r600: Unknown chipset 0x%04X\n", rscreen->b.info.pci_id);
		FREE(rscreen);
		return NULL;
	}

	/* Figure out streamout kernel support. The drm_minor thresholds below
	 * are the minimum kernel DRM interface versions that expose the
	 * feature for each chip class. */
	switch (rscreen->b.chip_class) {
	case R600:
		if (rscreen->b.family < CHIP_RS780) {
			rscreen->has_streamout = rscreen->b.info.drm_minor >= 14;
		} else {
			rscreen->has_streamout = rscreen->b.info.drm_minor >= 23;
		}
		break;
	case R700:
		rscreen->has_streamout = rscreen->b.info.drm_minor >= 17;
		break;
	case EVERGREEN:
	case CAYMAN:
		rscreen->has_streamout = rscreen->b.info.drm_minor >= 14;
		break;
	default:
		rscreen->has_streamout = FALSE;
		break;
	}

	/* MSAA support. Compressed MSAA texturing additionally needs either
	 * Cayman-class hardware or a newer kernel on Evergreen. */
	switch (rscreen->b.chip_class) {
	case R600:
	case R700:
		rscreen->has_msaa = rscreen->b.info.drm_minor >= 22;
		rscreen->has_compressed_msaa_texturing = false;
		break;
	case EVERGREEN:
		rscreen->has_msaa = rscreen->b.info.drm_minor >= 19;
		rscreen->has_compressed_msaa_texturing = rscreen->b.info.drm_minor >= 24;
		break;
	case CAYMAN:
		rscreen->has_msaa = rscreen->b.info.drm_minor >= 19;
		rscreen->has_compressed_msaa_texturing = true;
		break;
	default:
		rscreen->has_msaa = FALSE;
		rscreen->has_compressed_msaa_texturing = false;
	}

	/* CP DMA needs kernel support and can be disabled for debugging. */
	rscreen->has_cp_dma = rscreen->b.info.drm_minor >= 27 &&
			      !(rscreen->b.debug_flags & DBG_NO_CP_DMA);

	/* Screen-wide fence bookkeeping. The backing BO starts out NULL —
	 * presumably allocated lazily on first fence use; confirm against the
	 * fence code elsewhere in this file. */
	rscreen->fences.bo = NULL;
	rscreen->fences.data = NULL;
	rscreen->fences.next_index = 0;
	LIST_INITHEAD(&rscreen->fences.pool);
	LIST_INITHEAD(&rscreen->fences.blocks);
	pipe_mutex_init(rscreen->fences.mutex);

	/* Shared memory pool for compute resources. */
	rscreen->global_pool = compute_memory_pool_new(rscreen);

	/* Optional command-stream trace buffer, gated on the DBG_TRACE_CS
	 * debug flag and kernel support (drm_minor >= 28). Kept persistently
	 * mapped (unsynchronized) so traces can be written cheaply. */
	rscreen->cs_count = 0;
	if (rscreen->b.info.drm_minor >= 28 && (rscreen->b.debug_flags & DBG_TRACE_CS)) {
		rscreen->trace_bo = (struct r600_resource*)pipe_buffer_create(&rscreen->b.b,
										PIPE_BIND_CUSTOM,
										PIPE_USAGE_STAGING,
										4096);
		if (rscreen->trace_bo) {
			rscreen->trace_ptr = rscreen->b.ws->buffer_map(rscreen->trace_bo->cs_buf, NULL,
									PIPE_TRANSFER_UNSYNCHRONIZED);
		}
	}

#if 0 /* This is for testing whether aux_context and buffer clearing work correctly. */
	struct pipe_resource templ = {};

	templ.width0 = 4;
	templ.height0 = 2048;
	templ.depth0 = 1;
	templ.array_size = 1;
	templ.target = PIPE_TEXTURE_2D;
	templ.format = PIPE_FORMAT_R8G8B8A8_UNORM;
	templ.usage = PIPE_USAGE_STATIC;

	struct r600_resource *res = r600_resource(rscreen->screen.resource_create(&rscreen->screen, &templ));
	unsigned char *map = ws->buffer_map(res->cs_buf, NULL, PIPE_TRANSFER_WRITE);

	memset(map, 0, 256);

	r600_screen_clear_buffer(rscreen, &res->b.b, 4, 4, 0xCC);
	r600_screen_clear_buffer(rscreen, &res->b.b, 8, 4, 0xDD);
	r600_screen_clear_buffer(rscreen, &res->b.b, 12, 4, 0xEE);
	r600_screen_clear_buffer(rscreen, &res->b.b, 20, 4, 0xFF);
	r600_screen_clear_buffer(rscreen, &res->b.b, 32, 20, 0x87);

	ws->buffer_wait(res->buf, RADEON_USAGE_WRITE);

	int i;
	for (i = 0; i < 256; i++) {
		printf("%02X", map[i]);
		if (i % 16 == 15)
			printf("\n");
	}
#endif

	return &rscreen->b.b;
}