gallium: extend pipe_context::flush for it to accept an END_OF_FRAME flag
[mesa.git] / src / gallium / drivers / r600 / r600_pipe.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include "r600_pipe.h"
24 #include "r600_public.h"
25
26 #include <errno.h>
27 #include "pipe/p_shader_tokens.h"
28 #include "util/u_blitter.h"
29 #include "util/u_format_s3tc.h"
30 #include "util/u_memory.h"
31 #include "util/u_simple_shaders.h"
32 #include "util/u_upload_mgr.h"
33 #include "vl/vl_decoder.h"
34 #include "vl/vl_video_buffer.h"
35 #include "os/os_time.h"
36
37 /*
38 * pipe_context
39 */
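/*
 * Fence bookkeeping (as implemented below): all fences share one 4096-byte
 * staging buffer owned by the screen. Each fence gets a dword slot in that
 * buffer; the slot is cleared to 0 when the fence is emitted and the GPU
 * writes 1 to it when the command stream reaches the fence, so "signalled"
 * simply means data[index] != 0. Freed fences are kept in a pool and their
 * slots are recycled once they have signalled.
 */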
40 static struct r600_fence *r600_create_fence(struct r600_context *rctx)
41 {
42 struct r600_screen *rscreen = rctx->screen;
43 struct r600_fence *fence = NULL;
44
45 pipe_mutex_lock(rscreen->fences.mutex);
46
47 if (!rscreen->fences.bo) {
48 /* Create the shared buffer object */
49 rscreen->fences.bo = (struct r600_resource*)
50 pipe_buffer_create(&rscreen->screen, PIPE_BIND_CUSTOM,
51 PIPE_USAGE_STAGING, 4096);
52 if (!rscreen->fences.bo) {
53 R600_ERR("r600: failed to create bo for fence objects\n");
54 goto out;
55 }
56 rscreen->fences.data = rctx->ws->buffer_map(rscreen->fences.bo->cs_buf,
57 rctx->cs,
58 PIPE_TRANSFER_READ_WRITE);
59 }
60
61 if (!LIST_IS_EMPTY(&rscreen->fences.pool)) {
62 struct r600_fence *entry;
63
64 /* Try to find a freed fence that has been signalled */
65 LIST_FOR_EACH_ENTRY(entry, &rscreen->fences.pool, head) {
66 if (rscreen->fences.data[entry->index] != 0) {
67 LIST_DELINIT(&entry->head);
68 fence = entry;
69 break;
70 }
71 }
72 }
73
74 if (!fence) {
75 /* Allocate a new fence */
76 struct r600_fence_block *block;
77 unsigned index;
78
79 if ((rscreen->fences.next_index + 1) >= 1024) {
80 R600_ERR("r600: too many concurrent fences\n");
81 goto out;
82 }
83
84 index = rscreen->fences.next_index++;
85
86 if (!(index % FENCE_BLOCK_SIZE)) {
87 /* Allocate a new block */
88 block = CALLOC_STRUCT(r600_fence_block);
89 if (block == NULL)
90 goto out;
91
92 LIST_ADD(&block->head, &rscreen->fences.blocks);
93 } else {
94 block = LIST_ENTRY(struct r600_fence_block, rscreen->fences.blocks.next, head);
95 }
96
97 fence = &block->fences[index % FENCE_BLOCK_SIZE];
98 fence->index = index;
99 }
100
101 pipe_reference_init(&fence->reference, 1);
102
103 rscreen->fences.data[fence->index] = 0;
104 r600_context_emit_fence(rctx, rscreen->fences.bo, fence->index, 1);
105
106 	/* Create a dummy BO so that fence_finish with an infinite timeout can sleep waiting for completion */
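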
107 fence->sleep_bo = (struct r600_resource*)
108 pipe_buffer_create(&rctx->screen->screen, PIPE_BIND_CUSTOM,
109 PIPE_USAGE_STAGING, 1);
110 /* Add the fence as a dummy relocation. */
111 r600_context_bo_reloc(rctx, fence->sleep_bo, RADEON_USAGE_READWRITE);
112
113 out:
114 pipe_mutex_unlock(rscreen->fences.mutex);
115 return fence;
116 }
117
118
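/*
 * Flush the current command stream. If the caller wants a fence, one is
 * created and emitted first so it signals once this CS has completed. Any
 * active render condition is switched off around the flush and re-enabled
 * afterwards (presumably so the predication state is re-emitted cleanly in
 * the new CS).
 */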
119 void r600_flush(struct pipe_context *ctx, struct pipe_fence_handle **fence,
120 unsigned flags)
121 {
122 struct r600_context *rctx = (struct r600_context *)ctx;
123 struct r600_fence **rfence = (struct r600_fence**)fence;
124 struct pipe_query *render_cond = NULL;
125 unsigned render_cond_mode = 0;
126
127 if (rfence)
128 *rfence = r600_create_fence(rctx);
129
130 /* Disable render condition. */
131 if (rctx->current_render_cond) {
132 render_cond = rctx->current_render_cond;
133 render_cond_mode = rctx->current_render_cond_mode;
134 ctx->render_condition(ctx, NULL, 0);
135 }
136
137 r600_context_flush(rctx, flags);
138
139 /* Re-enable render condition. */
140 if (render_cond) {
141 ctx->render_condition(ctx, render_cond, render_cond_mode);
142 }
143 }
144
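/*
 * pipe_context::flush entry point used by the state tracker. The
 * pipe_flush_flags argument (which, per the commit subject above, can now
 * carry an END_OF_FRAME hint) is accepted but not acted upon here: r600
 * always passes 0 down to its internal flush path.
 */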
145 static void r600_flush_from_st(struct pipe_context *ctx,
146 struct pipe_fence_handle **fence,
147 enum pipe_flush_flags flags)
148 {
149 r600_flush(ctx, fence, 0);
150 }
151
152 static void r600_flush_from_winsys(void *ctx, unsigned flags)
153 {
154 r600_flush((struct pipe_context*)ctx, NULL, flags);
155 }
156
157 static void r600_destroy_context(struct pipe_context *context)
158 {
159 struct r600_context *rctx = (struct r600_context *)context;
160
161 pipe_resource_reference((struct pipe_resource**)&rctx->dummy_cmask, NULL);
162 pipe_resource_reference((struct pipe_resource**)&rctx->dummy_fmask, NULL);
163
164 if (rctx->dummy_pixel_shader) {
165 rctx->context.delete_fs_state(&rctx->context, rctx->dummy_pixel_shader);
166 }
167 if (rctx->custom_dsa_flush) {
168 rctx->context.delete_depth_stencil_alpha_state(&rctx->context, rctx->custom_dsa_flush);
169 }
170 if (rctx->custom_blend_resolve) {
171 rctx->context.delete_blend_state(&rctx->context, rctx->custom_blend_resolve);
172 }
173 if (rctx->custom_blend_decompress) {
174 rctx->context.delete_blend_state(&rctx->context, rctx->custom_blend_decompress);
175 }
176 if (rctx->custom_blend_fmask_decompress) {
177 rctx->context.delete_blend_state(&rctx->context, rctx->custom_blend_fmask_decompress);
178 }
179 util_unreference_framebuffer_state(&rctx->framebuffer.state);
180
181 r600_context_fini(rctx);
182
183 if (rctx->blitter) {
184 util_blitter_destroy(rctx->blitter);
185 }
186 if (rctx->uploader) {
187 u_upload_destroy(rctx->uploader);
188 }
189 if (rctx->allocator_so_filled_size) {
190 u_suballocator_destroy(rctx->allocator_so_filled_size);
191 }
192 if (rctx->allocator_fetch_shader) {
193 u_suballocator_destroy(rctx->allocator_fetch_shader);
194 }
195 util_slab_destroy(&rctx->pool_transfers);
196
197 r600_release_command_buffer(&rctx->start_cs_cmd);
198
199 if (rctx->cs) {
200 rctx->ws->cs_destroy(rctx->cs);
201 }
202
203 FREE(rctx->range);
204 FREE(rctx);
205 }
206
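/*
 * Create a context: set up the pipe_context vtable, allocate the register
 * range bookkeeping, install the per-generation (r6xx/r7xx vs.
 * evergreen/cayman) state functions and start-of-CS state, create the winsys
 * command stream plus the upload manager, suballocators and blitter, and
 * finally begin the first CS and bind a dummy pixel shader.
 */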
207 static struct pipe_context *r600_create_context(struct pipe_screen *screen, void *priv)
208 {
209 struct r600_context *rctx = CALLOC_STRUCT(r600_context);
210 struct r600_screen* rscreen = (struct r600_screen *)screen;
211
212 if (rctx == NULL)
213 return NULL;
214
215 util_slab_create(&rctx->pool_transfers,
216 sizeof(struct r600_transfer), 64,
217 UTIL_SLAB_SINGLETHREADED);
218
219 rctx->context.screen = screen;
220 rctx->context.priv = priv;
221 rctx->context.destroy = r600_destroy_context;
222 rctx->context.flush = r600_flush_from_st;
223
224 /* Easy accessing of screen/winsys. */
225 rctx->screen = rscreen;
226 rctx->ws = rscreen->ws;
227 rctx->family = rscreen->family;
228 rctx->chip_class = rscreen->chip_class;
229 rctx->keep_tiling_flags = rscreen->info.drm_minor >= 12;
230
231 LIST_INITHEAD(&rctx->active_nontimer_queries);
232 LIST_INITHEAD(&rctx->dirty);
233 LIST_INITHEAD(&rctx->enable_list);
234
235 rctx->range = CALLOC(NUM_RANGES, sizeof(struct r600_range));
236 if (!rctx->range)
237 goto fail;
238
239 r600_init_blit_functions(rctx);
240 r600_init_query_functions(rctx);
241 r600_init_context_resource_functions(rctx);
242 r600_init_surface_functions(rctx);
243
244
245 rctx->context.create_video_decoder = vl_create_decoder;
246 rctx->context.create_video_buffer = vl_video_buffer_create;
247
248 r600_init_common_state_functions(rctx);
249
250 switch (rctx->chip_class) {
251 case R600:
252 case R700:
253 r600_init_state_functions(rctx);
254 r600_init_atom_start_cs(rctx);
255 if (r600_context_init(rctx))
256 goto fail;
257 rctx->custom_dsa_flush = r600_create_db_flush_dsa(rctx);
258 rctx->custom_blend_resolve = rctx->chip_class == R700 ? r700_create_resolve_blend(rctx)
259 : r600_create_resolve_blend(rctx);
260 rctx->custom_blend_decompress = r600_create_decompress_blend(rctx);
261 rctx->has_vertex_cache = !(rctx->family == CHIP_RV610 ||
262 rctx->family == CHIP_RV620 ||
263 rctx->family == CHIP_RS780 ||
264 rctx->family == CHIP_RS880 ||
265 rctx->family == CHIP_RV710);
266 break;
267 case EVERGREEN:
268 case CAYMAN:
269 evergreen_init_state_functions(rctx);
270 evergreen_init_atom_start_cs(rctx);
271 evergreen_init_atom_start_compute_cs(rctx);
272 if (evergreen_context_init(rctx))
273 goto fail;
274 rctx->custom_dsa_flush = evergreen_create_db_flush_dsa(rctx);
275 rctx->custom_blend_resolve = evergreen_create_resolve_blend(rctx);
276 rctx->custom_blend_decompress = evergreen_create_decompress_blend(rctx);
277 rctx->custom_blend_fmask_decompress = evergreen_create_fmask_decompress_blend(rctx);
278 rctx->has_vertex_cache = !(rctx->family == CHIP_CEDAR ||
279 rctx->family == CHIP_PALM ||
280 rctx->family == CHIP_SUMO ||
281 rctx->family == CHIP_SUMO2 ||
282 rctx->family == CHIP_CAICOS ||
283 rctx->family == CHIP_CAYMAN ||
284 rctx->family == CHIP_ARUBA);
285 break;
286 default:
287 R600_ERR("Unsupported chip class %d.\n", rctx->chip_class);
288 goto fail;
289 }
290
291 rctx->cs = rctx->ws->cs_create(rctx->ws);
292 rctx->ws->cs_set_flush_callback(rctx->cs, r600_flush_from_winsys, rctx);
293
294 rctx->uploader = u_upload_create(&rctx->context, 1024 * 1024, 256,
295 PIPE_BIND_INDEX_BUFFER |
296 PIPE_BIND_CONSTANT_BUFFER);
297 if (!rctx->uploader)
298 goto fail;
299
300 rctx->allocator_fetch_shader = u_suballocator_create(&rctx->context, 64 * 1024, 256,
301 0, PIPE_USAGE_STATIC, FALSE);
302 if (!rctx->allocator_fetch_shader)
303 goto fail;
304
305 rctx->allocator_so_filled_size = u_suballocator_create(&rctx->context, 4096, 4,
306 0, PIPE_USAGE_STATIC, TRUE);
307 if (!rctx->allocator_so_filled_size)
308 goto fail;
309
310 rctx->blitter = util_blitter_create(&rctx->context);
311 if (rctx->blitter == NULL)
312 goto fail;
313 util_blitter_set_texture_multisample(rctx->blitter, rscreen->has_msaa);
314 rctx->blitter->draw_rectangle = r600_draw_rectangle;
315
316 r600_begin_new_cs(rctx);
317 r600_get_backend_mask(rctx); /* this emits commands and must be last */
318
319 rctx->dummy_pixel_shader =
320 util_make_fragment_cloneinput_shader(&rctx->context, 0,
321 TGSI_SEMANTIC_GENERIC,
322 TGSI_INTERPOLATE_CONSTANT);
323 rctx->context.bind_fs_state(&rctx->context, rctx->dummy_pixel_shader);
324
325 return &rctx->context;
326
327 fail:
328 r600_destroy_context(&rctx->context);
329 return NULL;
330 }
331
332 /*
333 * pipe_screen
334 */
335 static const char* r600_get_vendor(struct pipe_screen* pscreen)
336 {
337 return "X.Org";
338 }
339
340 static const char *r600_get_family_name(enum radeon_family family)
341 {
342 switch(family) {
343 case CHIP_R600: return "AMD R600";
344 case CHIP_RV610: return "AMD RV610";
345 case CHIP_RV630: return "AMD RV630";
346 case CHIP_RV670: return "AMD RV670";
347 case CHIP_RV620: return "AMD RV620";
348 case CHIP_RV635: return "AMD RV635";
349 case CHIP_RS780: return "AMD RS780";
350 case CHIP_RS880: return "AMD RS880";
351 case CHIP_RV770: return "AMD RV770";
352 case CHIP_RV730: return "AMD RV730";
353 case CHIP_RV710: return "AMD RV710";
354 case CHIP_RV740: return "AMD RV740";
355 case CHIP_CEDAR: return "AMD CEDAR";
356 case CHIP_REDWOOD: return "AMD REDWOOD";
357 case CHIP_JUNIPER: return "AMD JUNIPER";
358 case CHIP_CYPRESS: return "AMD CYPRESS";
359 case CHIP_HEMLOCK: return "AMD HEMLOCK";
360 case CHIP_PALM: return "AMD PALM";
361 case CHIP_SUMO: return "AMD SUMO";
362 case CHIP_SUMO2: return "AMD SUMO2";
363 case CHIP_BARTS: return "AMD BARTS";
364 case CHIP_TURKS: return "AMD TURKS";
365 case CHIP_CAICOS: return "AMD CAICOS";
366 case CHIP_CAYMAN: return "AMD CAYMAN";
367 case CHIP_ARUBA: return "AMD ARUBA";
368 default: return "AMD unknown";
369 }
370 }
371
372 static const char* r600_get_name(struct pipe_screen* pscreen)
373 {
374 struct r600_screen *rscreen = (struct r600_screen *)pscreen;
375
376 return r600_get_family_name(rscreen->family);
377 }
378
379 static int r600_get_param(struct pipe_screen* pscreen, enum pipe_cap param)
380 {
381 struct r600_screen *rscreen = (struct r600_screen *)pscreen;
382 enum radeon_family family = rscreen->family;
383
384 switch (param) {
385 /* Supported features (boolean caps). */
386 case PIPE_CAP_NPOT_TEXTURES:
387 case PIPE_CAP_TWO_SIDED_STENCIL:
388 case PIPE_CAP_ANISOTROPIC_FILTER:
389 case PIPE_CAP_POINT_SPRITE:
390 case PIPE_CAP_OCCLUSION_QUERY:
391 case PIPE_CAP_TEXTURE_SHADOW_MAP:
392 case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
393 case PIPE_CAP_BLEND_EQUATION_SEPARATE:
394 case PIPE_CAP_TEXTURE_SWIZZLE:
395 case PIPE_CAP_DEPTHSTENCIL_CLEAR_SEPARATE:
396 case PIPE_CAP_DEPTH_CLIP_DISABLE:
397 case PIPE_CAP_SHADER_STENCIL_EXPORT:
398 case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR:
399 case PIPE_CAP_MIXED_COLORBUFFER_FORMATS:
400 case PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT:
401 case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
402 case PIPE_CAP_SM3:
403 case PIPE_CAP_SEAMLESS_CUBE_MAP:
404 case PIPE_CAP_PRIMITIVE_RESTART:
405 case PIPE_CAP_CONDITIONAL_RENDER:
406 case PIPE_CAP_TEXTURE_BARRIER:
407 case PIPE_CAP_VERTEX_COLOR_UNCLAMPED:
408 case PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION:
409 case PIPE_CAP_TGSI_INSTANCEID:
410 case PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY:
411 case PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY:
412 case PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY:
413 case PIPE_CAP_USER_INDEX_BUFFERS:
414 case PIPE_CAP_USER_CONSTANT_BUFFERS:
415 case PIPE_CAP_COMPUTE:
416 case PIPE_CAP_START_INSTANCE:
417 case PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS:
418 return 1;
419
420 case PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT:
421 return R600_MAP_BUFFER_ALIGNMENT;
422
423 case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
424 return 256;
425
426 case PIPE_CAP_GLSL_FEATURE_LEVEL:
427 return 130;
428
429 case PIPE_CAP_TEXTURE_MULTISAMPLE:
430 return rscreen->msaa_texture_support != MSAA_TEXTURE_SAMPLE_ZERO;
431
432 	/* Supported except on the original R600. */
433 case PIPE_CAP_INDEP_BLEND_ENABLE:
434 case PIPE_CAP_INDEP_BLEND_FUNC:
435 /* R600 doesn't support per-MRT blends */
436 return family == CHIP_R600 ? 0 : 1;
437
438 /* Supported on Evergreen. */
439 case PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE:
440 case PIPE_CAP_CUBE_MAP_ARRAY:
441 return family >= CHIP_CEDAR ? 1 : 0;
442
443 /* Unsupported features. */
444 case PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT:
445 case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER:
446 case PIPE_CAP_SCALED_RESOLVE:
447 case PIPE_CAP_TGSI_CAN_COMPACT_VARYINGS:
448 case PIPE_CAP_TGSI_CAN_COMPACT_CONSTANTS:
449 case PIPE_CAP_FRAGMENT_COLOR_CLAMPED:
450 case PIPE_CAP_VERTEX_COLOR_CLAMPED:
451 case PIPE_CAP_USER_VERTEX_BUFFERS:
452 case PIPE_CAP_TEXTURE_BUFFER_OBJECTS:
453 return 0;
454
455 /* Stream output. */
456 case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS:
457 return rscreen->has_streamout ? 4 : 0;
458 case PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME:
459 return rscreen->has_streamout ? 1 : 0;
460 case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS:
461 case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS:
462 return 32*4;
463
464 /* Texturing. */
465 case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
466 case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
467 case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
468 if (family >= CHIP_CEDAR)
469 return 15;
470 else
471 return 14;
472 case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
473 return rscreen->info.drm_minor >= 9 ?
474 (family >= CHIP_CEDAR ? 16384 : 8192) : 0;
475 case PIPE_CAP_MAX_COMBINED_SAMPLERS:
476 return 32;
477
478 /* Render targets. */
479 case PIPE_CAP_MAX_RENDER_TARGETS:
480 /* XXX some r6xx are buggy and can only do 4 */
481 return 8;
482
483 	/* Timer queries, present when the clock frequency is non-zero. */
484 case PIPE_CAP_QUERY_TIME_ELAPSED:
485 return rscreen->info.r600_clock_crystal_freq != 0;
486 case PIPE_CAP_QUERY_TIMESTAMP:
487 return rscreen->info.drm_minor >= 20 &&
488 rscreen->info.r600_clock_crystal_freq != 0;
489
490 case PIPE_CAP_MIN_TEXEL_OFFSET:
491 return -8;
492
493 case PIPE_CAP_MAX_TEXEL_OFFSET:
494 return 7;
495 }
496 return 0;
497 }
498
499 static float r600_get_paramf(struct pipe_screen* pscreen,
500 enum pipe_capf param)
501 {
502 struct r600_screen *rscreen = (struct r600_screen *)pscreen;
503 enum radeon_family family = rscreen->family;
504
505 switch (param) {
506 case PIPE_CAPF_MAX_LINE_WIDTH:
507 case PIPE_CAPF_MAX_LINE_WIDTH_AA:
508 case PIPE_CAPF_MAX_POINT_WIDTH:
509 case PIPE_CAPF_MAX_POINT_WIDTH_AA:
510 if (family >= CHIP_CEDAR)
511 return 16384.0f;
512 else
513 return 8192.0f;
514 case PIPE_CAPF_MAX_TEXTURE_ANISOTROPY:
515 return 16.0f;
516 case PIPE_CAPF_MAX_TEXTURE_LOD_BIAS:
517 return 16.0f;
518 case PIPE_CAPF_GUARD_BAND_LEFT:
519 case PIPE_CAPF_GUARD_BAND_TOP:
520 case PIPE_CAPF_GUARD_BAND_RIGHT:
521 case PIPE_CAPF_GUARD_BAND_BOTTOM:
522 return 0.0f;
523 }
524 return 0.0f;
525 }
526
527 static int r600_get_shader_param(struct pipe_screen* pscreen, unsigned shader, enum pipe_shader_cap param)
528 {
529 switch(shader)
530 {
531 case PIPE_SHADER_FRAGMENT:
532 case PIPE_SHADER_VERTEX:
533 case PIPE_SHADER_COMPUTE:
534 break;
535 case PIPE_SHADER_GEOMETRY:
536 /* XXX: support and enable geometry programs */
537 return 0;
538 default:
539 /* XXX: support tessellation on Evergreen */
540 return 0;
541 }
542
543 /* XXX: all these should be fixed, since r600 surely supports much more! */
544 switch (param) {
545 case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
546 case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
547 case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
548 case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
549 return 16384;
550 case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
551 return 8; /* XXX */
552 case PIPE_SHADER_CAP_MAX_INPUTS:
553 return 32;
554 case PIPE_SHADER_CAP_MAX_TEMPS:
555 return 256; /* Max native temporaries. */
556 case PIPE_SHADER_CAP_MAX_ADDRS:
557 /* XXX Isn't this equal to TEMPS? */
558 return 1; /* Max native address registers */
559 case PIPE_SHADER_CAP_MAX_CONSTS:
560 return R600_MAX_CONST_BUFFER_SIZE;
561 case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
562 return R600_MAX_USER_CONST_BUFFERS;
563 case PIPE_SHADER_CAP_MAX_PREDS:
564 return 0; /* nothing uses this */
565 case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED:
566 return 1;
567 case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:
568 case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
569 case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
570 case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
571 return 1;
572 case PIPE_SHADER_CAP_SUBROUTINES:
573 return 0;
574 case PIPE_SHADER_CAP_INTEGERS:
575 return 1;
576 case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
577 return 16;
578 case PIPE_SHADER_CAP_PREFERRED_IR:
579 if (shader == PIPE_SHADER_COMPUTE) {
580 return PIPE_SHADER_IR_LLVM;
581 } else {
582 return PIPE_SHADER_IR_TGSI;
583 }
584 }
585 return 0;
586 }
587
588 static int r600_get_video_param(struct pipe_screen *screen,
589 enum pipe_video_profile profile,
590 enum pipe_video_cap param)
591 {
592 switch (param) {
593 case PIPE_VIDEO_CAP_SUPPORTED:
594 return vl_profile_supported(screen, profile);
595 case PIPE_VIDEO_CAP_NPOT_TEXTURES:
596 return 1;
597 case PIPE_VIDEO_CAP_MAX_WIDTH:
598 case PIPE_VIDEO_CAP_MAX_HEIGHT:
599 return vl_video_buffer_max_size(screen);
600 case PIPE_VIDEO_CAP_PREFERED_FORMAT:
601 return PIPE_FORMAT_NV12;
602 case PIPE_VIDEO_CAP_PREFERS_INTERLACED:
603 return false;
604 case PIPE_VIDEO_CAP_SUPPORTS_INTERLACED:
605 return false;
606 case PIPE_VIDEO_CAP_SUPPORTS_PROGRESSIVE:
607 return true;
608 default:
609 return 0;
610 }
611 }
612
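/*
 * Compute caps follow the gallium get_compute_param convention: each case
 * returns the number of bytes it would write for the cap, and fills *ret
 * only when the caller provided a buffer (ret != NULL).
 */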
613 static int r600_get_compute_param(struct pipe_screen *screen,
614 enum pipe_compute_cap param,
615 void *ret)
616 {
617 	/* TODO: select these params by ASIC. */
618 switch (param) {
619 case PIPE_COMPUTE_CAP_IR_TARGET:
620 if (ret) {
621 strcpy(ret, "r600--");
622 }
623 return 7 * sizeof(char);
624
625 case PIPE_COMPUTE_CAP_GRID_DIMENSION:
626 if (ret) {
627 uint64_t * grid_dimension = ret;
628 grid_dimension[0] = 3;
629 }
630 return 1 * sizeof(uint64_t);
631
632 case PIPE_COMPUTE_CAP_MAX_GRID_SIZE:
633 if (ret) {
634 uint64_t * grid_size = ret;
635 grid_size[0] = 65535;
636 grid_size[1] = 65535;
637 grid_size[2] = 1;
638 }
639 return 3 * sizeof(uint64_t) ;
640
641 case PIPE_COMPUTE_CAP_MAX_BLOCK_SIZE:
642 if (ret) {
643 uint64_t * block_size = ret;
644 block_size[0] = 256;
645 block_size[1] = 256;
646 block_size[2] = 256;
647 }
648 return 3 * sizeof(uint64_t);
649
650 case PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK:
651 if (ret) {
652 uint64_t * max_threads_per_block = ret;
653 *max_threads_per_block = 256;
654 }
655 return sizeof(uint64_t);
656
657 case PIPE_COMPUTE_CAP_MAX_GLOBAL_SIZE:
658 if (ret) {
659 uint64_t * max_global_size = ret;
660 			/* XXX: This is what the proprietary driver reports; we
661 			 * may want to use a different value. */
662 *max_global_size = 201326592;
663 }
664 return sizeof(uint64_t);
665
666 case PIPE_COMPUTE_CAP_MAX_INPUT_SIZE:
667 if (ret) {
668 uint64_t * max_input_size = ret;
669 *max_input_size = 1024;
670 }
671 return sizeof(uint64_t);
672
673 case PIPE_COMPUTE_CAP_MAX_LOCAL_SIZE:
674 if (ret) {
675 uint64_t * max_local_size = ret;
676 			/* XXX: This is what the proprietary driver reports; we
677 			 * may want to use a different value. */
678 *max_local_size = 32768;
679 }
680 return sizeof(uint64_t);
681
682 case PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE:
683 if (ret) {
684 uint64_t max_global_size;
685 uint64_t * max_mem_alloc_size = ret;
686 r600_get_compute_param(screen,
687 PIPE_COMPUTE_CAP_MAX_GLOBAL_SIZE,
688 &max_global_size);
689 			/* OpenCL requires this value to be at least
690 			 * max(MAX_GLOBAL_SIZE / 4, 128 * 1024 * 1024).
691 			 * I'm really not sure what value to report here, but
692 			 * MAX_GLOBAL_SIZE / 4 seems reasonable.
693 			 */
694 *max_mem_alloc_size = max_global_size / 4;
695 }
696 return sizeof(uint64_t);
697
698 default:
699 fprintf(stderr, "unknown PIPE_COMPUTE_CAP %d\n", param);
700 return 0;
701 }
702 }
703
704 static void r600_destroy_screen(struct pipe_screen* pscreen)
705 {
706 struct r600_screen *rscreen = (struct r600_screen *)pscreen;
707
708 if (rscreen == NULL)
709 return;
710
711 if (rscreen->global_pool) {
712 compute_memory_pool_delete(rscreen->global_pool);
713 }
714
715 if (rscreen->fences.bo) {
716 struct r600_fence_block *entry, *tmp;
717
718 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, &rscreen->fences.blocks, head) {
719 LIST_DEL(&entry->head);
720 FREE(entry);
721 }
722
723 rscreen->ws->buffer_unmap(rscreen->fences.bo->cs_buf);
724 pipe_resource_reference((struct pipe_resource**)&rscreen->fences.bo, NULL);
725 }
726 #if R600_TRACE_CS
727 if (rscreen->trace_bo) {
728 rscreen->ws->buffer_unmap(rscreen->trace_bo->cs_buf);
729 pipe_resource_reference((struct pipe_resource**)&rscreen->trace_bo, NULL);
730 }
731 #endif
732 pipe_mutex_destroy(rscreen->fences.mutex);
733
734 rscreen->ws->destroy(rscreen->ws);
735 FREE(rscreen);
736 }
737
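/*
 * Fence objects are never freed individually: when the last reference goes
 * away, the sleep BO is released and the fence is returned to the screen's
 * pool so r600_create_fence() can recycle its slot once it has signalled.
 */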
738 static void r600_fence_reference(struct pipe_screen *pscreen,
739 struct pipe_fence_handle **ptr,
740 struct pipe_fence_handle *fence)
741 {
742 struct r600_fence **oldf = (struct r600_fence**)ptr;
743 struct r600_fence *newf = (struct r600_fence*)fence;
744
745 if (pipe_reference(&(*oldf)->reference, &newf->reference)) {
746 struct r600_screen *rscreen = (struct r600_screen *)pscreen;
747 pipe_mutex_lock(rscreen->fences.mutex);
748 pipe_resource_reference((struct pipe_resource**)&(*oldf)->sleep_bo, NULL);
749 LIST_ADDTAIL(&(*oldf)->head, &rscreen->fences.pool);
750 pipe_mutex_unlock(rscreen->fences.mutex);
751 }
752
753 *ptr = fence;
754 }
755
756 static boolean r600_fence_signalled(struct pipe_screen *pscreen,
757 struct pipe_fence_handle *fence)
758 {
759 struct r600_screen *rscreen = (struct r600_screen *)pscreen;
760 struct r600_fence *rfence = (struct r600_fence*)fence;
761
762 return rscreen->fences.data[rfence->index] != 0;
763 }
764
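/*
 * Wait for a fence. With an infinite timeout we simply block on the fence's
 * dummy sleep BO in the winsys. With a finite timeout we poll the fence
 * dword, yielding the CPU every 256 spins, and stop early if the sleep BO
 * goes idle (the CS completed or the GPU was reset) or the timeout expires.
 */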
765 static boolean r600_fence_finish(struct pipe_screen *pscreen,
766 struct pipe_fence_handle *fence,
767 uint64_t timeout)
768 {
769 struct r600_screen *rscreen = (struct r600_screen *)pscreen;
770 struct r600_fence *rfence = (struct r600_fence*)fence;
771 int64_t start_time = 0;
772 unsigned spins = 0;
773
774 if (timeout != PIPE_TIMEOUT_INFINITE) {
775 start_time = os_time_get();
776
777 /* Convert to microseconds. */
778 timeout /= 1000;
779 }
780
781 while (rscreen->fences.data[rfence->index] == 0) {
782 /* Special-case infinite timeout - wait for the dummy BO to become idle */
783 if (timeout == PIPE_TIMEOUT_INFINITE) {
784 rscreen->ws->buffer_wait(rfence->sleep_bo->buf, RADEON_USAGE_READWRITE);
785 break;
786 }
787
788 /* The dummy BO will be busy until the CS including the fence has completed, or
789 * the GPU is reset. Don't bother continuing to spin when the BO is idle. */
790 if (!rscreen->ws->buffer_is_busy(rfence->sleep_bo->buf, RADEON_USAGE_READWRITE))
791 break;
792
793 if (++spins % 256)
794 continue;
795 #ifdef PIPE_OS_UNIX
796 sched_yield();
797 #else
798 os_time_sleep(10);
799 #endif
800 if (timeout != PIPE_TIMEOUT_INFINITE &&
801 os_time_get() - start_time >= timeout) {
802 break;
803 }
804 }
805
806 return rscreen->fences.data[rfence->index] != 0;
807 }
808
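/*
 * Decode the r6xx/r7xx tiling_config value reported by the kernel into the
 * number of channels (pipes), banks and the tile group size in bytes.
 * Evergreen and later pack the same fields differently; see
 * evergreen_interpret_tiling() below.
 */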
809 static int r600_interpret_tiling(struct r600_screen *rscreen, uint32_t tiling_config)
810 {
811 switch ((tiling_config & 0xe) >> 1) {
812 case 0:
813 rscreen->tiling_info.num_channels = 1;
814 break;
815 case 1:
816 rscreen->tiling_info.num_channels = 2;
817 break;
818 case 2:
819 rscreen->tiling_info.num_channels = 4;
820 break;
821 case 3:
822 rscreen->tiling_info.num_channels = 8;
823 break;
824 default:
825 return -EINVAL;
826 }
827
828 switch ((tiling_config & 0x30) >> 4) {
829 case 0:
830 rscreen->tiling_info.num_banks = 4;
831 break;
832 case 1:
833 rscreen->tiling_info.num_banks = 8;
834 break;
835 default:
836 return -EINVAL;
837
838 }
839 switch ((tiling_config & 0xc0) >> 6) {
840 case 0:
841 rscreen->tiling_info.group_bytes = 256;
842 break;
843 case 1:
844 rscreen->tiling_info.group_bytes = 512;
845 break;
846 default:
847 return -EINVAL;
848 }
849 return 0;
850 }
851
852 static int evergreen_interpret_tiling(struct r600_screen *rscreen, uint32_t tiling_config)
853 {
854 switch (tiling_config & 0xf) {
855 case 0:
856 rscreen->tiling_info.num_channels = 1;
857 break;
858 case 1:
859 rscreen->tiling_info.num_channels = 2;
860 break;
861 case 2:
862 rscreen->tiling_info.num_channels = 4;
863 break;
864 case 3:
865 rscreen->tiling_info.num_channels = 8;
866 break;
867 default:
868 return -EINVAL;
869 }
870
871 switch ((tiling_config & 0xf0) >> 4) {
872 case 0:
873 rscreen->tiling_info.num_banks = 4;
874 break;
875 case 1:
876 rscreen->tiling_info.num_banks = 8;
877 break;
878 case 2:
879 rscreen->tiling_info.num_banks = 16;
880 break;
881 default:
882 return -EINVAL;
883 }
884
885 switch ((tiling_config & 0xf00) >> 8) {
886 case 0:
887 rscreen->tiling_info.group_bytes = 256;
888 break;
889 case 1:
890 rscreen->tiling_info.group_bytes = 512;
891 break;
892 default:
893 return -EINVAL;
894 }
895 return 0;
896 }
897
898 static int r600_init_tiling(struct r600_screen *rscreen)
899 {
900 uint32_t tiling_config = rscreen->info.r600_tiling_config;
901
902 /* set default group bytes, overridden by tiling info ioctl */
903 if (rscreen->chip_class <= R700) {
904 rscreen->tiling_info.group_bytes = 256;
905 } else {
906 rscreen->tiling_info.group_bytes = 512;
907 }
908
909 if (!tiling_config)
910 return 0;
911
912 if (rscreen->chip_class <= R700) {
913 return r600_interpret_tiling(rscreen, tiling_config);
914 } else {
915 return evergreen_interpret_tiling(rscreen, tiling_config);
916 }
917 }
918
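/*
 * Map a PCI device id to a radeon_family. The CHIPSET() X-macro expands
 * every entry of pci_ids/r600_pci_ids.h into a case statement here.
 */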
919 static unsigned radeon_family_from_device(unsigned device)
920 {
921 switch (device) {
922 #define CHIPSET(pciid, name, family) case pciid: return CHIP_##family;
923 #include "pci_ids/r600_pci_ids.h"
924 #undef CHIPSET
925 default:
926 return CHIP_UNKNOWN;
927 }
928 }
929
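/*
 * pipe_screen::get_timestamp is expected to return a time in nanoseconds;
 * the raw GPU counter is scaled by the crystal clock frequency reported by
 * the kernel to get there.
 */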
930 static uint64_t r600_get_timestamp(struct pipe_screen *screen)
931 {
932 struct r600_screen *rscreen = (struct r600_screen*)screen;
933
934 return 1000000 * rscreen->ws->query_timestamp(rscreen->ws) /
935 rscreen->info.r600_clock_crystal_freq;
936 }
937
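/*
 * Create the screen: identify the chip family and class from the PCI id,
 * probe the kernel (DRM minor version) for streamout and MSAA support, read
 * the tiling configuration, wire up the pipe_screen vtable, and initialize
 * the shared fence state, the HyperZ option and the compute memory pool.
 */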
938 struct pipe_screen *r600_screen_create(struct radeon_winsys *ws)
939 {
940 struct r600_screen *rscreen = CALLOC_STRUCT(r600_screen);
941
942 if (rscreen == NULL) {
943 return NULL;
944 }
945
946 rscreen->ws = ws;
947 ws->query_info(ws, &rscreen->info);
948
949 rscreen->family = radeon_family_from_device(rscreen->info.pci_id);
950 if (rscreen->family == CHIP_UNKNOWN) {
951 fprintf(stderr, "r600: Unknown chipset 0x%04X\n", rscreen->info.pci_id);
952 FREE(rscreen);
953 return NULL;
954 }
955
956 /* setup class */
957 if (rscreen->family >= CHIP_CAYMAN) {
958 rscreen->chip_class = CAYMAN;
959 } else if (rscreen->family >= CHIP_CEDAR) {
960 rscreen->chip_class = EVERGREEN;
961 } else if (rscreen->family >= CHIP_RV770) {
962 rscreen->chip_class = R700;
963 } else {
964 rscreen->chip_class = R600;
965 }
966
967 /* Figure out streamout kernel support. */
968 switch (rscreen->chip_class) {
969 case R600:
970 if (rscreen->family < CHIP_RS780) {
971 rscreen->has_streamout = rscreen->info.drm_minor >= 14;
972 } else {
973 rscreen->has_streamout = rscreen->info.drm_minor >= 23;
974 }
975 break;
976 case R700:
977 rscreen->has_streamout = rscreen->info.drm_minor >= 17;
978 break;
979 case EVERGREEN:
980 case CAYMAN:
981 rscreen->has_streamout = rscreen->info.drm_minor >= 14;
982 break;
983 }
984
985 /* MSAA support. */
986 switch (rscreen->chip_class) {
987 case R600:
988 case R700:
989 rscreen->has_msaa = rscreen->info.drm_minor >= 22;
990 rscreen->msaa_texture_support = MSAA_TEXTURE_DECOMPRESSED;
991 break;
992 case EVERGREEN:
993 rscreen->has_msaa = rscreen->info.drm_minor >= 19;
994 rscreen->msaa_texture_support =
995 rscreen->info.drm_minor >= 24 ? MSAA_TEXTURE_COMPRESSED :
996 MSAA_TEXTURE_DECOMPRESSED;
997 break;
998 case CAYMAN:
999 rscreen->has_msaa = rscreen->info.drm_minor >= 19;
1000 /* We should be able to read compressed MSAA textures, but it doesn't work. */
1001 rscreen->msaa_texture_support = MSAA_TEXTURE_SAMPLE_ZERO;
1002 break;
1003 }
1004
1005 if (r600_init_tiling(rscreen)) {
1006 FREE(rscreen);
1007 return NULL;
1008 }
1009
1010 rscreen->screen.destroy = r600_destroy_screen;
1011 rscreen->screen.get_name = r600_get_name;
1012 rscreen->screen.get_vendor = r600_get_vendor;
1013 rscreen->screen.get_param = r600_get_param;
1014 rscreen->screen.get_shader_param = r600_get_shader_param;
1015 rscreen->screen.get_paramf = r600_get_paramf;
1016 rscreen->screen.get_video_param = r600_get_video_param;
1017 rscreen->screen.get_compute_param = r600_get_compute_param;
1018 rscreen->screen.get_timestamp = r600_get_timestamp;
1019
1020 if (rscreen->chip_class >= EVERGREEN) {
1021 rscreen->screen.is_format_supported = evergreen_is_format_supported;
1022 } else {
1023 rscreen->screen.is_format_supported = r600_is_format_supported;
1024 }
1025 rscreen->screen.is_video_format_supported = vl_video_buffer_is_format_supported;
1026 rscreen->screen.context_create = r600_create_context;
1027 rscreen->screen.fence_reference = r600_fence_reference;
1028 rscreen->screen.fence_signalled = r600_fence_signalled;
1029 rscreen->screen.fence_finish = r600_fence_finish;
1030 r600_init_screen_resource_functions(&rscreen->screen);
1031
1032 util_format_s3tc_init();
1033
1034 rscreen->fences.bo = NULL;
1035 rscreen->fences.data = NULL;
1036 rscreen->fences.next_index = 0;
1037 LIST_INITHEAD(&rscreen->fences.pool);
1038 LIST_INITHEAD(&rscreen->fences.blocks);
1039 pipe_mutex_init(rscreen->fences.mutex);
1040
1041 	/* Hyperz is very lockup-prone; any code that touches the related parts
1042 	 * should be carefully tested, especially on r6xx/r7xx. Development showed
1043 	 * that some piglit cases triggered lockups quickly, such as:
1044 	 * piglit/bin/depthstencil-render-miplevels 1024 d=s=z24_s8
1045 	 */
1046 rscreen->use_hyperz = debug_get_bool_option("R600_HYPERZ", TRUE);
1047 rscreen->use_hyperz = rscreen->info.drm_minor >= 26 ? rscreen->use_hyperz : FALSE;
1048
1049 rscreen->global_pool = compute_memory_pool_new(rscreen);
1050
1051 #if R600_TRACE_CS
1052 rscreen->cs_count = 0;
1053 if (rscreen->info.drm_minor >= 28) {
1054 rscreen->trace_bo = (struct r600_resource*)pipe_buffer_create(&rscreen->screen,
1055 PIPE_BIND_CUSTOM,
1056 PIPE_USAGE_STAGING,
1057 4096);
1058 if (rscreen->trace_bo) {
1059 rscreen->trace_ptr = rscreen->ws->buffer_map(rscreen->trace_bo->cs_buf, NULL,
1060 PIPE_TRANSFER_UNSYNCHRONIZED);
1061 }
1062 }
1063 #endif
1064
1065 return &rscreen->screen;
1066 }