r600g: remove slab allocator for pipe_resource (used mainly for user buffers)
[mesa.git] / src / gallium / drivers / r600 / r600_pipe.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include "r600_pipe.h"
24 #include "r600_public.h"
25
26 #include <errno.h>
27 #include "pipe/p_shader_tokens.h"
28 #include "util/u_blitter.h"
29 #include "util/u_format_s3tc.h"
30 #include "util/u_simple_shaders.h"
31 #include "util/u_upload_mgr.h"
32 #include "vl/vl_decoder.h"
33 #include "vl/vl_video_buffer.h"
34 #include "os/os_time.h"
35
36 /*
37 * pipe_context
38 */
/* Allocate a fence slot in the screen-wide fence BO and emit a command
 * that makes the GPU write 1 into that slot when it reaches this point
 * in the command stream.  The caller (r600_flush) flushes afterwards, so
 * the fence command is guaranteed to be submitted.
 *
 * Returns a referenced fence, or NULL on allocation failure.
 * Serialized by rscreen->fences.mutex (the fence pool is per-screen and
 * shared by all contexts).
 */
static struct r600_fence *r600_create_fence(struct r600_context *rctx)
{
	struct r600_screen *rscreen = rctx->screen;
	struct r600_fence *fence = NULL;

	pipe_mutex_lock(rscreen->fences.mutex);

	if (!rscreen->fences.bo) {
		/* Create the shared buffer object (lazily, on first fence). */
		rscreen->fences.bo = (struct r600_resource*)
			pipe_buffer_create(&rscreen->screen, PIPE_BIND_CUSTOM,
					   PIPE_USAGE_STAGING, 4096);
		if (!rscreen->fences.bo) {
			R600_ERR("r600: failed to create bo for fence objects\n");
			goto out;
		}
		/* Persistently map the BO; fences.data[i] is polled by
		 * fence_signalled/fence_finish.
		 * NOTE(review): the buffer_map return value is not checked —
		 * presumably it cannot fail for a freshly created staging BO;
		 * confirm. */
		rscreen->fences.data = rctx->ws->buffer_map(rscreen->fences.bo->cs_buf,
							   rctx->cs,
							   PIPE_TRANSFER_READ_WRITE);
	}

	if (!LIST_IS_EMPTY(&rscreen->fences.pool)) {
		struct r600_fence *entry;

		/* Try to find a freed fence that has been signalled */
		LIST_FOR_EACH_ENTRY(entry, &rscreen->fences.pool, head) {
			if (rscreen->fences.data[entry->index] != 0) {
				LIST_DELINIT(&entry->head);
				fence = entry;
				break;
			}
		}
	}

	if (!fence) {
		/* Allocate a new fence */
		struct r600_fence_block *block;
		unsigned index;

		/* Cap at the 4096-byte BO capacity.  NOTE(review): the +1
		 * makes this reject index 1023 as well, leaving one slot
		 * unused — looks like a conservative off-by-one; confirm
		 * whether data[] entries are 32-bit (4096/4 = 1024 slots). */
		if ((rscreen->fences.next_index + 1) >= 1024) {
			R600_ERR("r600: too many concurrent fences\n");
			goto out;
		}

		index = rscreen->fences.next_index++;

		if (!(index % FENCE_BLOCK_SIZE)) {
			/* Allocate a new block of fence structs; blocks are
			 * prepended, so blocks.next is always the newest. */
			block = CALLOC_STRUCT(r600_fence_block);
			if (block == NULL)
				goto out;

			LIST_ADD(&block->head, &rscreen->fences.blocks);
		} else {
			block = LIST_ENTRY(struct r600_fence_block, rscreen->fences.blocks.next, head);
		}

		fence = &block->fences[index % FENCE_BLOCK_SIZE];
		fence->index = index;
	}

	pipe_reference_init(&fence->reference, 1);

	/* Reset the slot, then emit the GPU write that sets it to 1. */
	rscreen->fences.data[fence->index] = 0;
	r600_context_emit_fence(rctx, rscreen->fences.bo, fence->index, 1);

	/* Create a dummy BO so that fence_finish without a timeout can sleep waiting for completion */
	fence->sleep_bo = (struct r600_resource*)
		pipe_buffer_create(&rctx->screen->screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, 1);
	/* Add the fence as a dummy relocation. */
	r600_context_bo_reloc(rctx, fence->sleep_bo, RADEON_USAGE_READWRITE);

out:
	pipe_mutex_unlock(rscreen->fences.mutex);
	return fence;
}
116
117
/* Flush the context's command stream.
 *
 * If 'fence' is non-NULL, a fence is created and its GPU-side write is
 * emitted into the CS *before* the flush, so the fence signals once all
 * previously queued work has completed.  Any active render condition is
 * disabled around the flush and restored afterwards, since predication
 * state does not survive a CS flush.
 */
void r600_flush(struct pipe_context *ctx, struct pipe_fence_handle **fence,
		unsigned flags)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_fence **rfence = (struct r600_fence**)fence;
	struct pipe_query *render_cond = NULL;
	unsigned render_cond_mode = 0;

	if (rfence)
		*rfence = r600_create_fence(rctx);

	/* Disable render condition. */
	if (rctx->current_render_cond) {
		render_cond = rctx->current_render_cond;
		render_cond_mode = rctx->current_render_cond_mode;
		ctx->render_condition(ctx, NULL, 0);
	}

	r600_context_flush(rctx, flags);

	/* Re-enable render condition. */
	if (render_cond) {
		ctx->render_condition(ctx, render_cond, render_cond_mode);
	}
}
143
/* pipe_context::flush entry point for the state tracker: a plain flush
 * with no winsys-specific flags. */
static void r600_flush_from_st(struct pipe_context *ctx,
			       struct pipe_fence_handle **fence)
{
	const unsigned no_flags = 0;

	r600_flush(ctx, fence, no_flags);
}
149
150 static void r600_flush_from_winsys(void *ctx, unsigned flags)
151 {
152 r600_flush((struct pipe_context*)ctx, NULL, flags);
153 }
154
/* pipe_context::destroy — tear down everything r600_create_context set up.
 * Also used as the error-path cleanup for a partially constructed context,
 * hence the NULL checks on each member.  The CS is destroyed near the end
 * so earlier teardown steps can still reference it.
 */
static void r600_destroy_context(struct pipe_context *context)
{
	struct r600_context *rctx = (struct r600_context *)context;

	if (rctx->dummy_pixel_shader) {
		rctx->context.delete_fs_state(&rctx->context, rctx->dummy_pixel_shader);
	}
	if (rctx->custom_dsa_flush) {
		rctx->context.delete_depth_stencil_alpha_state(&rctx->context, rctx->custom_dsa_flush);
	}
	/* Drop references held on currently bound framebuffer surfaces. */
	util_unreference_framebuffer_state(&rctx->framebuffer);

	r600_context_fini(rctx);

	if (rctx->blitter) {
		util_blitter_destroy(rctx->blitter);
	}
	/* Free cached CSO state objects (states[i] may be NULL; free(NULL)
	 * is a no-op). */
	for (int i = 0; i < R600_PIPE_NSTATES; i++) {
		free(rctx->states[i]);
	}

	if (rctx->uploader) {
		u_upload_destroy(rctx->uploader);
	}
	util_slab_destroy(&rctx->pool_transfers);

	r600_release_command_buffer(&rctx->start_cs_cmd);

	if (rctx->cs) {
		rctx->ws->cs_destroy(rctx->cs);
	}

	FREE(rctx->range);
	FREE(rctx);
}
190
/* pipe_screen::context_create — allocate and initialize an r600 context.
 *
 * Initialization order matters: state functions and the start-of-CS
 * command buffer must exist before the CS is created and the start
 * atom is emitted; backend-mask query emits commands and must come last.
 * On any failure, r600_destroy_context cleans up the partial context.
 */
static struct pipe_context *r600_create_context(struct pipe_screen *screen, void *priv)
{
	struct r600_context *rctx = CALLOC_STRUCT(r600_context);
	struct r600_screen* rscreen = (struct r600_screen *)screen;

	if (rctx == NULL)
		return NULL;

	/* Single-threaded slab for pipe_transfer objects (transfers are
	 * created/destroyed frequently). */
	util_slab_create(&rctx->pool_transfers,
			 sizeof(struct pipe_transfer), 64,
			 UTIL_SLAB_SINGLETHREADED);

	rctx->context.screen = screen;
	rctx->context.priv = priv;
	rctx->context.destroy = r600_destroy_context;
	rctx->context.flush = r600_flush_from_st;

	/* Easy accessing of screen/winsys. */
	rctx->screen = rscreen;
	rctx->ws = rscreen->ws;
	rctx->family = rscreen->family;
	rctx->chip_class = rscreen->chip_class;

	LIST_INITHEAD(&rctx->dirty_states);
	LIST_INITHEAD(&rctx->active_timer_queries);
	LIST_INITHEAD(&rctx->active_nontimer_queries);
	LIST_INITHEAD(&rctx->dirty);
	LIST_INITHEAD(&rctx->resource_dirty);
	LIST_INITHEAD(&rctx->enable_list);

	rctx->range = CALLOC(NUM_RANGES, sizeof(struct r600_range));
	if (!rctx->range)
		goto fail;

	r600_init_blit_functions(rctx);
	r600_init_query_functions(rctx);
	r600_init_context_resource_functions(rctx);
	r600_init_surface_functions(rctx);
	rctx->context.draw_vbo = r600_draw_vbo;

	rctx->context.create_video_decoder = vl_create_decoder;
	rctx->context.create_video_buffer = vl_video_buffer_create;

	r600_init_common_atoms(rctx);

	/* Generation-specific setup; also records which families lack a
	 * vertex cache (those use the TC for vertex fetches instead). */
	switch (rctx->chip_class) {
	case R600:
	case R700:
		r600_init_state_functions(rctx);
		r600_init_atom_start_cs(rctx);
		if (r600_context_init(rctx))
			goto fail;
		rctx->custom_dsa_flush = r600_create_db_flush_dsa(rctx);
		rctx->has_vertex_cache = !(rctx->family == CHIP_RV610 ||
					   rctx->family == CHIP_RV620 ||
					   rctx->family == CHIP_RS780 ||
					   rctx->family == CHIP_RS880 ||
					   rctx->family == CHIP_RV710);
		break;
	case EVERGREEN:
	case CAYMAN:
		evergreen_init_state_functions(rctx);
		evergreen_init_atom_start_cs(rctx);
		if (evergreen_context_init(rctx))
			goto fail;
		rctx->custom_dsa_flush = evergreen_create_db_flush_dsa(rctx);
		rctx->has_vertex_cache = !(rctx->family == CHIP_CEDAR ||
					   rctx->family == CHIP_PALM ||
					   rctx->family == CHIP_SUMO ||
					   rctx->family == CHIP_SUMO2 ||
					   rctx->family == CHIP_CAICOS ||
					   rctx->family == CHIP_CAYMAN ||
					   rctx->family == CHIP_ARUBA);
		break;
	default:
		R600_ERR("Unsupported chip class %d.\n", rctx->chip_class);
		goto fail;
	}

	/* NOTE(review): cs_create's result is not checked before use —
	 * presumably it aborts or cannot fail in this winsys; confirm. */
	rctx->cs = rctx->ws->cs_create(rctx->ws);
	rctx->ws->cs_set_flush_callback(rctx->cs, r600_flush_from_winsys, rctx);
	r600_emit_atom(rctx, &rctx->start_cs_cmd.atom);

	/* 1 MB upload buffer for user index/constant data. */
	rctx->uploader = u_upload_create(&rctx->context, 1024 * 1024, 256,
					 PIPE_BIND_INDEX_BUFFER |
					 PIPE_BIND_CONSTANT_BUFFER);
	if (!rctx->uploader)
		goto fail;

	rctx->blitter = util_blitter_create(&rctx->context);
	if (rctx->blitter == NULL)
		goto fail;

	r600_get_backend_mask(rctx); /* this emits commands and must be last */

	if (rctx->chip_class == R600)
		r600_set_max_scissor(rctx);

	/* Bound so a fragment shader is always present, even before the
	 * state tracker binds one. */
	rctx->dummy_pixel_shader =
		util_make_fragment_cloneinput_shader(&rctx->context, 0,
						     TGSI_SEMANTIC_GENERIC,
						     TGSI_INTERPOLATE_CONSTANT);
	rctx->context.bind_fs_state(&rctx->context, rctx->dummy_pixel_shader);

	return &rctx->context;

fail:
	r600_destroy_context(&rctx->context);
	return NULL;
}
301
302 /*
303 * pipe_screen
304 */
/* pipe_screen::get_vendor — fixed vendor string; the screen is unused. */
static const char* r600_get_vendor(struct pipe_screen* pscreen)
{
	static const char vendor[] = "X.Org";

	return vendor;
}
309
310 static const char *r600_get_family_name(enum radeon_family family)
311 {
312 switch(family) {
313 case CHIP_R600: return "AMD R600";
314 case CHIP_RV610: return "AMD RV610";
315 case CHIP_RV630: return "AMD RV630";
316 case CHIP_RV670: return "AMD RV670";
317 case CHIP_RV620: return "AMD RV620";
318 case CHIP_RV635: return "AMD RV635";
319 case CHIP_RS780: return "AMD RS780";
320 case CHIP_RS880: return "AMD RS880";
321 case CHIP_RV770: return "AMD RV770";
322 case CHIP_RV730: return "AMD RV730";
323 case CHIP_RV710: return "AMD RV710";
324 case CHIP_RV740: return "AMD RV740";
325 case CHIP_CEDAR: return "AMD CEDAR";
326 case CHIP_REDWOOD: return "AMD REDWOOD";
327 case CHIP_JUNIPER: return "AMD JUNIPER";
328 case CHIP_CYPRESS: return "AMD CYPRESS";
329 case CHIP_HEMLOCK: return "AMD HEMLOCK";
330 case CHIP_PALM: return "AMD PALM";
331 case CHIP_SUMO: return "AMD SUMO";
332 case CHIP_SUMO2: return "AMD SUMO2";
333 case CHIP_BARTS: return "AMD BARTS";
334 case CHIP_TURKS: return "AMD TURKS";
335 case CHIP_CAICOS: return "AMD CAICOS";
336 case CHIP_CAYMAN: return "AMD CAYMAN";
337 case CHIP_ARUBA: return "AMD ARUBA";
338 default: return "AMD unknown";
339 }
340 }
341
342 static const char* r600_get_name(struct pipe_screen* pscreen)
343 {
344 struct r600_screen *rscreen = (struct r600_screen *)pscreen;
345
346 return r600_get_family_name(rscreen->family);
347 }
348
/* pipe_screen::get_param — report integer capabilities of this GPU to
 * the state tracker.  Caps are grouped: universally supported booleans,
 * per-generation features, unsupported features, stream-out, texturing,
 * render targets, and queries.  Unknown caps fall through to 0.
 */
static int r600_get_param(struct pipe_screen* pscreen, enum pipe_cap param)
{
	struct r600_screen *rscreen = (struct r600_screen *)pscreen;
	enum radeon_family family = rscreen->family;

	switch (param) {
	/* Supported features (boolean caps). */
	case PIPE_CAP_NPOT_TEXTURES:
	case PIPE_CAP_TWO_SIDED_STENCIL:
	case PIPE_CAP_ANISOTROPIC_FILTER:
	case PIPE_CAP_POINT_SPRITE:
	case PIPE_CAP_OCCLUSION_QUERY:
	case PIPE_CAP_TEXTURE_SHADOW_MAP:
	case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
	case PIPE_CAP_BLEND_EQUATION_SEPARATE:
	case PIPE_CAP_TEXTURE_SWIZZLE:
	case PIPE_CAP_DEPTHSTENCIL_CLEAR_SEPARATE:
	case PIPE_CAP_DEPTH_CLIP_DISABLE:
	case PIPE_CAP_SHADER_STENCIL_EXPORT:
	case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR:
	case PIPE_CAP_MIXED_COLORBUFFER_FORMATS:
	case PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT:
	case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
	case PIPE_CAP_SM3:
	case PIPE_CAP_SEAMLESS_CUBE_MAP:
	case PIPE_CAP_PRIMITIVE_RESTART:
	case PIPE_CAP_CONDITIONAL_RENDER:
	case PIPE_CAP_TEXTURE_BARRIER:
	case PIPE_CAP_VERTEX_COLOR_UNCLAMPED:
	case PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION:
	case PIPE_CAP_TGSI_INSTANCEID:
	case PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY:
	case PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY:
	case PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY:
	case PIPE_CAP_USER_INDEX_BUFFERS:
	case PIPE_CAP_USER_CONSTANT_BUFFERS:
		return 1;

	/* Constant buffer bindings must be 256-byte aligned. */
	case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
		return 256;

	/* 130 or 120, chosen at screen creation (R600_GLSL130 env var). */
	case PIPE_CAP_GLSL_FEATURE_LEVEL:
		return rscreen->glsl_feature_level;

	/* Supported except the original R600. */
	case PIPE_CAP_INDEP_BLEND_ENABLE:
	case PIPE_CAP_INDEP_BLEND_FUNC:
		/* R600 doesn't support per-MRT blends */
		return family == CHIP_R600 ? 0 : 1;

	/* Supported on Evergreen. */
	case PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE:
		return family >= CHIP_CEDAR ? 1 : 0;

	/* Unsupported features. */
	case PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT:
	case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER:
	case PIPE_CAP_SCALED_RESOLVE:
	case PIPE_CAP_TGSI_CAN_COMPACT_VARYINGS:
	case PIPE_CAP_TGSI_CAN_COMPACT_CONSTANTS:
	case PIPE_CAP_FRAGMENT_COLOR_CLAMPED:
	case PIPE_CAP_VERTEX_COLOR_CLAMPED:
	case PIPE_CAP_USER_VERTEX_BUFFERS:
	case PIPE_CAP_COMPUTE:
		return 0;

	/* Stream output — only if the kernel advertises it. */
	case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS:
		return rscreen->info.r600_has_streamout ? 4 : 0;
	case PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME:
		return rscreen->info.r600_has_streamout ? 1 : 0;
	case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS:
	case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS:
		return 16*4;

	/* Texturing. */
	case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
	case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
	case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
		if (family >= CHIP_CEDAR)
			return 15;
		else
			return 14;
	case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
		/* Array textures need DRM minor >= 9; otherwise disabled. */
		return rscreen->info.drm_minor >= 9 ?
			(family >= CHIP_CEDAR ? 16384 : 8192) : 0;
	case PIPE_CAP_MAX_COMBINED_SAMPLERS:
		return 32;

	/* Render targets. */
	case PIPE_CAP_MAX_RENDER_TARGETS:
		/* XXX some r6xx are buggy and can only do 4 */
		return 8;

	/* Timer queries, present when the clock frequency is non zero. */
	case PIPE_CAP_TIMER_QUERY:
		return rscreen->info.r600_clock_crystal_freq != 0;

	case PIPE_CAP_MIN_TEXEL_OFFSET:
		return -8;

	case PIPE_CAP_MAX_TEXEL_OFFSET:
		return 7;

	/* Dual-source blending only on pre-RV770 here. */
	case PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS:
		return (family < CHIP_RV770) ? 1 : 0;
	}
	return 0;
}
458
/* pipe_screen::get_paramf — floating-point capabilities.  Point/line
 * size limits double on Evergreen and later (CHIP_CEDAR+).
 */
static float r600_get_paramf(struct pipe_screen* pscreen,
			     enum pipe_capf param)
{
	struct r600_screen *rscreen = (struct r600_screen *)pscreen;
	enum radeon_family family = rscreen->family;

	switch (param) {
	case PIPE_CAPF_MAX_LINE_WIDTH:
	case PIPE_CAPF_MAX_LINE_WIDTH_AA:
	case PIPE_CAPF_MAX_POINT_WIDTH:
	case PIPE_CAPF_MAX_POINT_WIDTH_AA:
		if (family >= CHIP_CEDAR)
			return 16384.0f;
		else
			return 8192.0f;
	case PIPE_CAPF_MAX_TEXTURE_ANISOTROPY:
		return 16.0f;
	case PIPE_CAPF_MAX_TEXTURE_LOD_BIAS:
		return 16.0f;
	/* No guard band advertised. */
	case PIPE_CAPF_GUARD_BAND_LEFT:
	case PIPE_CAPF_GUARD_BAND_TOP:
	case PIPE_CAPF_GUARD_BAND_RIGHT:
	case PIPE_CAPF_GUARD_BAND_BOTTOM:
		return 0.0f;
	}
	return 0.0f;
}
486
/* pipe_screen::get_shader_param — per-stage shader limits.  Only vertex
 * and fragment shaders are exposed; geometry/tessellation return 0.
 */
static int r600_get_shader_param(struct pipe_screen* pscreen, unsigned shader, enum pipe_shader_cap param)
{
	struct r600_screen *rscreen = (struct r600_screen *)pscreen;
	switch(shader)
	{
	case PIPE_SHADER_FRAGMENT:
	case PIPE_SHADER_VERTEX:
		break;
	case PIPE_SHADER_GEOMETRY:
		/* XXX: support and enable geometry programs */
		return 0;
	default:
		/* XXX: support tessellation on Evergreen */
		return 0;
	}

	/* XXX: all these should be fixed, since r600 surely supports much more! */
	switch (param) {
	case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
	case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
	case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
	case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
		return 16384;
	case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
		return 8; /* XXX */
	case PIPE_SHADER_CAP_MAX_INPUTS:
		/* FS gets two extra slots (presumably position/face —
		 * TODO confirm) on top of the 32 generic inputs. */
		if(shader == PIPE_SHADER_FRAGMENT)
			return 34;
		else
			return 32;
	case PIPE_SHADER_CAP_MAX_TEMPS:
		return 256; /* Max native temporaries. */
	case PIPE_SHADER_CAP_MAX_ADDRS:
		/* XXX Isn't this equal to TEMPS? */
		return 1; /* Max native address registers */
	case PIPE_SHADER_CAP_MAX_CONSTS:
		return R600_MAX_CONST_BUFFER_SIZE;
	case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
		/* One buffer appears to be reserved for internal use —
		 * TODO confirm which. */
		return R600_MAX_CONST_BUFFERS-1;
	case PIPE_SHADER_CAP_MAX_PREDS:
		return 0; /* nothing uses this */
	case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED:
		return 1;
	/* All forms of indirect addressing are supported. */
	case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:
	case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
	case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
	case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
		return 1;
	case PIPE_SHADER_CAP_SUBROUTINES:
		return 0;
	case PIPE_SHADER_CAP_INTEGERS:
		/* Native integers only with the GLSL 1.30 feature level. */
		return rscreen->glsl_feature_level >= 130;
	case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
		return 16;
	case PIPE_SHADER_CAP_PREFERRED_IR:
		return PIPE_SHADER_IR_TGSI;
	}
	return 0;
}
546
/* pipe_screen::get_video_param — video decoding capabilities.  This
 * driver delegates entirely to the generic shader-based vl decoder, so
 * the answers come from the vl helpers.
 */
static int r600_get_video_param(struct pipe_screen *screen,
				enum pipe_video_profile profile,
				enum pipe_video_cap param)
{
	switch (param) {
	case PIPE_VIDEO_CAP_SUPPORTED:
		return vl_profile_supported(screen, profile);
	case PIPE_VIDEO_CAP_NPOT_TEXTURES:
		return 1;
	case PIPE_VIDEO_CAP_MAX_WIDTH:
	case PIPE_VIDEO_CAP_MAX_HEIGHT:
		return vl_video_buffer_max_size(screen);
	case PIPE_VIDEO_CAP_PREFERED_FORMAT:
		return PIPE_FORMAT_NV12;
	/* Progressive only; no interlaced decode support. */
	case PIPE_VIDEO_CAP_PREFERS_INTERLACED:
		return false;
	case PIPE_VIDEO_CAP_SUPPORTS_INTERLACED:
		return false;
	case PIPE_VIDEO_CAP_SUPPORTS_PROGRESSIVE:
		return true;
	default:
		return 0;
	}
}
571
/* pipe_screen::destroy — free the fence machinery, the winsys, and the
 * screen itself.  The fence BO (and its mapping) only exists if at least
 * one fence was ever created.
 */
static void r600_destroy_screen(struct pipe_screen* pscreen)
{
	struct r600_screen *rscreen = (struct r600_screen *)pscreen;

	if (rscreen == NULL)
		return;

	if (rscreen->fences.bo) {
		struct r600_fence_block *entry, *tmp;

		/* Free all fence blocks; individual fences live inside the
		 * blocks, so this releases them too. */
		LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, &rscreen->fences.blocks, head) {
			LIST_DEL(&entry->head);
			FREE(entry);
		}

		rscreen->ws->buffer_unmap(rscreen->fences.bo->cs_buf);
		pipe_resource_reference((struct pipe_resource**)&rscreen->fences.bo, NULL);
	}
	pipe_mutex_destroy(rscreen->fences.mutex);

	rscreen->ws->destroy(rscreen->ws);
	FREE(rscreen);
}
595
/* pipe_screen::fence_reference — retarget *ptr to 'fence', adjusting
 * reference counts.  When the old fence's count drops to zero, its sleep
 * BO is released and the fence slot is returned to the screen's free
 * pool for reuse by r600_create_fence.
 *
 * NOTE(review): pipe_reference dereferences (*oldf) unconditionally, so
 * this assumes *ptr is never NULL on entry — confirm callers guarantee
 * that.
 */
static void r600_fence_reference(struct pipe_screen *pscreen,
				 struct pipe_fence_handle **ptr,
				 struct pipe_fence_handle *fence)
{
	struct r600_fence **oldf = (struct r600_fence**)ptr;
	struct r600_fence *newf = (struct r600_fence*)fence;

	if (pipe_reference(&(*oldf)->reference, &newf->reference)) {
		struct r600_screen *rscreen = (struct r600_screen *)pscreen;
		pipe_mutex_lock(rscreen->fences.mutex);
		pipe_resource_reference((struct pipe_resource**)&(*oldf)->sleep_bo, NULL);
		LIST_ADDTAIL(&(*oldf)->head, &rscreen->fences.pool);
		pipe_mutex_unlock(rscreen->fences.mutex);
	}

	*ptr = fence;
}
613
/* pipe_screen::fence_signalled — non-blocking check.  The GPU writes a
 * non-zero value into the fence's slot in the shared fence BO when it
 * passes the fence command (see r600_create_fence).
 */
static boolean r600_fence_signalled(struct pipe_screen *pscreen,
				    struct pipe_fence_handle *fence)
{
	struct r600_screen *rscreen = (struct r600_screen *)pscreen;
	struct r600_fence *rfence = (struct r600_fence*)fence;

	return rscreen->fences.data[rfence->index];
}
622
/* pipe_screen::fence_finish — wait until the fence signals or 'timeout'
 * (nanoseconds) elapses.  Infinite waits block on the fence's dummy
 * sleep BO instead of spinning; finite waits poll, yielding the CPU
 * every 256 spins.  Returns TRUE if the fence has signalled.
 */
static boolean r600_fence_finish(struct pipe_screen *pscreen,
				 struct pipe_fence_handle *fence,
				 uint64_t timeout)
{
	struct r600_screen *rscreen = (struct r600_screen *)pscreen;
	struct r600_fence *rfence = (struct r600_fence*)fence;
	int64_t start_time = 0;
	unsigned spins = 0;

	if (timeout != PIPE_TIMEOUT_INFINITE) {
		start_time = os_time_get();

		/* Convert to microseconds. */
		timeout /= 1000;
	}

	while (rscreen->fences.data[rfence->index] == 0) {
		/* Special-case infinite timeout - wait for the dummy BO to become idle */
		if (timeout == PIPE_TIMEOUT_INFINITE) {
			rscreen->ws->buffer_wait(rfence->sleep_bo->buf, RADEON_USAGE_READWRITE);
			break;
		}

		/* The dummy BO will be busy until the CS including the fence has completed, or
		 * the GPU is reset. Don't bother continuing to spin when the BO is idle. */
		if (!rscreen->ws->buffer_is_busy(rfence->sleep_bo->buf, RADEON_USAGE_READWRITE))
			break;

		/* Busy-spin 255 of every 256 iterations; yield/sleep on the
		 * 256th to avoid hogging the CPU. */
		if (++spins % 256)
			continue;
#ifdef PIPE_OS_UNIX
		sched_yield();
#else
		os_time_sleep(10);
#endif
		if (timeout != PIPE_TIMEOUT_INFINITE &&
		    os_time_get() - start_time >= timeout) {
			break;
		}
	}

	/* Re-read the slot: the loop may have exited on timeout or on an
	 * idle BO rather than on an actual signal. */
	return rscreen->fences.data[rfence->index] != 0;
}
666
667 static int r600_interpret_tiling(struct r600_screen *rscreen, uint32_t tiling_config)
668 {
669 switch ((tiling_config & 0xe) >> 1) {
670 case 0:
671 rscreen->tiling_info.num_channels = 1;
672 break;
673 case 1:
674 rscreen->tiling_info.num_channels = 2;
675 break;
676 case 2:
677 rscreen->tiling_info.num_channels = 4;
678 break;
679 case 3:
680 rscreen->tiling_info.num_channels = 8;
681 break;
682 default:
683 return -EINVAL;
684 }
685
686 switch ((tiling_config & 0x30) >> 4) {
687 case 0:
688 rscreen->tiling_info.num_banks = 4;
689 break;
690 case 1:
691 rscreen->tiling_info.num_banks = 8;
692 break;
693 default:
694 return -EINVAL;
695
696 }
697 switch ((tiling_config & 0xc0) >> 6) {
698 case 0:
699 rscreen->tiling_info.group_bytes = 256;
700 break;
701 case 1:
702 rscreen->tiling_info.group_bytes = 512;
703 break;
704 default:
705 return -EINVAL;
706 }
707 return 0;
708 }
709
710 static int evergreen_interpret_tiling(struct r600_screen *rscreen, uint32_t tiling_config)
711 {
712 switch (tiling_config & 0xf) {
713 case 0:
714 rscreen->tiling_info.num_channels = 1;
715 break;
716 case 1:
717 rscreen->tiling_info.num_channels = 2;
718 break;
719 case 2:
720 rscreen->tiling_info.num_channels = 4;
721 break;
722 case 3:
723 rscreen->tiling_info.num_channels = 8;
724 break;
725 default:
726 return -EINVAL;
727 }
728
729 switch ((tiling_config & 0xf0) >> 4) {
730 case 0:
731 rscreen->tiling_info.num_banks = 4;
732 break;
733 case 1:
734 rscreen->tiling_info.num_banks = 8;
735 break;
736 case 2:
737 rscreen->tiling_info.num_banks = 16;
738 break;
739 default:
740 return -EINVAL;
741 }
742
743 switch ((tiling_config & 0xf00) >> 8) {
744 case 0:
745 rscreen->tiling_info.group_bytes = 256;
746 break;
747 case 1:
748 rscreen->tiling_info.group_bytes = 512;
749 break;
750 default:
751 return -EINVAL;
752 }
753 return 0;
754 }
755
756 static int r600_init_tiling(struct r600_screen *rscreen)
757 {
758 uint32_t tiling_config = rscreen->info.r600_tiling_config;
759
760 /* set default group bytes, overridden by tiling info ioctl */
761 if (rscreen->chip_class <= R700) {
762 rscreen->tiling_info.group_bytes = 256;
763 } else {
764 rscreen->tiling_info.group_bytes = 512;
765 }
766
767 if (!tiling_config)
768 return 0;
769
770 if (rscreen->chip_class <= R700) {
771 return r600_interpret_tiling(rscreen, tiling_config);
772 } else {
773 return evergreen_interpret_tiling(rscreen, tiling_config);
774 }
775 }
776
/* Map a PCI device ID to its radeon_family value.  The case labels are
 * generated by expanding the CHIPSET() entries of the shared PCI-ID
 * list, keeping this table in sync with the other drivers.  Returns
 * CHIP_UNKNOWN for unrecognized IDs.
 */
static unsigned radeon_family_from_device(unsigned device)
{
	switch (device) {
#define CHIPSET(pciid, name, family) case pciid: return CHIP_##family;
#include "pci_ids/r600_pci_ids.h"
#undef CHIPSET
	default:
		return CHIP_UNKNOWN;
	}
}
787
/* Create the r600 pipe_screen for a radeon winsys.  Probes the device,
 * determines family/chip class, initializes tiling info and the shared
 * fence state, and wires up the pipe_screen vtable.  Returns NULL on
 * any probe/init failure (freeing the partial screen).
 */
struct pipe_screen *r600_screen_create(struct radeon_winsys *ws)
{
	struct r600_screen *rscreen = CALLOC_STRUCT(r600_screen);

	if (rscreen == NULL) {
		return NULL;
	}

	rscreen->ws = ws;
	ws->query_info(ws, &rscreen->info);

	rscreen->family = radeon_family_from_device(rscreen->info.pci_id);
	if (rscreen->family == CHIP_UNKNOWN) {
		fprintf(stderr, "r600: Unknown chipset 0x%04X\n", rscreen->info.pci_id);
		FREE(rscreen);
		return NULL;
	}

	/* setup class */
	if (rscreen->family >= CHIP_CAYMAN) {
		rscreen->chip_class = CAYMAN;
	} else if (rscreen->family >= CHIP_CEDAR) {
		rscreen->chip_class = EVERGREEN;
	} else if (rscreen->family >= CHIP_RV770) {
		rscreen->chip_class = R700;
	} else {
		rscreen->chip_class = R600;
	}

	/* XXX streamout is said to be broken on r700 and cayman */
	if ((rscreen->chip_class == R700 ||
	     rscreen->chip_class == CAYMAN) &&
	    !debug_get_bool_option("R600_STREAMOUT", FALSE)) {
		rscreen->info.r600_has_streamout = false;
	}

	if (r600_init_tiling(rscreen)) {
		FREE(rscreen);
		return NULL;
	}

	/* pipe_screen vtable. */
	rscreen->screen.destroy = r600_destroy_screen;
	rscreen->screen.get_name = r600_get_name;
	rscreen->screen.get_vendor = r600_get_vendor;
	rscreen->screen.get_param = r600_get_param;
	rscreen->screen.get_shader_param = r600_get_shader_param;
	rscreen->screen.get_paramf = r600_get_paramf;
	rscreen->screen.get_video_param = r600_get_video_param;
	if (rscreen->chip_class >= EVERGREEN) {
		rscreen->screen.is_format_supported = evergreen_is_format_supported;
	} else {
		rscreen->screen.is_format_supported = r600_is_format_supported;
	}
	rscreen->screen.is_video_format_supported = vl_video_buffer_is_format_supported;
	rscreen->screen.context_create = r600_create_context;
	rscreen->screen.fence_reference = r600_fence_reference;
	rscreen->screen.fence_signalled = r600_fence_signalled;
	rscreen->screen.fence_finish = r600_fence_finish;
	r600_init_screen_resource_functions(&rscreen->screen);

	util_format_s3tc_init();

	/* Fence state: the BO and its mapping are created lazily by
	 * r600_create_fence; here we only set up the empty pools. */
	rscreen->fences.bo = NULL;
	rscreen->fences.data = NULL;
	rscreen->fences.next_index = 0;
	LIST_INITHEAD(&rscreen->fences.pool);
	LIST_INITHEAD(&rscreen->fences.blocks);
	pipe_mutex_init(rscreen->fences.mutex);

	/* Debug knobs: R600_SURF toggles the surface allocator, R600_GLSL130
	 * selects the advertised GLSL feature level (130 vs 120). */
	rscreen->use_surface_alloc = debug_get_bool_option("R600_SURF", TRUE);
	rscreen->glsl_feature_level = debug_get_bool_option("R600_GLSL130", TRUE) ? 130 : 120;

	return &rscreen->screen;
}
861 }