gallium: set PIPE_CAP_MIXED_COLORBUFFER_FORMATS in some drivers
[mesa.git] / src / gallium / drivers / r600 / r600_pipe.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include <stdio.h>
24 #include <errno.h>
25 #include <pipe/p_defines.h>
26 #include <pipe/p_state.h>
27 #include <pipe/p_context.h>
28 #include <tgsi/tgsi_scan.h>
29 #include <tgsi/tgsi_parse.h>
30 #include <tgsi/tgsi_util.h>
31 #include <util/u_blitter.h>
32 #include <util/u_double_list.h>
33 #include <util/u_format_s3tc.h>
34 #include <util/u_transfer.h>
35 #include <util/u_surface.h>
36 #include <util/u_pack_color.h>
37 #include <util/u_memory.h>
38 #include <util/u_inlines.h>
39 #include "util/u_upload_mgr.h"
40 #include "os/os_time.h"
41 #include <pipebuffer/pb_buffer.h>
42 #include "r600.h"
43 #include "r600d.h"
44 #include "r600_resource.h"
45 #include "r600_shader.h"
46 #include "r600_pipe.h"
47 #include "r600_state_inlines.h"
48
49 /*
50 * pipe_context
51 */
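/*
 * Fences are 32-bit slots in a single shared buffer object.  r600_create_fence()
 * lazily allocates a 4096-byte BO (1024 slots), first tries to recycle an
 * already-signalled fence from ctx->fences.pool, and otherwise takes the next
 * free index, allocating r600_fence_block storage FENCE_BLOCK_SIZE entries at
 * a time.  The slot is cleared here and written to 1 by the GPU through
 * r600_context_emit_fence(), so a non-zero value means "signalled".
 */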
52 static struct r600_fence *r600_create_fence(struct r600_pipe_context *ctx)
53 {
54 struct r600_fence *fence = NULL;
55
56 if (!ctx->fences.bo) {
57 /* Create the shared buffer object */
58 ctx->fences.bo = r600_bo(ctx->radeon, 4096, 0, 0, 0);
59 if (!ctx->fences.bo) {
60 R600_ERR("r600: failed to create bo for fence objects\n");
61 return NULL;
62 }
63 ctx->fences.data = r600_bo_map(ctx->radeon, ctx->fences.bo, PB_USAGE_UNSYNCHRONIZED, NULL);
64 }
65
66 if (!LIST_IS_EMPTY(&ctx->fences.pool)) {
67 struct r600_fence *entry;
68
69 /* Try to find a freed fence that has been signalled */
70 LIST_FOR_EACH_ENTRY(entry, &ctx->fences.pool, head) {
71 if (ctx->fences.data[entry->index] != 0) {
72 LIST_DELINIT(&entry->head);
73 fence = entry;
74 break;
75 }
76 }
77 }
78
79 if (!fence) {
80 /* Allocate a new fence */
81 struct r600_fence_block *block;
82 unsigned index;
83
84 if ((ctx->fences.next_index + 1) >= 1024) {
85 R600_ERR("r600: too many concurrent fences\n");
86 return NULL;
87 }
88
89 index = ctx->fences.next_index++;
90
91 if (!(index % FENCE_BLOCK_SIZE)) {
92 /* Allocate a new block */
93 block = CALLOC_STRUCT(r600_fence_block);
94 if (block == NULL)
95 return NULL;
96
97 LIST_ADD(&block->head, &ctx->fences.blocks);
98 } else {
99 block = LIST_ENTRY(struct r600_fence_block, ctx->fences.blocks.next, head);
100 }
101
102 fence = &block->fences[index % FENCE_BLOCK_SIZE];
103 fence->ctx = ctx;
104 fence->index = index;
105 }
106
107 pipe_reference_init(&fence->reference, 1);
108
109 ctx->fences.data[fence->index] = 0;
110 r600_context_emit_fence(&ctx->ctx, ctx->fences.bo, fence->index, 1);
111 return fence;
112 }
113
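/*
 * Flush the command stream.  The fence is created up front so callers always
 * get a valid handle, even when nothing is queued (pm4_cdwords == 0) and the
 * hardware flush itself is skipped.
 *
 * A minimal usage sketch from a hypothetical state-tracker caller (not part
 * of this driver), matching the flush/fence interface as wired up below:
 *
 *   struct pipe_fence_handle *fence = NULL;
 *   pipe->flush(pipe, &fence);
 *   if (fence) {
 *           screen->fence_finish(screen, fence, PIPE_TIMEOUT_INFINITE);
 *           screen->fence_reference(screen, &fence, NULL);
 *   }
 */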
114 static void r600_flush(struct pipe_context *ctx,
115 struct pipe_fence_handle **fence)
116 {
117 struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
118 struct r600_fence **rfence = (struct r600_fence**)fence;
119
120 #if 0
121 static int dc = 0;
122 char dname[256];
123 #endif
124
125 if (rfence)
126 *rfence = r600_create_fence(rctx);
127
128 if (!rctx->ctx.pm4_cdwords)
129 return;
130
131 #if 0
132 sprintf(dname, "gallium-%08d.bof", dc);
133 if (dc < 20) {
134 r600_context_dump_bof(&rctx->ctx, dname);
135 R600_ERR("dumped %s\n", dname);
136 }
137 dc++;
138 #endif
139 r600_context_flush(&rctx->ctx);
140
141 /* XXX This shouldn't really be necessary, but removing it breaks some tests.
142 * Needless buffer reallocations may significantly increase memory consumption,
143 * so getting rid of this call is important. */
144 u_upload_flush(rctx->vbuf_mgr->uploader);
145 }
146
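/*
 * The screen-wide buffer slab only pays for locking when it has to: it is
 * switched to thread-safe mode once a second context appears and back to
 * single-threaded mode when the count drops to one.
 */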
147 static void r600_update_num_contexts(struct r600_screen *rscreen, int diff)
148 {
149 pipe_mutex_lock(rscreen->mutex_num_contexts);
150 if (diff > 0) {
151 rscreen->num_contexts++;
152
153 if (rscreen->num_contexts > 1)
154 util_slab_set_thread_safety(&rscreen->pool_buffers,
155 UTIL_SLAB_MULTITHREADED);
156 } else {
157 rscreen->num_contexts--;
158
159 if (rscreen->num_contexts <= 1)
160 util_slab_set_thread_safety(&rscreen->pool_buffers,
161 UTIL_SLAB_SINGLETHREADED);
162 }
163 pipe_mutex_unlock(rscreen->mutex_num_contexts);
164 }
165
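/* Tear-down mirrors r600_create_context(): the custom DB-flush DSA state,
 * per-context state objects, blitter, vertex buffer manager and fence BO are
 * released first, and the screen's context count is dropped last. */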
166 static void r600_destroy_context(struct pipe_context *context)
167 {
168 struct r600_pipe_context *rctx = (struct r600_pipe_context *)context;
169
170 rctx->context.delete_depth_stencil_alpha_state(&rctx->context, rctx->custom_dsa_flush);
171
172 r600_context_fini(&rctx->ctx);
173
174 util_blitter_destroy(rctx->blitter);
175
176 for (int i = 0; i < R600_PIPE_NSTATES; i++) {
177 free(rctx->states[i]);
178 }
179
180 u_vbuf_mgr_destroy(rctx->vbuf_mgr);
181 util_slab_destroy(&rctx->pool_transfers);
182
183 if (rctx->fences.bo) {
184 struct r600_fence_block *entry, *tmp;
185
186 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, &rctx->fences.blocks, head) {
187 LIST_DEL(&entry->head);
188 FREE(entry);
189 }
190
191 r600_bo_unmap(rctx->radeon, rctx->fences.bo);
192 r600_bo_reference(rctx->radeon, &rctx->fences.bo, NULL);
193 }
194
195 r600_update_num_contexts(rctx->screen, -1);
196
197 FREE(rctx);
198 }
199
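/*
 * Context creation dispatches on the chip family: R6xx/R7xx parts take the
 * r600_* state and context-init paths, while Cedar through Caicos take the
 * evergreen_* paths.  Shared pieces (transfer slab, vertex buffer manager,
 * blitter, custom DB-flush DSA state) are set up afterwards.
 */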
200 static struct pipe_context *r600_create_context(struct pipe_screen *screen, void *priv)
201 {
202 struct r600_pipe_context *rctx = CALLOC_STRUCT(r600_pipe_context);
203 struct r600_screen* rscreen = (struct r600_screen *)screen;
204 enum chip_class class;
205
206 if (rctx == NULL)
207 return NULL;
208
209 r600_update_num_contexts(rscreen, 1);
210
211 rctx->context.winsys = rscreen->screen.winsys;
212 rctx->context.screen = screen;
213 rctx->context.priv = priv;
214 rctx->context.destroy = r600_destroy_context;
215 rctx->context.flush = r600_flush;
216
217 /* Easy accessing of screen/winsys. */
218 rctx->screen = rscreen;
219 rctx->radeon = rscreen->radeon;
220 rctx->family = r600_get_family(rctx->radeon);
221
222 rctx->fences.bo = NULL;
223 rctx->fences.data = NULL;
224 rctx->fences.next_index = 0;
225 LIST_INITHEAD(&rctx->fences.pool);
226 LIST_INITHEAD(&rctx->fences.blocks);
227
228 r600_init_blit_functions(rctx);
229 r600_init_query_functions(rctx);
230 r600_init_context_resource_functions(rctx);
231 r600_init_surface_functions(rctx);
232 rctx->context.draw_vbo = r600_draw_vbo;
233
234 switch (r600_get_family(rctx->radeon)) {
235 case CHIP_R600:
236 case CHIP_RV610:
237 case CHIP_RV630:
238 case CHIP_RV670:
239 case CHIP_RV620:
240 case CHIP_RV635:
241 case CHIP_RS780:
242 case CHIP_RS880:
243 case CHIP_RV770:
244 case CHIP_RV730:
245 case CHIP_RV710:
246 case CHIP_RV740:
247 r600_init_state_functions(rctx);
248 if (r600_context_init(&rctx->ctx, rctx->radeon)) {
249 r600_destroy_context(&rctx->context);
250 return NULL;
251 }
252 r600_init_config(rctx);
253 break;
254 case CHIP_CEDAR:
255 case CHIP_REDWOOD:
256 case CHIP_JUNIPER:
257 case CHIP_CYPRESS:
258 case CHIP_HEMLOCK:
259 case CHIP_PALM:
260 case CHIP_BARTS:
261 case CHIP_TURKS:
262 case CHIP_CAICOS:
263 evergreen_init_state_functions(rctx);
264 if (evergreen_context_init(&rctx->ctx, rctx->radeon)) {
265 r600_destroy_context(&rctx->context);
266 return NULL;
267 }
268 evergreen_init_config(rctx);
269 break;
270 default:
271 R600_ERR("unsupported family %d\n", r600_get_family(rctx->radeon));
272 r600_destroy_context(&rctx->context);
273 return NULL;
274 }
275
276 util_slab_create(&rctx->pool_transfers,
277 sizeof(struct pipe_transfer), 64,
278 UTIL_SLAB_SINGLETHREADED);
279
280 rctx->vbuf_mgr = u_vbuf_mgr_create(&rctx->context, 1024 * 1024, 256,
281 PIPE_BIND_VERTEX_BUFFER |
282 PIPE_BIND_INDEX_BUFFER |
283 PIPE_BIND_CONSTANT_BUFFER,
284 U_VERTEX_FETCH_DWORD_ALIGNED);
285 if (!rctx->vbuf_mgr) {
286 r600_destroy_context(&rctx->context);
287 return NULL;
288 }
289
290 rctx->blitter = util_blitter_create(&rctx->context);
291 if (rctx->blitter == NULL) {
292 r600_destroy_context(&rctx->context);
293 return NULL;
294 }
295
296 class = r600_get_family_class(rctx->radeon);
297 if (class == R600 || class == R700)
298 rctx->custom_dsa_flush = r600_create_db_flush_dsa(rctx);
299 else
300 rctx->custom_dsa_flush = evergreen_create_db_flush_dsa(rctx);
301
302 return &rctx->context;
303 }
304
305 /*
306 * pipe_screen
307 */
308 static const char* r600_get_vendor(struct pipe_screen* pscreen)
309 {
310 return "X.Org";
311 }
312
313 static const char *r600_get_family_name(enum radeon_family family)
314 {
315 switch(family) {
316 case CHIP_R600: return "AMD R600";
317 case CHIP_RV610: return "AMD RV610";
318 case CHIP_RV630: return "AMD RV630";
319 case CHIP_RV670: return "AMD RV670";
320 case CHIP_RV620: return "AMD RV620";
321 case CHIP_RV635: return "AMD RV635";
322 case CHIP_RS780: return "AMD RS780";
323 case CHIP_RS880: return "AMD RS880";
324 case CHIP_RV770: return "AMD RV770";
325 case CHIP_RV730: return "AMD RV730";
326 case CHIP_RV710: return "AMD RV710";
327 case CHIP_RV740: return "AMD RV740";
328 case CHIP_CEDAR: return "AMD CEDAR";
329 case CHIP_REDWOOD: return "AMD REDWOOD";
330 case CHIP_JUNIPER: return "AMD JUNIPER";
331 case CHIP_CYPRESS: return "AMD CYPRESS";
332 case CHIP_HEMLOCK: return "AMD HEMLOCK";
333 case CHIP_PALM: return "AMD PALM";
334 case CHIP_BARTS: return "AMD BARTS";
335 case CHIP_TURKS: return "AMD TURKS";
336 case CHIP_CAICOS: return "AMD CAICOS";
337 default: return "AMD unknown";
338 }
339 }
340
341 static const char* r600_get_name(struct pipe_screen* pscreen)
342 {
343 struct r600_screen *rscreen = (struct r600_screen *)pscreen;
344 enum radeon_family family = r600_get_family(rscreen->radeon);
345
346 return r600_get_family_name(family);
347 }
348
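/*
 * Boolean and integer capability queries.  PIPE_CAP_MIXED_COLORBUFFER_FORMATS,
 * the cap this change enables, is advertised unconditionally for every
 * supported family.
 */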
349 static int r600_get_param(struct pipe_screen* pscreen, enum pipe_cap param)
350 {
351 struct r600_screen *rscreen = (struct r600_screen *)pscreen;
352 enum radeon_family family = r600_get_family(rscreen->radeon);
353
354 switch (param) {
355 /* Supported features (boolean caps). */
356 case PIPE_CAP_NPOT_TEXTURES:
357 case PIPE_CAP_TWO_SIDED_STENCIL:
358 case PIPE_CAP_GLSL:
359 case PIPE_CAP_DUAL_SOURCE_BLEND:
360 case PIPE_CAP_ANISOTROPIC_FILTER:
361 case PIPE_CAP_POINT_SPRITE:
362 case PIPE_CAP_OCCLUSION_QUERY:
363 case PIPE_CAP_TEXTURE_SHADOW_MAP:
364 case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
365 case PIPE_CAP_TEXTURE_MIRROR_REPEAT:
366 case PIPE_CAP_BLEND_EQUATION_SEPARATE:
367 case PIPE_CAP_SM3:
368 case PIPE_CAP_TEXTURE_SWIZZLE:
369 case PIPE_CAP_DEPTHSTENCIL_CLEAR_SEPARATE:
370 case PIPE_CAP_DEPTH_CLAMP:
371 case PIPE_CAP_SHADER_STENCIL_EXPORT:
372 case PIPE_CAP_TGSI_INSTANCEID:
373 case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR:
374 case PIPE_CAP_MIXED_COLORBUFFER_FORMATS:
375 return 1;
376 case PIPE_CAP_INDEP_BLEND_ENABLE:
377 /* R600 doesn't support per-MRT blends */
378 if (family == CHIP_R600)
379 return 0;
380 else
381 return 1;
382
383 /* Unsupported features (boolean caps). */
384 case PIPE_CAP_STREAM_OUTPUT:
385 case PIPE_CAP_PRIMITIVE_RESTART:
386 case PIPE_CAP_INDEP_BLEND_FUNC: /* FIXME allow this; R600 doesn't support per-MRT blends */
387 return 0;
392
393 case PIPE_CAP_ARRAY_TEXTURES:
394 /* enable by default once the upstream CS checker is fixed */
395 return debug_get_bool_option("R600_ARRAY_TEXTURE", FALSE);
396
397 /* Texturing. */
398 case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
399 case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
400 case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
401 if (family >= CHIP_CEDAR)
402 return 15;
403 else
404 return 14;
405 case PIPE_CAP_MAX_VERTEX_TEXTURE_UNITS:
406 /* FIXME allow this once infrastructure is there */
407 return 16;
408 case PIPE_CAP_MAX_TEXTURE_IMAGE_UNITS:
409 case PIPE_CAP_MAX_COMBINED_SAMPLERS:
410 return 16;
411
412 /* Render targets. */
413 case PIPE_CAP_MAX_RENDER_TARGETS:
414 /* FIXME some r6xx are buggy and can only do 4 */
415 return 8;
416
417 /* Fragment coordinate conventions. */
418 case PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT:
419 case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
420 return 1;
421 case PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT:
422 case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER:
423 return 0;
424
425 /* Timer queries, present when the clock crystal frequency is non-zero. */
426 case PIPE_CAP_TIMER_QUERY:
427 return r600_get_clock_crystal_freq(rscreen->radeon) != 0;
428
429 default:
430 R600_ERR("r600: unknown param %d\n", param);
431 return 0;
432 }
433 }
434
435 static float r600_get_paramf(struct pipe_screen* pscreen, enum pipe_cap param)
436 {
437 struct r600_screen *rscreen = (struct r600_screen *)pscreen;
438 enum radeon_family family = r600_get_family(rscreen->radeon);
439
440 switch (param) {
441 case PIPE_CAP_MAX_LINE_WIDTH:
442 case PIPE_CAP_MAX_LINE_WIDTH_AA:
443 case PIPE_CAP_MAX_POINT_WIDTH:
444 case PIPE_CAP_MAX_POINT_WIDTH_AA:
445 if (family >= CHIP_CEDAR)
446 return 16384.0f;
447 else
448 return 8192.0f;
449 case PIPE_CAP_MAX_TEXTURE_ANISOTROPY:
450 return 16.0f;
451 case PIPE_CAP_MAX_TEXTURE_LOD_BIAS:
452 return 16.0f;
453 default:
454 R600_ERR("r600: unsupported paramf %d\n", param);
455 return 0.0f;
456 }
457 }
458
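/* Per-stage shader limits.  Only the vertex and fragment stages are exposed;
 * geometry and anything newer report 0 for every cap. */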
459 static int r600_get_shader_param(struct pipe_screen* pscreen, unsigned shader, enum pipe_shader_cap param)
460 {
461 switch(shader)
462 {
463 case PIPE_SHADER_FRAGMENT:
464 case PIPE_SHADER_VERTEX:
465 break;
466 case PIPE_SHADER_GEOMETRY:
467 /* TODO: support and enable geometry programs */
468 return 0;
469 default:
470 /* TODO: support tessellation on Evergreen */
471 return 0;
472 }
473
474 /* TODO: all these should be fixed, since r600 surely supports much more! */
475 switch (param) {
476 case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
477 case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
478 case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
479 case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
480 return 16384;
481 case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
482 return 8; /* FIXME */
483 case PIPE_SHADER_CAP_MAX_INPUTS:
484 if (shader == PIPE_SHADER_FRAGMENT)
485 return 10;
486 else
487 return 16;
488 case PIPE_SHADER_CAP_MAX_TEMPS:
489 return 256; /* max native temporaries */
490 case PIPE_SHADER_CAP_MAX_ADDRS:
491 return 1; /* max native address registers; FIXME isn't this equal to TEMPS? */
492 case PIPE_SHADER_CAP_MAX_CONSTS:
493 return R600_MAX_CONST_BUFFER_SIZE;
494 case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
495 return R600_MAX_CONST_BUFFERS;
496 case PIPE_SHADER_CAP_MAX_PREDS:
497 return 0; /* FIXME */
498 case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED:
499 return 1;
500 case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:
501 case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
502 case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
503 case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
504 return 1;
505 case PIPE_SHADER_CAP_SUBROUTINES:
506 return 0;
507 default:
508 return 0;
509 }
510 }
511
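/*
 * A format is reported as supported only if every bind flag the caller asked
 * for can be satisfied: each check ORs the flags it supports into retval and
 * the result is compared against the full usage mask at the end.
 */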
512 static boolean r600_is_format_supported(struct pipe_screen* screen,
513 enum pipe_format format,
514 enum pipe_texture_target target,
515 unsigned sample_count,
516 unsigned usage)
517 {
518 unsigned retval = 0;
519 if (target >= PIPE_MAX_TEXTURE_TYPES) {
520 R600_ERR("r600: unsupported texture type %d\n", target);
521 return FALSE;
522 }
523
524 /* Multisample */
525 if (sample_count > 1)
526 return FALSE;
527
528 if ((usage & PIPE_BIND_SAMPLER_VIEW) &&
529 r600_is_sampler_format_supported(screen, format)) {
530 retval |= PIPE_BIND_SAMPLER_VIEW;
531 }
532
533 if ((usage & (PIPE_BIND_RENDER_TARGET |
534 PIPE_BIND_DISPLAY_TARGET |
535 PIPE_BIND_SCANOUT |
536 PIPE_BIND_SHARED)) &&
537 r600_is_colorbuffer_format_supported(format)) {
538 retval |= usage &
539 (PIPE_BIND_RENDER_TARGET |
540 PIPE_BIND_DISPLAY_TARGET |
541 PIPE_BIND_SCANOUT |
542 PIPE_BIND_SHARED);
543 }
544
545 if ((usage & PIPE_BIND_DEPTH_STENCIL) &&
546 r600_is_zs_format_supported(format)) {
547 retval |= PIPE_BIND_DEPTH_STENCIL;
548 }
549
550 if (usage & PIPE_BIND_VERTEX_BUFFER) {
551 struct r600_screen *rscreen = (struct r600_screen *)screen;
552 enum radeon_family family = r600_get_family(rscreen->radeon);
553
554 if (r600_is_vertex_format_supported(format, family)) {
555 retval |= PIPE_BIND_VERTEX_BUFFER;
556 }
557 }
558
559 if (usage & PIPE_BIND_TRANSFER_READ)
560 retval |= PIPE_BIND_TRANSFER_READ;
561 if (usage & PIPE_BIND_TRANSFER_WRITE)
562 retval |= PIPE_BIND_TRANSFER_WRITE;
563
564 return retval == usage;
565 }
566
567 static void r600_destroy_screen(struct pipe_screen* pscreen)
568 {
569 struct r600_screen *rscreen = (struct r600_screen *)pscreen;
570
571 if (rscreen == NULL)
572 return;
573
574 radeon_decref(rscreen->radeon);
575
576 util_slab_destroy(&rscreen->pool_buffers);
577 pipe_mutex_destroy(rscreen->mutex_num_contexts);
578 FREE(rscreen);
579 }
580
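/* Dropping the last reference does not free a fence; it is appended to the
 * owning context's fences.pool so r600_create_fence() can recycle the slot
 * once the GPU has signalled it. */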
581 static void r600_fence_reference(struct pipe_screen *pscreen,
582 struct pipe_fence_handle **ptr,
583 struct pipe_fence_handle *fence)
584 {
585 struct r600_fence **oldf = (struct r600_fence**)ptr;
586 struct r600_fence *newf = (struct r600_fence*)fence;
587
588 if (pipe_reference(&(*oldf)->reference, &newf->reference)) {
589 struct r600_pipe_context *ctx = (*oldf)->ctx;
590 LIST_ADDTAIL(&(*oldf)->head, &ctx->fences.pool);
591 }
592
593 *ptr = fence;
594 }
595
596 static boolean r600_fence_signalled(struct pipe_screen *pscreen,
597 struct pipe_fence_handle *fence)
598 {
599 struct r600_fence *rfence = (struct r600_fence*)fence;
600 struct r600_pipe_context *ctx = rfence->ctx;
601
602 return ctx->fences.data[rfence->index] != 0;
603 }
604
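/*
 * Busy-wait on the fence dword, yielding (or sleeping, on non-Unix platforms)
 * every 256 spins.  The timeout comes in as nanoseconds and is converted to
 * the microsecond scale used by os_time_get().
 */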
605 static boolean r600_fence_finish(struct pipe_screen *pscreen,
606 struct pipe_fence_handle *fence,
607 uint64_t timeout)
608 {
609 struct r600_fence *rfence = (struct r600_fence*)fence;
610 struct r600_pipe_context *ctx = rfence->ctx;
611 int64_t start_time = 0;
612 unsigned spins = 0;
613
614 if (timeout != PIPE_TIMEOUT_INFINITE) {
615 start_time = os_time_get();
616
617 /* Convert to microseconds. */
618 timeout /= 1000;
619 }
620
621 while (ctx->fences.data[rfence->index] == 0) {
622 if (++spins % 256)
623 continue;
624 #ifdef PIPE_OS_UNIX
625 sched_yield();
626 #else
627 os_time_sleep(10);
628 #endif
629 if (timeout != PIPE_TIMEOUT_INFINITE &&
630 os_time_get() - start_time >= timeout) {
631 return FALSE;
632 }
633 }
634
635 return TRUE;
636 }
637
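/*
 * Screen constructor, called by the winsys/target code with an initialized
 * struct radeon.  Besides filling in the pipe_screen vtable it creates the
 * buffer slab and the mutex guarding the context count used by
 * r600_update_num_contexts().
 */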
638 struct pipe_screen *r600_screen_create(struct radeon *radeon)
639 {
640 struct r600_screen *rscreen;
641
642 rscreen = CALLOC_STRUCT(r600_screen);
643 if (rscreen == NULL) {
644 return NULL;
645 }
646
647 rscreen->radeon = radeon;
648 rscreen->screen.winsys = (struct pipe_winsys*)radeon;
649 rscreen->screen.destroy = r600_destroy_screen;
650 rscreen->screen.get_name = r600_get_name;
651 rscreen->screen.get_vendor = r600_get_vendor;
652 rscreen->screen.get_param = r600_get_param;
653 rscreen->screen.get_shader_param = r600_get_shader_param;
654 rscreen->screen.get_paramf = r600_get_paramf;
655 rscreen->screen.is_format_supported = r600_is_format_supported;
656 rscreen->screen.context_create = r600_create_context;
657 rscreen->screen.fence_reference = r600_fence_reference;
658 rscreen->screen.fence_signalled = r600_fence_signalled;
659 rscreen->screen.fence_finish = r600_fence_finish;
660 r600_init_screen_resource_functions(&rscreen->screen);
661
662 rscreen->tiling_info = r600_get_tiling_info(radeon);
663 util_format_s3tc_init();
664
665 util_slab_create(&rscreen->pool_buffers,
666 sizeof(struct r600_resource_buffer), 64,
667 UTIL_SLAB_SINGLETHREADED);
668
669 pipe_mutex_init(rscreen->mutex_num_contexts);
670
671 return &rscreen->screen;
672 }