r600g: implement the pipe_screen fence functions
[mesa.git] / src / gallium / drivers / r600 / r600_pipe.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <stdio.h>
#include <errno.h>
#include <pipe/p_defines.h>
#include <pipe/p_state.h>
#include <pipe/p_context.h>
#include <tgsi/tgsi_scan.h>
#include <tgsi/tgsi_parse.h>
#include <tgsi/tgsi_util.h>
#include <util/u_blitter.h>
#include <util/u_double_list.h>
#include <util/u_format_s3tc.h>
#include <util/u_transfer.h>
#include <util/u_surface.h>
#include <util/u_pack_color.h>
#include <util/u_memory.h>
#include <util/u_inlines.h>
#include "util/u_upload_mgr.h"
#include "os/os_time.h"
#include <pipebuffer/pb_buffer.h>
#include "r600.h"
#include "r600d.h"
#include "r600_resource.h"
#include "r600_shader.h"
#include "r600_pipe.h"
#include "r600_state_inlines.h"

/*
 * pipe_context
 */
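/* Fences are small counters in a shared, CPU-mapped buffer object: a slot is
 * cleared to 0 when the fence is created and the command stream is asked to
 * write 1 to it once the GPU has passed that point. This helper reuses a
 * signalled slot from the free pool when possible, otherwise it allocates a
 * new one (up to 1024 per context, in blocks of FENCE_BLOCK_SIZE). */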
static struct r600_fence *r600_create_fence(struct r600_pipe_context *ctx)
{
        struct r600_fence *fence = NULL;

        if (!ctx->fences.bo) {
                /* Create the shared buffer object */
                ctx->fences.bo = r600_bo(ctx->radeon, 4096, 0, 0, 0);
                if (!ctx->fences.bo) {
                        R600_ERR("r600: failed to create bo for fence objects\n");
                        return NULL;
                }
                ctx->fences.data = r600_bo_map(ctx->radeon, ctx->fences.bo, PB_USAGE_UNSYNCHRONIZED, NULL);
        }

        if (!LIST_IS_EMPTY(&ctx->fences.pool)) {
                struct r600_fence *entry;

                /* Try to find a freed fence that has been signalled */
                LIST_FOR_EACH_ENTRY(entry, &ctx->fences.pool, head) {
                        if (ctx->fences.data[entry->index] != 0) {
                                LIST_DELINIT(&entry->head);
                                fence = entry;
                                break;
                        }
                }
        }

        if (!fence) {
                /* Allocate a new fence */
                struct r600_fence_block *block;
                unsigned index;

                if ((ctx->fences.next_index + 1) >= 1024) {
                        R600_ERR("r600: too many concurrent fences\n");
                        return NULL;
                }

                index = ctx->fences.next_index++;

                if (!(index % FENCE_BLOCK_SIZE)) {
                        /* Allocate a new block */
                        block = CALLOC_STRUCT(r600_fence_block);
                        if (block == NULL)
                                return NULL;

                        LIST_ADD(&block->head, &ctx->fences.blocks);
                } else {
                        block = LIST_ENTRY(struct r600_fence_block, ctx->fences.blocks.next, head);
                }

                fence = &block->fences[index % FENCE_BLOCK_SIZE];
                fence->ctx = ctx;
                fence->index = index;
        }

        pipe_reference_init(&fence->reference, 1);

        ctx->fences.data[fence->index] = 0;
        r600_context_emit_fence(&ctx->ctx, ctx->fences.bo, fence->index, 1);
        return fence;
}

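/* Context flush callback: hand the caller a freshly emitted fence if one was
 * requested, then submit the accumulated command stream (if any) and flush
 * the vertex-buffer upload manager. */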
static void r600_flush(struct pipe_context *ctx,
                       struct pipe_fence_handle **fence)
{
        struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
        struct r600_fence **rfence = (struct r600_fence**)fence;

#if 0
        static int dc = 0;
        char dname[256];
#endif

        if (rfence)
                *rfence = r600_create_fence(rctx);

        if (!rctx->ctx.pm4_cdwords)
                return;

#if 0
        sprintf(dname, "gallium-%08d.bof", dc);
        if (dc < 20) {
                r600_context_dump_bof(&rctx->ctx, dname);
                R600_ERR("dumped %s\n", dname);
        }
        dc++;
#endif
        r600_context_flush(&rctx->ctx);

        /* XXX This shouldn't be really necessary, but removing it breaks some tests.
         * Needless buffer reallocations may significantly increase memory consumption,
         * so getting rid of this call is important. */
        u_upload_flush(rctx->vbuf_mgr->uploader);
}

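/* Keep a count of live contexts so the shared buffer slab only pays for
 * locking when more than one context can allocate from it concurrently. */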
static void r600_update_num_contexts(struct r600_screen *rscreen, int diff)
{
        pipe_mutex_lock(rscreen->mutex_num_contexts);
        if (diff > 0) {
                rscreen->num_contexts++;

                if (rscreen->num_contexts > 1)
                        util_slab_set_thread_safety(&rscreen->pool_buffers,
                                                    UTIL_SLAB_MULTITHREADED);
        } else {
                rscreen->num_contexts--;

                if (rscreen->num_contexts <= 1)
                        util_slab_set_thread_safety(&rscreen->pool_buffers,
                                                    UTIL_SLAB_SINGLETHREADED);
        }
        pipe_mutex_unlock(rscreen->mutex_num_contexts);
}

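/* Destroy per-context state, including the fence slots, blocks and the
 * shared fence buffer object set up by r600_create_fence. */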
static void r600_destroy_context(struct pipe_context *context)
{
        struct r600_pipe_context *rctx = (struct r600_pipe_context *)context;

        rctx->context.delete_depth_stencil_alpha_state(&rctx->context, rctx->custom_dsa_flush);

        r600_context_fini(&rctx->ctx);

        util_blitter_destroy(rctx->blitter);

        for (int i = 0; i < R600_PIPE_NSTATES; i++) {
                free(rctx->states[i]);
        }

        u_vbuf_mgr_destroy(rctx->vbuf_mgr);
        util_slab_destroy(&rctx->pool_transfers);

        if (rctx->fences.bo) {
                struct r600_fence_block *entry, *tmp;

                LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, &rctx->fences.blocks, head) {
                        LIST_DEL(&entry->head);
                        FREE(entry);
                }

                r600_bo_unmap(rctx->radeon, rctx->fences.bo);
                r600_bo_reference(rctx->radeon, &rctx->fences.bo, NULL);
        }

        r600_update_num_contexts(rctx->screen, -1);

        FREE(rctx);
}

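/* Create a context: wire up the pipe_context vtable, reset the fence
 * bookkeeping, and pick the r6xx/r7xx or evergreen code paths based on
 * the chip family. */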
static struct pipe_context *r600_create_context(struct pipe_screen *screen, void *priv)
{
        struct r600_pipe_context *rctx = CALLOC_STRUCT(r600_pipe_context);
        struct r600_screen* rscreen = (struct r600_screen *)screen;
        enum chip_class class;

        if (rctx == NULL)
                return NULL;

        r600_update_num_contexts(rscreen, 1);

        rctx->context.winsys = rscreen->screen.winsys;
        rctx->context.screen = screen;
        rctx->context.priv = priv;
        rctx->context.destroy = r600_destroy_context;
        rctx->context.flush = r600_flush;

        /* Easy accessing of screen/winsys. */
        rctx->screen = rscreen;
        rctx->radeon = rscreen->radeon;
        rctx->family = r600_get_family(rctx->radeon);

        rctx->fences.bo = NULL;
        rctx->fences.data = NULL;
        rctx->fences.next_index = 0;
        LIST_INITHEAD(&rctx->fences.pool);
        LIST_INITHEAD(&rctx->fences.blocks);

        r600_init_blit_functions(rctx);
        r600_init_query_functions(rctx);
        r600_init_context_resource_functions(rctx);
        r600_init_surface_functions(rctx);
        rctx->context.draw_vbo = r600_draw_vbo;

        switch (r600_get_family(rctx->radeon)) {
        case CHIP_R600:
        case CHIP_RV610:
        case CHIP_RV630:
        case CHIP_RV670:
        case CHIP_RV620:
        case CHIP_RV635:
        case CHIP_RS780:
        case CHIP_RS880:
        case CHIP_RV770:
        case CHIP_RV730:
        case CHIP_RV710:
        case CHIP_RV740:
                r600_init_state_functions(rctx);
                if (r600_context_init(&rctx->ctx, rctx->radeon)) {
                        r600_destroy_context(&rctx->context);
                        return NULL;
                }
                r600_init_config(rctx);
                break;
        case CHIP_CEDAR:
        case CHIP_REDWOOD:
        case CHIP_JUNIPER:
        case CHIP_CYPRESS:
        case CHIP_HEMLOCK:
        case CHIP_PALM:
        case CHIP_BARTS:
        case CHIP_TURKS:
        case CHIP_CAICOS:
                evergreen_init_state_functions(rctx);
                if (evergreen_context_init(&rctx->ctx, rctx->radeon)) {
                        r600_destroy_context(&rctx->context);
                        return NULL;
                }
                evergreen_init_config(rctx);
                break;
        default:
                R600_ERR("unsupported family %d\n", r600_get_family(rctx->radeon));
                r600_destroy_context(&rctx->context);
                return NULL;
        }

        util_slab_create(&rctx->pool_transfers,
                         sizeof(struct pipe_transfer), 64,
                         UTIL_SLAB_SINGLETHREADED);

        rctx->vbuf_mgr = u_vbuf_mgr_create(&rctx->context, 1024 * 1024, 256,
                                           PIPE_BIND_VERTEX_BUFFER |
                                           PIPE_BIND_INDEX_BUFFER |
                                           PIPE_BIND_CONSTANT_BUFFER,
                                           U_VERTEX_FETCH_DWORD_ALIGNED);
        if (!rctx->vbuf_mgr) {
                r600_destroy_context(&rctx->context);
                return NULL;
        }

        rctx->blitter = util_blitter_create(&rctx->context);
        if (rctx->blitter == NULL) {
                r600_destroy_context(&rctx->context);
                return NULL;
        }

        class = r600_get_family_class(rctx->radeon);
        if (class == R600 || class == R700)
                rctx->custom_dsa_flush = r600_create_db_flush_dsa(rctx);
        else
                rctx->custom_dsa_flush = evergreen_create_db_flush_dsa(rctx);

        return &rctx->context;
}

/*
 * pipe_screen
 */
static const char* r600_get_vendor(struct pipe_screen* pscreen)
{
        return "X.Org";
}

static const char *r600_get_family_name(enum radeon_family family)
{
        switch(family) {
        case CHIP_R600: return "AMD R600";
        case CHIP_RV610: return "AMD RV610";
        case CHIP_RV630: return "AMD RV630";
        case CHIP_RV670: return "AMD RV670";
        case CHIP_RV620: return "AMD RV620";
        case CHIP_RV635: return "AMD RV635";
        case CHIP_RS780: return "AMD RS780";
        case CHIP_RS880: return "AMD RS880";
        case CHIP_RV770: return "AMD RV770";
        case CHIP_RV730: return "AMD RV730";
        case CHIP_RV710: return "AMD RV710";
        case CHIP_RV740: return "AMD RV740";
        case CHIP_CEDAR: return "AMD CEDAR";
        case CHIP_REDWOOD: return "AMD REDWOOD";
        case CHIP_JUNIPER: return "AMD JUNIPER";
        case CHIP_CYPRESS: return "AMD CYPRESS";
        case CHIP_HEMLOCK: return "AMD HEMLOCK";
        case CHIP_PALM: return "AMD PALM";
        case CHIP_BARTS: return "AMD BARTS";
        case CHIP_TURKS: return "AMD TURKS";
        case CHIP_CAICOS: return "AMD CAICOS";
        default: return "AMD unknown";
        }
}

static const char* r600_get_name(struct pipe_screen* pscreen)
{
        struct r600_screen *rscreen = (struct r600_screen *)pscreen;
        enum radeon_family family = r600_get_family(rscreen->radeon);

        return r600_get_family_name(family);
}

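/* Integer/boolean capability queries; a few answers depend on the chip
 * family (evergreen raises some limits, the original R600 lacks per-MRT
 * blend enables). */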
static int r600_get_param(struct pipe_screen* pscreen, enum pipe_cap param)
{
        struct r600_screen *rscreen = (struct r600_screen *)pscreen;
        enum radeon_family family = r600_get_family(rscreen->radeon);

        switch (param) {
        /* Supported features (boolean caps). */
        case PIPE_CAP_NPOT_TEXTURES:
        case PIPE_CAP_TWO_SIDED_STENCIL:
        case PIPE_CAP_GLSL:
        case PIPE_CAP_DUAL_SOURCE_BLEND:
        case PIPE_CAP_ANISOTROPIC_FILTER:
        case PIPE_CAP_POINT_SPRITE:
        case PIPE_CAP_OCCLUSION_QUERY:
        case PIPE_CAP_TEXTURE_SHADOW_MAP:
        case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
        case PIPE_CAP_TEXTURE_MIRROR_REPEAT:
        case PIPE_CAP_BLEND_EQUATION_SEPARATE:
        case PIPE_CAP_SM3:
        case PIPE_CAP_TEXTURE_SWIZZLE:
        case PIPE_CAP_DEPTHSTENCIL_CLEAR_SEPARATE:
        case PIPE_CAP_DEPTH_CLAMP:
        case PIPE_CAP_SHADER_STENCIL_EXPORT:
        case PIPE_CAP_TGSI_INSTANCEID:
        case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR:
                return 1;
        case PIPE_CAP_INDEP_BLEND_ENABLE:
                /* R600 doesn't support per-MRT blends */
                if (family == CHIP_R600)
                        return 0;
                else
                        return 1;

        /* Unsupported features (boolean caps). */
        case PIPE_CAP_STREAM_OUTPUT:
        case PIPE_CAP_PRIMITIVE_RESTART:
        case PIPE_CAP_INDEP_BLEND_FUNC: /* FIXME allow this */
                return 0;

        case PIPE_CAP_ARRAY_TEXTURES:
                /* fix once the CS checker upstream is fixed */
                return debug_get_bool_option("R600_ARRAY_TEXTURE", FALSE);

        /* Texturing. */
        case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
        case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
        case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
                if (family >= CHIP_CEDAR)
                        return 15;
                else
                        return 14;
        case PIPE_CAP_MAX_VERTEX_TEXTURE_UNITS:
                /* FIXME allow this once infrastructure is there */
                return 16;
        case PIPE_CAP_MAX_TEXTURE_IMAGE_UNITS:
        case PIPE_CAP_MAX_COMBINED_SAMPLERS:
                return 16;

        /* Render targets. */
        case PIPE_CAP_MAX_RENDER_TARGETS:
                /* FIXME some r6xx are buggy and can only do 4 */
                return 8;

        /* Fragment coordinate conventions. */
        case PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT:
        case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
                return 1;
        case PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT:
        case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER:
                return 0;

        /* Timer queries, present when the clock frequency is non zero. */
        case PIPE_CAP_TIMER_QUERY:
                return r600_get_clock_crystal_freq(rscreen->radeon) != 0;

        default:
                R600_ERR("r600: unknown param %d\n", param);
                return 0;
        }
}

static float r600_get_paramf(struct pipe_screen* pscreen, enum pipe_cap param)
{
        struct r600_screen *rscreen = (struct r600_screen *)pscreen;
        enum radeon_family family = r600_get_family(rscreen->radeon);

        switch (param) {
        case PIPE_CAP_MAX_LINE_WIDTH:
        case PIPE_CAP_MAX_LINE_WIDTH_AA:
        case PIPE_CAP_MAX_POINT_WIDTH:
        case PIPE_CAP_MAX_POINT_WIDTH_AA:
                if (family >= CHIP_CEDAR)
                        return 16384.0f;
                else
                        return 8192.0f;
        case PIPE_CAP_MAX_TEXTURE_ANISOTROPY:
                return 16.0f;
        case PIPE_CAP_MAX_TEXTURE_LOD_BIAS:
                return 16.0f;
        default:
                R600_ERR("r600: unsupported paramf %d\n", param);
                return 0.0f;
        }
}

static int r600_get_shader_param(struct pipe_screen* pscreen, unsigned shader, enum pipe_shader_cap param)
{
        switch (shader) {
        case PIPE_SHADER_FRAGMENT:
        case PIPE_SHADER_VERTEX:
                break;
        case PIPE_SHADER_GEOMETRY:
                /* TODO: support and enable geometry programs */
                return 0;
        default:
                /* TODO: support tessellation on Evergreen */
                return 0;
        }

        /* TODO: all these should be fixed, since r600 surely supports much more! */
        switch (param) {
        case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
        case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
        case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
        case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
                return 16384;
        case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
                return 8; /* FIXME */
        case PIPE_SHADER_CAP_MAX_INPUTS:
                if (shader == PIPE_SHADER_FRAGMENT)
                        return 10;
                else
                        return 16;
        case PIPE_SHADER_CAP_MAX_TEMPS:
                return 256; /* max native temporaries */
        case PIPE_SHADER_CAP_MAX_ADDRS:
                /* FIXME Isn't this equal to TEMPS? */
                return 1; /* max native address registers */
        case PIPE_SHADER_CAP_MAX_CONSTS:
                return R600_MAX_CONST_BUFFER_SIZE;
        case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
                return R600_MAX_CONST_BUFFERS;
        case PIPE_SHADER_CAP_MAX_PREDS:
                return 0; /* FIXME */
        case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED:
                return 1;
        case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:
        case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
        case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
        case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
                return 1;
        case PIPE_SHADER_CAP_SUBROUTINES:
                return 0;
        default:
                return 0;
        }
}

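/* Report which of the requested bindings work for this format; the format is
 * only accepted when every requested usage bit is supported. */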
static boolean r600_is_format_supported(struct pipe_screen* screen,
                                        enum pipe_format format,
                                        enum pipe_texture_target target,
                                        unsigned sample_count,
                                        unsigned usage)
{
        unsigned retval = 0;
        if (target >= PIPE_MAX_TEXTURE_TYPES) {
                R600_ERR("r600: unsupported texture type %d\n", target);
                return FALSE;
        }

        /* Multisample */
        if (sample_count > 1)
                return FALSE;

        if ((usage & PIPE_BIND_SAMPLER_VIEW) &&
            r600_is_sampler_format_supported(screen, format)) {
                retval |= PIPE_BIND_SAMPLER_VIEW;
        }

        if ((usage & (PIPE_BIND_RENDER_TARGET |
                      PIPE_BIND_DISPLAY_TARGET |
                      PIPE_BIND_SCANOUT |
                      PIPE_BIND_SHARED)) &&
            r600_is_colorbuffer_format_supported(format)) {
                retval |= usage &
                          (PIPE_BIND_RENDER_TARGET |
                           PIPE_BIND_DISPLAY_TARGET |
                           PIPE_BIND_SCANOUT |
                           PIPE_BIND_SHARED);
        }

        if ((usage & PIPE_BIND_DEPTH_STENCIL) &&
            r600_is_zs_format_supported(format)) {
                retval |= PIPE_BIND_DEPTH_STENCIL;
        }

        if (usage & PIPE_BIND_VERTEX_BUFFER) {
                struct r600_screen *rscreen = (struct r600_screen *)screen;
                enum radeon_family family = r600_get_family(rscreen->radeon);

                if (r600_is_vertex_format_supported(format, family)) {
                        retval |= PIPE_BIND_VERTEX_BUFFER;
                }
        }

        if (usage & PIPE_BIND_TRANSFER_READ)
                retval |= PIPE_BIND_TRANSFER_READ;
        if (usage & PIPE_BIND_TRANSFER_WRITE)
                retval |= PIPE_BIND_TRANSFER_WRITE;

        return retval == usage;
}

static void r600_destroy_screen(struct pipe_screen* pscreen)
{
        struct r600_screen *rscreen = (struct r600_screen *)pscreen;

        if (rscreen == NULL)
                return;

        radeon_decref(rscreen->radeon);

        util_slab_destroy(&rscreen->pool_buffers);
        pipe_mutex_destroy(rscreen->mutex_num_contexts);
        FREE(rscreen);
}

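/* pipe_screen::fence_reference. When the last reference to the old fence is
 * dropped, its slot is returned to the owning context's pool so
 * r600_create_fence can recycle it once it has signalled. */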
static void r600_fence_reference(struct pipe_screen *pscreen,
                                 struct pipe_fence_handle **ptr,
                                 struct pipe_fence_handle *fence)
{
        struct r600_fence **oldf = (struct r600_fence**)ptr;
        struct r600_fence *newf = (struct r600_fence*)fence;

        if (pipe_reference(&(*oldf)->reference, &newf->reference)) {
                struct r600_pipe_context *ctx = (*oldf)->ctx;
                LIST_ADDTAIL(&(*oldf)->head, &ctx->fences.pool);
        }

        *ptr = fence;
}

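/* A fence has signalled once the GPU has written a non-zero value into its
 * slot in the mapped fence buffer. */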
static boolean r600_fence_signalled(struct pipe_screen *pscreen,
                                    struct pipe_fence_handle *fence)
{
        struct r600_fence *rfence = (struct r600_fence*)fence;
        struct r600_pipe_context *ctx = rfence->ctx;

        return ctx->fences.data[rfence->index];
}

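/* Busy-wait for the fence, yielding (or sleeping on non-Unix platforms) every
 * 256 spins. The timeout comes in nanoseconds and is converted to the
 * microsecond scale used by os_time_get(). */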
static boolean r600_fence_finish(struct pipe_screen *pscreen,
                                 struct pipe_fence_handle *fence,
                                 uint64_t timeout)
{
        struct r600_fence *rfence = (struct r600_fence*)fence;
        struct r600_pipe_context *ctx = rfence->ctx;
        int64_t start_time = 0;
        unsigned spins = 0;

        if (timeout != PIPE_TIMEOUT_INFINITE) {
                start_time = os_time_get();

                /* Convert to microseconds. */
                timeout /= 1000;
        }

        while (ctx->fences.data[rfence->index] == 0) {
                if (++spins % 256)
                        continue;
#ifdef PIPE_OS_UNIX
                sched_yield();
#else
                os_time_sleep(10);
#endif
                if (timeout != PIPE_TIMEOUT_INFINITE &&
                    os_time_get() - start_time >= timeout) {
                        return FALSE;
                }
        }

        return TRUE;
}

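/* Create the screen and hook up the query, format and context entry points
 * as well as the new fence_reference/fence_signalled/fence_finish hooks. */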
struct pipe_screen *r600_screen_create(struct radeon *radeon)
{
        struct r600_screen *rscreen;

        rscreen = CALLOC_STRUCT(r600_screen);
        if (rscreen == NULL) {
                return NULL;
        }

        rscreen->radeon = radeon;
        rscreen->screen.winsys = (struct pipe_winsys*)radeon;
        rscreen->screen.destroy = r600_destroy_screen;
        rscreen->screen.get_name = r600_get_name;
        rscreen->screen.get_vendor = r600_get_vendor;
        rscreen->screen.get_param = r600_get_param;
        rscreen->screen.get_shader_param = r600_get_shader_param;
        rscreen->screen.get_paramf = r600_get_paramf;
        rscreen->screen.is_format_supported = r600_is_format_supported;
        rscreen->screen.context_create = r600_create_context;
        rscreen->screen.fence_reference = r600_fence_reference;
        rscreen->screen.fence_signalled = r600_fence_signalled;
        rscreen->screen.fence_finish = r600_fence_finish;
        r600_init_screen_resource_functions(&rscreen->screen);

        rscreen->tiling_info = r600_get_tiling_info(radeon);
        util_format_s3tc_init();

        util_slab_create(&rscreen->pool_buffers,
                         sizeof(struct r600_resource_buffer), 64,
                         UTIL_SLAB_SINGLETHREADED);

        pipe_mutex_init(rscreen->mutex_num_contexts);

        return &rscreen->screen;
}