/**********************************************************
 * Copyright 2009-2015 VMware, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

#include "util/u_debug.h"
#include "util/u_memory.h"
#include "util/u_debug_stack.h"
#include "util/u_debug_flush.h"
#include "util/u_hash_table.h"
#include "pipebuffer/pb_buffer.h"
#include "pipebuffer/pb_validate.h"

#include "svga_winsys.h"
#include "vmw_context.h"
#include "vmw_screen.h"
#include "vmw_buffer.h"
#include "vmw_surface.h"
#include "vmw_fence.h"
#include "vmw_shader.h"
#include "vmw_query.h"

#define VMW_COMMAND_SIZE   (64*1024)
#define VMW_SURFACE_RELOCS (1024)
#define VMW_SHADER_RELOCS  (1024)
#define VMW_REGION_RELOCS  (512)

#define VMW_MUST_FLUSH_STACK 8

/*
 * A factor applied to the maximum MOB memory size to determine
 * the optimal time to preemptively flush the command buffer.
 * The constant is based on some performance trials with SpecViewperf.
 */
#define VMW_MAX_MOB_MEM_FACTOR  2

/*
 * A factor applied to the maximum surface memory size to determine
 * the optimal time to preemptively flush the command buffer.
 * The constant is based on some performance trials with SpecViewperf.
 */
#define VMW_MAX_SURF_MEM_FACTOR  2

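/*
 * Illustrative sketch, mirroring the checks later in this file: with a
 * factor of 2, a preemptive flush is requested once the batched commands
 * reference at least half of the corresponding limit reported by the
 * kernel, e.g. for MOB memory:
 *
 *    if (vswc->seen_mobs >=
 *          vswc->vws->ioctl.max_mob_memory / VMW_MAX_MOB_MEM_FACTOR)
 *       vswc->preemptive_flush = TRUE;
 */
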
struct vmw_buffer_relocation
{
   struct pb_buffer *buffer;
   boolean is_mob;
   uint32 offset;

   union {
      struct {
         struct SVGAGuestPtr *where;
      } region;
      struct {
         uint32 *id;
         uint32 *offset_into_mob;
      } mob;
   };
};

struct vmw_ctx_validate_item {
   union {
      struct vmw_svga_winsys_surface *vsurf;
      struct vmw_svga_winsys_shader *vshader;
   };
   boolean referenced;
};

struct vmw_svga_winsys_context
{
   struct svga_winsys_context base;

   struct vmw_winsys_screen *vws;
   struct util_hash_table *hash;

   boolean must_flush;
   struct debug_stack_frame must_flush_stack[VMW_MUST_FLUSH_STACK];
   struct debug_flush_ctx *fctx;

   struct {
      uint8_t buffer[VMW_COMMAND_SIZE];
      uint32_t size;
      uint32_t used;
      uint32_t reserved;
   } command;

   struct {
      struct vmw_ctx_validate_item items[VMW_SURFACE_RELOCS];
      uint32_t size;
      uint32_t used;
      uint32_t staged;
      uint32_t reserved;
   } surface;

   struct {
      struct vmw_buffer_relocation relocs[VMW_REGION_RELOCS];
      uint32_t size;
      uint32_t used;
      uint32_t staged;
      uint32_t reserved;
   } region;

   struct {
      struct vmw_ctx_validate_item items[VMW_SHADER_RELOCS];
      uint32_t size;
      uint32_t used;
      uint32_t staged;
      uint32_t reserved;
   } shader;

   struct pb_validate *validate;

   /**
    * The amount of surface, GMR or MOB memory that is referenced by the
    * commands currently batched in the context command buffer.
    */
   uint64_t seen_surfaces;
   uint64_t seen_regions;
   uint64_t seen_mobs;

   /**
    * Whether this context should fail to reserve more commands, not because
    * it ran out of command space, but because a substantial amount of GMR
    * was referenced.
    */
   boolean preemptive_flush;
};

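/*
 * Accounting summary for the command/surface/region/shader pools above
 * (a restatement of the code below, not additional logic): reserve()
 * records an upper bound in 'reserved', each relocation bumps 'staged',
 * commit() folds 'staged' into 'used', and a successful flush resets all
 * counters. Using the region pool as an example:
 *
 *    reserve:  region.reserved = nr_relocs;   region.staged = 0;
 *    reloc:    ++region.staged;               assert(staged < reserved)
 *    commit:   region.used += region.staged;  region.staged = 0;
 *    flush:    region.used = 0;               region.reserved = 0;
 */
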
static inline struct vmw_svga_winsys_context *
vmw_svga_winsys_context(struct svga_winsys_context *swc)
{
   return (struct vmw_svga_winsys_context *) swc;
}

static inline enum pb_usage_flags
vmw_translate_to_pb_flags(unsigned flags)
{
   enum pb_usage_flags f = 0;

   if (flags & SVGA_RELOC_READ)
      f |= PB_USAGE_GPU_READ;

   if (flags & SVGA_RELOC_WRITE)
      f |= PB_USAGE_GPU_WRITE;

   return f;
}

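/*
 * For example, a relocation flagged SVGA_RELOC_READ | SVGA_RELOC_WRITE
 * translates to PB_USAGE_GPU_READ | PB_USAGE_GPU_WRITE.
 */
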
static enum pipe_error
vmw_swc_flush(struct svga_winsys_context *swc,
              struct pipe_fence_handle **pfence)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_winsys_screen *vws = vswc->vws;
   struct pipe_fence_handle *fence = NULL;
   unsigned i;
   enum pipe_error ret;

   /*
    * If we hit a retry, lock the mutex and retry immediately.
    * If we then still hit a retry, sleep until another thread
    * wakes us up after it has released its buffers from the
    * validate list.
    *
    * If we hit another error condition, we still need to broadcast since
    * pb_validate_validate releases validated buffers in its error path.
    */
   ret = pb_validate_validate(vswc->validate);
   if (ret != PIPE_OK) {
      mtx_lock(&vws->cs_mutex);
      while (ret == PIPE_ERROR_RETRY) {
         ret = pb_validate_validate(vswc->validate);
         if (ret == PIPE_ERROR_RETRY) {
            cnd_wait(&vws->cs_cond, &vws->cs_mutex);
         }
      }
      if (ret != PIPE_OK) {
         cnd_broadcast(&vws->cs_cond);
      }
      mtx_unlock(&vws->cs_mutex);
   }

   assert(ret == PIPE_OK);

   /* Apply relocations */
   for (i = 0; i < vswc->region.used; ++i) {
      struct vmw_buffer_relocation *reloc = &vswc->region.relocs[i];
      struct SVGAGuestPtr ptr;

      if (!vmw_gmr_bufmgr_region_ptr(reloc->buffer, &ptr))
         assert(!"No relocation target?");

      ptr.offset += reloc->offset;

      if (reloc->is_mob) {
         if (reloc->mob.id)
            *reloc->mob.id = ptr.gmrId;
         if (reloc->mob.offset_into_mob)
            *reloc->mob.offset_into_mob = ptr.offset;
         else {
            assert(ptr.offset == 0);
         }
      } else
         *reloc->region.where = ptr;
   }

   if (vswc->command.used || pfence != NULL)
      vmw_ioctl_command(vws,
                        vswc->base.cid,
                        0,
                        vswc->command.buffer,
                        vswc->command.used,
                        &fence,
                        vswc->base.imported_fence_fd,
                        vswc->base.hints);

   pb_validate_fence(vswc->validate, fence);
   mtx_lock(&vws->cs_mutex);
   cnd_broadcast(&vws->cs_cond);
   mtx_unlock(&vws->cs_mutex);

   vswc->command.used = 0;
   vswc->command.reserved = 0;

   for (i = 0; i < vswc->surface.used + vswc->surface.staged; ++i) {
      struct vmw_ctx_validate_item *isurf = &vswc->surface.items[i];
      if (isurf->referenced)
         p_atomic_dec(&isurf->vsurf->validated);
      vmw_svga_winsys_surface_reference(&isurf->vsurf, NULL);
   }

   util_hash_table_clear(vswc->hash);
   vswc->surface.used = 0;
   vswc->surface.reserved = 0;

   for (i = 0; i < vswc->shader.used + vswc->shader.staged; ++i) {
      struct vmw_ctx_validate_item *ishader = &vswc->shader.items[i];
      if (ishader->referenced)
         p_atomic_dec(&ishader->vshader->validated);
      vmw_svga_winsys_shader_reference(&ishader->vshader, NULL);
   }

   vswc->shader.used = 0;
   vswc->shader.reserved = 0;

   vswc->region.used = 0;
   vswc->region.reserved = 0;

   vswc->must_flush = FALSE;
   debug_flush_flush(vswc->fctx);

   swc->hints &= ~SVGA_HINT_FLAG_CAN_PRE_FLUSH;
   swc->hints &= ~SVGA_HINT_FLAG_EXPORT_FENCE_FD;
   vswc->preemptive_flush = FALSE;
   vswc->seen_surfaces = 0;
   vswc->seen_regions = 0;
   vswc->seen_mobs = 0;

   if (vswc->base.imported_fence_fd != -1) {
      close(vswc->base.imported_fence_fd);
      vswc->base.imported_fence_fd = -1;
   }

   if (pfence)
      vmw_fence_reference(vswc->vws, pfence, fence);

   vmw_fence_reference(vswc->vws, &fence, NULL);

   return ret;
}

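/*
 * Illustrative only: a caller that wants the submission fence would
 * typically flush through the base context and release the fence via the
 * screen's fence ops (hook names assumed from svga_winsys.h):
 *
 *    struct pipe_fence_handle *fence = NULL;
 *    swc->flush(swc, &fence);
 *    if (fence)
 *       sws->fence_reference(sws, &fence, NULL);
 */
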
static void *
vmw_swc_reserve(struct svga_winsys_context *swc,
                uint32_t nr_bytes, uint32_t nr_relocs)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);

   /* Check if somebody forgot to check the previous failure */
   if (vswc->must_flush) {
      debug_printf("Forgot to flush:\n");
      debug_backtrace_dump(vswc->must_flush_stack, VMW_MUST_FLUSH_STACK);
      assert(!vswc->must_flush);
   }
   debug_flush_might_flush(vswc->fctx);

   assert(nr_bytes <= vswc->command.size);
   if (nr_bytes > vswc->command.size)
      return NULL;

   if (vswc->preemptive_flush ||
       vswc->command.used + nr_bytes > vswc->command.size ||
       vswc->surface.used + nr_relocs > vswc->surface.size ||
       vswc->shader.used + nr_relocs > vswc->shader.size ||
       vswc->region.used + nr_relocs > vswc->region.size) {
      vswc->must_flush = TRUE;
      debug_backtrace_capture(vswc->must_flush_stack, 1,
                              VMW_MUST_FLUSH_STACK);
      return NULL;
   }

   assert(vswc->command.used + nr_bytes <= vswc->command.size);
   assert(vswc->surface.used + nr_relocs <= vswc->surface.size);
   assert(vswc->shader.used + nr_relocs <= vswc->shader.size);
   assert(vswc->region.used + nr_relocs <= vswc->region.size);

   vswc->command.reserved = nr_bytes;
   vswc->surface.reserved = nr_relocs;
   vswc->surface.staged = 0;
   vswc->shader.reserved = nr_relocs;
   vswc->shader.staged = 0;
   vswc->region.reserved = nr_relocs;
   vswc->region.staged = 0;

   return vswc->command.buffer + vswc->command.used;
}

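/*
 * Illustrative only: the caller pattern reserve() is designed for, with
 * SVGA3dCmdFoo standing in as a hypothetical command struct:
 *
 *    SVGA3dCmdFoo *cmd = swc->reserve(swc, sizeof *cmd, 1);
 *    if (!cmd)
 *       return PIPE_ERROR_OUT_OF_MEMORY;   (flush, then retry)
 *    ... fill in *cmd and emit relocations for referenced resources ...
 *    swc->commit(swc);
 */
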
static unsigned
vmw_swc_get_command_buffer_size(struct svga_winsys_context *swc)
{
   const struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   return vswc->command.used;
}

static void
vmw_swc_context_relocation(struct svga_winsys_context *swc,
                           uint32 *cid)
{
   *cid = swc->cid;
}

static boolean
vmw_swc_add_validate_buffer(struct vmw_svga_winsys_context *vswc,
                            struct pb_buffer *pb_buf,
                            unsigned flags)
{
   MAYBE_UNUSED enum pipe_error ret;
   unsigned translated_flags;
   boolean already_present;

   translated_flags = vmw_translate_to_pb_flags(flags);
   ret = pb_validate_add_buffer(vswc->validate, pb_buf, translated_flags,
                                vswc->hash, &already_present);
   assert(ret == PIPE_OK);
   return !already_present;
}

static void
vmw_swc_region_relocation(struct svga_winsys_context *swc,
                          struct SVGAGuestPtr *where,
                          struct svga_winsys_buffer *buffer,
                          uint32 offset,
                          unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_buffer_relocation *reloc;

   assert(vswc->region.staged < vswc->region.reserved);

   reloc = &vswc->region.relocs[vswc->region.used + vswc->region.staged];
   reloc->region.where = where;

   /*
    * pb_validate holds a refcount to the buffer, so no need to
    * refcount it again in the relocation.
    */
   reloc->buffer = vmw_pb_buffer(buffer);
   reloc->offset = offset;
   reloc->is_mob = FALSE;
   ++vswc->region.staged;

   if (vmw_swc_add_validate_buffer(vswc, reloc->buffer, flags)) {
      vswc->seen_regions += reloc->buffer->size;
      if ((swc->hints & SVGA_HINT_FLAG_CAN_PRE_FLUSH) &&
          vswc->seen_regions >= VMW_GMR_POOL_SIZE/5)
         vswc->preemptive_flush = TRUE;
   }

   if (!(flags & SVGA_RELOC_INTERNAL))
      debug_flush_cb_reference(vswc->fctx, vmw_debug_flush_buf(buffer));
}

static void
vmw_swc_mob_relocation(struct svga_winsys_context *swc,
                       SVGAMobId *id,
                       uint32 *offset_into_mob,
                       struct svga_winsys_buffer *buffer,
                       uint32 offset,
                       unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_buffer_relocation *reloc;
   struct pb_buffer *pb_buffer = vmw_pb_buffer(buffer);

   if (id) {
      assert(vswc->region.staged < vswc->region.reserved);

      reloc = &vswc->region.relocs[vswc->region.used + vswc->region.staged];
      reloc->mob.id = id;
      reloc->mob.offset_into_mob = offset_into_mob;

      /*
       * pb_validate holds a refcount to the buffer, so no need to
       * refcount it again in the relocation.
       */
      reloc->buffer = pb_buffer;
      reloc->offset = offset;
      reloc->is_mob = TRUE;
      ++vswc->region.staged;
   }

   if (vmw_swc_add_validate_buffer(vswc, pb_buffer, flags)) {
      vswc->seen_mobs += pb_buffer->size;

      if ((swc->hints & SVGA_HINT_FLAG_CAN_PRE_FLUSH) &&
          vswc->seen_mobs >=
            vswc->vws->ioctl.max_mob_memory / VMW_MAX_MOB_MEM_FACTOR)
         vswc->preemptive_flush = TRUE;
   }

   if (!(flags & SVGA_RELOC_INTERNAL))
      debug_flush_cb_reference(vswc->fctx, vmw_debug_flush_buf(buffer));
}

/**
 * vmw_swc_surface_clear_reference - Clear referenced info for a surface
 *
 * @swc:   Pointer to an svga_winsys_context
 * @vsurf: Pointer to a vmw_svga_winsys_surface, the referenced info of which
 *         is to be cleared
 *
 * This is primarily used by a discard surface map to indicate that the
 * surface data is no longer referenced by a draw call, and mapping it
 * should therefore no longer cause a flush.
 */
void
vmw_swc_surface_clear_reference(struct svga_winsys_context *swc,
                                struct vmw_svga_winsys_surface *vsurf)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_ctx_validate_item *isrf =
      util_hash_table_get(vswc->hash, vsurf);

   if (isrf && isrf->referenced) {
      isrf->referenced = FALSE;
      p_atomic_dec(&vsurf->validated);
   }
}

static void
vmw_swc_surface_only_relocation(struct svga_winsys_context *swc,
                                uint32 *where,
                                struct vmw_svga_winsys_surface *vsurf,
                                unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_ctx_validate_item *isrf;

   assert(vswc->surface.staged < vswc->surface.reserved);
   isrf = util_hash_table_get(vswc->hash, vsurf);

   if (isrf == NULL) {
      isrf = &vswc->surface.items[vswc->surface.used + vswc->surface.staged];
      vmw_svga_winsys_surface_reference(&isrf->vsurf, vsurf);
      isrf->referenced = FALSE;

      /*
       * Note that a failure here may just fall back to unhashed behavior
       * and potentially cause unnecessary flushing, so ignore the
       * return value.
       */
      (void) util_hash_table_set(vswc->hash, vsurf, isrf);
      ++vswc->surface.staged;

      vswc->seen_surfaces += vsurf->size;
      if ((swc->hints & SVGA_HINT_FLAG_CAN_PRE_FLUSH) &&
          vswc->seen_surfaces >=
            vswc->vws->ioctl.max_surface_memory / VMW_MAX_SURF_MEM_FACTOR)
         vswc->preemptive_flush = TRUE;
   }

   if (!(flags & SVGA_RELOC_INTERNAL) && !isrf->referenced) {
      isrf->referenced = TRUE;
      p_atomic_inc(&vsurf->validated);
   }

   if (where)
      *where = vsurf->sid;
}

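/*
 * A note on the 'validated' counter, as inferred from this file:
 * p_atomic_inc above marks the surface as referenced by the batched
 * command stream, and the counter is decremented again at flush/destroy
 * time. Surface map code can therefore treat a nonzero count as "mapping
 * may require a flush first"; vmw_swc_surface_clear_reference() exists to
 * drop that reference early for discard maps.
 */
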
static void
vmw_swc_surface_relocation(struct svga_winsys_context *swc,
                           uint32 *where,
                           uint32 *mobid,
                           struct svga_winsys_surface *surface,
                           unsigned flags)
{
   struct vmw_svga_winsys_surface *vsurf;

   assert(swc->have_gb_objects || mobid == NULL);

   if (!surface) {
      *where = SVGA3D_INVALID_ID;
      if (mobid)
         *mobid = SVGA3D_INVALID_ID;
      return;
   }

   vsurf = vmw_svga_winsys_surface(surface);
   vmw_swc_surface_only_relocation(swc, where, vsurf, flags);

   if (swc->have_gb_objects && vsurf->buf != NULL) {

      /*
       * Make sure backup buffer ends up fenced.
       */
      mtx_lock(&vsurf->mutex);
      assert(vsurf->buf != NULL);

      /*
       * An internal reloc means that the surface transfer direction
       * is opposite to the MOB transfer direction...
       */
      if ((flags & SVGA_RELOC_INTERNAL) &&
          (flags & (SVGA_RELOC_READ | SVGA_RELOC_WRITE)) !=
          (SVGA_RELOC_READ | SVGA_RELOC_WRITE))
         flags ^= (SVGA_RELOC_READ | SVGA_RELOC_WRITE);

      vmw_swc_mob_relocation(swc, mobid, NULL, (struct svga_winsys_buffer *)
                             vsurf->buf, 0, flags);
      mtx_unlock(&vsurf->mutex);
   }
}

static void
vmw_swc_shader_relocation(struct svga_winsys_context *swc,
                          uint32 *shid,
                          uint32 *mobid,
                          uint32 *offset,
                          struct svga_winsys_gb_shader *shader,
                          unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_winsys_screen *vws = vswc->vws;
   struct vmw_svga_winsys_shader *vshader;
   struct vmw_ctx_validate_item *ishader;

   if (!shader) {
      *shid = SVGA3D_INVALID_ID;
      return;
   }

   vshader = vmw_svga_winsys_shader(shader);

   if (!vws->base.have_vgpu10) {
      assert(vswc->shader.staged < vswc->shader.reserved);
      ishader = util_hash_table_get(vswc->hash, vshader);

      if (ishader == NULL) {
         ishader = &vswc->shader.items[vswc->shader.used + vswc->shader.staged];
         vmw_svga_winsys_shader_reference(&ishader->vshader, vshader);
         ishader->referenced = FALSE;

         /*
          * Note that a failure here may just fall back to unhashed behavior
          * and potentially cause unnecessary flushing, so ignore the
          * return value.
          */
         (void) util_hash_table_set(vswc->hash, vshader, ishader);
         ++vswc->shader.staged;
      }

      if (!ishader->referenced) {
         ishader->referenced = TRUE;
         p_atomic_inc(&vshader->validated);
      }
   }

   if (shid)
      *shid = vshader->shid;

   if (vshader->buf)
      vmw_swc_mob_relocation(swc, mobid, offset, vshader->buf,
                             0, SVGA_RELOC_READ);
}

static void
vmw_swc_query_relocation(struct svga_winsys_context *swc,
                         SVGAMobId *id,
                         struct svga_winsys_gb_query *query)
{
   /* Queries are backed by one big MOB */
   vmw_swc_mob_relocation(swc, id, NULL, query->buf, 0,
                          SVGA_RELOC_READ | SVGA_RELOC_WRITE);
}

static void
vmw_swc_commit(struct svga_winsys_context *swc)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);

   assert(vswc->command.used + vswc->command.reserved <= vswc->command.size);
   vswc->command.used += vswc->command.reserved;
   vswc->command.reserved = 0;

   assert(vswc->surface.staged <= vswc->surface.reserved);
   assert(vswc->surface.used + vswc->surface.staged <= vswc->surface.size);
   vswc->surface.used += vswc->surface.staged;
   vswc->surface.staged = 0;
   vswc->surface.reserved = 0;

   assert(vswc->shader.staged <= vswc->shader.reserved);
   assert(vswc->shader.used + vswc->shader.staged <= vswc->shader.size);
   vswc->shader.used += vswc->shader.staged;
   vswc->shader.staged = 0;
   vswc->shader.reserved = 0;

   assert(vswc->region.staged <= vswc->region.reserved);
   assert(vswc->region.used + vswc->region.staged <= vswc->region.size);
   vswc->region.used += vswc->region.staged;
   vswc->region.staged = 0;
   vswc->region.reserved = 0;
}

static void
vmw_swc_destroy(struct svga_winsys_context *swc)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   unsigned i;

   for (i = 0; i < vswc->surface.used; ++i) {
      struct vmw_ctx_validate_item *isurf = &vswc->surface.items[i];
      if (isurf->referenced)
         p_atomic_dec(&isurf->vsurf->validated);
      vmw_svga_winsys_surface_reference(&isurf->vsurf, NULL);
   }

   for (i = 0; i < vswc->shader.used; ++i) {
      struct vmw_ctx_validate_item *ishader = &vswc->shader.items[i];
      if (ishader->referenced)
         p_atomic_dec(&ishader->vshader->validated);
      vmw_svga_winsys_shader_reference(&ishader->vshader, NULL);
   }

   util_hash_table_destroy(vswc->hash);
   pb_validate_destroy(vswc->validate);
   vmw_ioctl_context_destroy(vswc->vws, swc->cid);
   debug_flush_ctx_destroy(vswc->fctx);
   FREE(vswc);
}

static unsigned vmw_hash_ptr(void *p)
{
   return (unsigned)(unsigned long)p;
}

static int vmw_ptr_compare(void *key1, void *key2)
{
   return (key1 == key2) ? 0 : 1;
}

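/*
 * These helpers back the context's pointer-keyed hash table (created in
 * vmw_svga_winsys_context_create() below). The table maps surface and
 * shader pointers to their validate-list items, so re-relocating an
 * already-seen object is a lookup rather than a duplicate entry:
 *
 *    isrf = util_hash_table_get(vswc->hash, vsurf);
 */
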
/**
 * vmw_svga_winsys_vgpu10_shader_create - The winsys shader_create callback
 *
 * @swc: The winsys context.
 * @shaderId: Previously allocated shader id.
 * @shaderType: The shader type.
 * @bytecode: The shader bytecode
 * @bytecodeLen: The length of the bytecode.
 *
 * Creates an svga_winsys_gb_shader structure and allocates a buffer for the
 * shader code and copies the shader code into the buffer. Shader
 * resource creation is not done.
 */
static struct svga_winsys_gb_shader *
vmw_svga_winsys_vgpu10_shader_create(struct svga_winsys_context *swc,
                                     uint32 shaderId,
                                     SVGA3dShaderType shaderType,
                                     const uint32 *bytecode,
                                     uint32 bytecodeLen)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_svga_winsys_shader *shader;
   struct svga_winsys_gb_shader *gb_shader =
      vmw_svga_winsys_shader_create(&vswc->vws->base, shaderType, bytecode,
                                    bytecodeLen);
   if (!gb_shader)
      return NULL;

   shader = vmw_svga_winsys_shader(gb_shader);
   shader->shid = shaderId;

   return gb_shader;
}

/**
 * vmw_svga_winsys_vgpu10_shader_destroy - The winsys shader_destroy callback.
 *
 * @swc: The winsys context.
 * @shader: A shader structure previously allocated by shader_create.
 *
 * Frees the shader structure and the buffer holding the shader code.
 */
static void
vmw_svga_winsys_vgpu10_shader_destroy(struct svga_winsys_context *swc,
                                      struct svga_winsys_gb_shader *shader)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);

   vmw_svga_winsys_shader_destroy(&vswc->vws->base, shader);
}

/**
 * vmw_svga_winsys_resource_rebind - The winsys resource_rebind callback
 *
 * @swc: The winsys context.
 * @surface: The surface to be referenced.
 * @shader: The shader to be referenced.
 * @flags: Relocation flags.
 *
 * This callback is needed because shader backing buffers are sub-allocated,
 * and hence the kernel fencing is not sufficient. The buffers need to be put
 * on the context's validation list and fenced after command submission to
 * avoid reuse of busy shader buffers. In addition, surfaces need to be put on
 * the validation list in order for the driver to regard them as referenced
 * by the command stream.
 */
static enum pipe_error
vmw_svga_winsys_resource_rebind(struct svga_winsys_context *swc,
                                struct svga_winsys_surface *surface,
                                struct svga_winsys_gb_shader *shader,
                                unsigned flags)
{
   /*
    * Need to reserve one validation item for either the surface or
    * the shader.
    */
   if (!vmw_swc_reserve(swc, 0, 1))
      return PIPE_ERROR_OUT_OF_MEMORY;

   if (surface)
      vmw_swc_surface_relocation(swc, NULL, NULL, surface, flags);
   else if (shader)
      vmw_swc_shader_relocation(swc, NULL, NULL, NULL, shader, flags);

   vmw_swc_commit(swc);

   return PIPE_OK;
}

struct svga_winsys_context *
vmw_svga_winsys_context_create(struct svga_winsys_screen *sws)
{
   struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
   struct vmw_svga_winsys_context *vswc;

   vswc = CALLOC_STRUCT(vmw_svga_winsys_context);
   if (!vswc)
      return NULL;

   vswc->base.destroy = vmw_swc_destroy;
   vswc->base.reserve = vmw_swc_reserve;
   vswc->base.get_command_buffer_size = vmw_swc_get_command_buffer_size;
   vswc->base.surface_relocation = vmw_swc_surface_relocation;
   vswc->base.region_relocation = vmw_swc_region_relocation;
   vswc->base.mob_relocation = vmw_swc_mob_relocation;
   vswc->base.query_relocation = vmw_swc_query_relocation;
   vswc->base.query_bind = vmw_swc_query_bind;
   vswc->base.context_relocation = vmw_swc_context_relocation;
   vswc->base.shader_relocation = vmw_swc_shader_relocation;
   vswc->base.commit = vmw_swc_commit;
   vswc->base.flush = vmw_swc_flush;
   vswc->base.surface_map = vmw_svga_winsys_surface_map;
   vswc->base.surface_unmap = vmw_svga_winsys_surface_unmap;

   vswc->base.shader_create = vmw_svga_winsys_vgpu10_shader_create;
   vswc->base.shader_destroy = vmw_svga_winsys_vgpu10_shader_destroy;

   vswc->base.resource_rebind = vmw_svga_winsys_resource_rebind;

   if (sws->have_vgpu10)
      vswc->base.cid = vmw_ioctl_extended_context_create(vws, sws->have_vgpu10);
   else
      vswc->base.cid = vmw_ioctl_context_create(vws);

   if (vswc->base.cid == -1)
      goto out_no_context;

   vswc->base.imported_fence_fd = -1;

   vswc->base.have_gb_objects = sws->have_gb_objects;

   vswc->vws = vws;

   vswc->command.size = VMW_COMMAND_SIZE;
   vswc->surface.size = VMW_SURFACE_RELOCS;
   vswc->shader.size = VMW_SHADER_RELOCS;
   vswc->region.size = VMW_REGION_RELOCS;

   vswc->validate = pb_validate_create();
   if (!vswc->validate)
      goto out_no_validate;

   vswc->hash = util_hash_table_create(vmw_hash_ptr, vmw_ptr_compare);
   if (!vswc->hash)
      goto out_no_hash;

   vswc->fctx = debug_flush_ctx_create(TRUE, VMW_DEBUG_FLUSH_STACK);

   vswc->base.force_coherent = vws->force_coherent;

   return &vswc->base;

out_no_hash:
   pb_validate_destroy(vswc->validate);
out_no_validate:
   vmw_ioctl_context_destroy(vws, vswc->base.cid);
out_no_context:
   FREE(vswc);
   return NULL;
}
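
/*
 * Illustrative only: screen code is expected to expose this constructor
 * through the svga_winsys_screen vtable (hook name assumed), after which
 * a context is destroyed through the vtable set up above:
 *
 *    struct svga_winsys_context *swc = sws->context_create(sws);
 *    ...
 *    swc->destroy(swc);
 */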