*/
static enum pipe_error
define_gb_shader_vgpu9(struct svga_context *svga,
- SVGA3dShaderType type,
struct svga_shader_variant *variant,
unsigned codeLen)
{
* Kernel module will allocate an id for the shader and issue
* the DefineGBShader command.
*/
- variant->gb_shader = sws->shader_create(sws, type,
+ variant->gb_shader = sws->shader_create(sws, variant->type,
variant->tokens, codeLen);
if (!variant->gb_shader)
*/
static enum pipe_error
define_gb_shader_vgpu10(struct svga_context *svga,
- SVGA3dShaderType type,
struct svga_shader_variant *variant,
unsigned codeLen)
{
/* Create gb memory for the shader and upload the shader code */
variant->gb_shader = swc->shader_create(swc,
- variant->id, type,
+ variant->id, variant->type,
variant->tokens, codeLen);
if (!variant->gb_shader) {
* the shader creation and return an error.
*/
ret = SVGA3D_vgpu10_DefineAndBindShader(swc, variant->gb_shader,
- variant->id, type, codeLen);
+ variant->id, variant->type, codeLen);
if (ret != PIPE_OK)
goto fail;
*/
enum pipe_error
svga_define_shader(struct svga_context *svga,
- SVGA3dShaderType type,
struct svga_shader_variant *variant)
{
unsigned codeLen = variant->nr_tokens * sizeof(variant->tokens[0]);
if (svga_have_gb_objects(svga)) {
if (svga_have_vgpu10(svga))
- ret = define_gb_shader_vgpu10(svga, type, variant, codeLen);
+ ret = define_gb_shader_vgpu10(svga, variant, codeLen);
else
- ret = define_gb_shader_vgpu9(svga, type, variant, codeLen);
+ ret = define_gb_shader_vgpu9(svga, variant, codeLen);
}
else {
/* Allocate an integer ID for the shader */
/* Issue SVGA3D device command to define the shader */
ret = SVGA3D_DefineShader(svga->swc,
variant->id,
- type,
+ variant->type,
variant->tokens,
codeLen);
if (ret != PIPE_OK) {
struct svga_shader_variant *
-svga_new_shader_variant(struct svga_context *svga)
+svga_new_shader_variant(struct svga_context *svga, enum pipe_shader_type type)
{
- svga->hud.num_shaders++;
- return CALLOC_STRUCT(svga_shader_variant);
+ struct svga_shader_variant *variant = CALLOC_STRUCT(svga_shader_variant);
+
+ if (variant) {
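+ /* Record the SVGA3D shader type, translated from the gallium shader type */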
+ variant->type = svga_shader_type(type);
+ svga->hud.num_shaders++;
+ }
+ return variant;
}
void
svga_destroy_shader_variant(struct svga_context *svga,
- SVGA3dShaderType type,
struct svga_shader_variant *variant)
{
enum pipe_error ret = PIPE_OK;
}
else {
if (variant->id != UTIL_BITMASK_INVALID_INDEX) {
- ret = SVGA3D_DestroyShader(svga->swc, variant->id, type);
+ ret = SVGA3D_DestroyShader(svga->swc, variant->id, variant->type);
if (ret != PIPE_OK) {
/* flush and try again */
svga_context_flush(svga, NULL);
- ret = SVGA3D_DestroyShader(svga->swc, variant->id, type);
+ ret = SVGA3D_DestroyShader(svga->swc, variant->id, variant->type);
assert(ret == PIPE_OK);
}
util_bitmask_clear(svga->shader_id_bm, variant->id);
(unsigned) (variant->nr_tokens
* sizeof(variant->tokens[0])));
/* Free the too-large variant */
- svga_destroy_shader_variant(svga, SVGA3D_SHADERTYPE_PS, variant);
+ svga_destroy_shader_variant(svga, variant);
/* Use simple pass-through shader instead */
variant = get_compiled_dummy_shader(svga, fs, key);
}
return PIPE_ERROR;
}
- ret = svga_define_shader(svga, SVGA3D_SHADERTYPE_PS, variant);
+ ret = svga_define_shader(svga, variant);
if (ret != PIPE_OK) {
- svga_destroy_shader_variant(svga, SVGA3D_SHADERTYPE_PS, variant);
+ svga_destroy_shader_variant(svga, variant);
return ret;
}
(unsigned) (variant->nr_tokens
* sizeof(variant->tokens[0])));
/* Free the too-large variant */
- svga_destroy_shader_variant(svga, SVGA3D_SHADERTYPE_VS, variant);
+ svga_destroy_shader_variant(svga, variant);
/* Use simple pass-through shader instead */
variant = get_compiled_dummy_vertex_shader(svga, vs, key);
}
return PIPE_ERROR;
}
- ret = svga_define_shader(svga, SVGA3D_SHADERTYPE_VS, variant);
+ ret = svga_define_shader(svga, variant);
if (ret != PIPE_OK) {
- svga_destroy_shader_variant(svga, SVGA3D_SHADERTYPE_VS, variant);
+ svga_destroy_shader_variant(svga, variant);
return ret;
}