2 * Copyright © 2017 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 #include "pipe/p_defines.h"
26 #include "pipe/p_state.h"
27 #include "pipe/p_context.h"
28 #include "pipe/p_screen.h"
29 #include "util/u_atomic.h"
30 #include "compiler/nir/nir.h"
31 #include "compiler/nir/nir_builder.h"
32 #include "intel/compiler/brw_compiler.h"
33 #include "intel/compiler/brw_nir.h"
34 #include "iris_context.h"
37 get_new_program_id(struct iris_screen
*screen
)
39 return p_atomic_inc_return(&screen
->program_id
);
42 struct iris_uncompiled_shader
{
43 struct pipe_shader_state base
;
47 // XXX: need unify_interfaces() at link time...
50 iris_create_shader_state(struct pipe_context
*ctx
,
51 const struct pipe_shader_state
*state
)
53 //struct iris_context *ice = (struct iris_context *)ctx;
54 struct iris_screen
*screen
= (struct iris_screen
*)ctx
->screen
;
56 assert(state
->type
== PIPE_SHADER_IR_NIR
);
58 nir_shader
*nir
= state
->ir
.nir
;
60 struct iris_uncompiled_shader
*ish
=
61 calloc(1, sizeof(struct iris_uncompiled_shader
));
65 nir
= brw_preprocess_nir(screen
->compiler
, nir
);
67 ish
->program_id
= get_new_program_id(screen
);
68 ish
->base
.type
= PIPE_SHADER_IR_NIR
;
69 ish
->base
.ir
.nir
= nir
;
70 memcpy(&ish
->base
.stream_output
, &state
->stream_output
,
71 sizeof(struct pipe_stream_output_info
));
77 iris_delete_shader_state(struct pipe_context
*ctx
, void *state
)
79 struct iris_uncompiled_shader
*ish
= state
;
81 ralloc_free(ish
->base
.ir
.nir
);
86 iris_bind_vs_state(struct pipe_context
*ctx
, void *state
)
88 struct iris_context
*ice
= (struct iris_context
*)ctx
;
90 ice
->shaders
.uncompiled
[MESA_SHADER_VERTEX
] = state
;
91 ice
->state
.dirty
|= IRIS_DIRTY_UNCOMPILED_VS
;
95 iris_bind_tcs_state(struct pipe_context
*ctx
, void *state
)
97 struct iris_context
*ice
= (struct iris_context
*)ctx
;
99 ice
->shaders
.uncompiled
[MESA_SHADER_TESS_CTRL
] = state
;
100 ice
->state
.dirty
|= IRIS_DIRTY_UNCOMPILED_TCS
;
104 iris_bind_tes_state(struct pipe_context
*ctx
, void *state
)
106 struct iris_context
*ice
= (struct iris_context
*)ctx
;
108 if (!!state
!= !!ice
->shaders
.uncompiled
[MESA_SHADER_TESS_EVAL
])
109 ice
->state
.dirty
|= IRIS_DIRTY_URB
;
111 ice
->shaders
.uncompiled
[MESA_SHADER_TESS_EVAL
] = state
;
112 ice
->state
.dirty
|= IRIS_DIRTY_UNCOMPILED_TES
;
116 iris_bind_gs_state(struct pipe_context
*ctx
, void *state
)
118 struct iris_context
*ice
= (struct iris_context
*)ctx
;
120 if (!!state
!= !!ice
->shaders
.uncompiled
[MESA_SHADER_GEOMETRY
])
121 ice
->state
.dirty
|= IRIS_DIRTY_URB
;
123 ice
->shaders
.uncompiled
[MESA_SHADER_GEOMETRY
] = state
;
124 ice
->state
.dirty
|= IRIS_DIRTY_UNCOMPILED_GS
;
128 iris_bind_fs_state(struct pipe_context
*ctx
, void *state
)
130 struct iris_context
*ice
= (struct iris_context
*)ctx
;
132 ice
->shaders
.uncompiled
[MESA_SHADER_FRAGMENT
] = state
;
133 ice
->state
.dirty
|= IRIS_DIRTY_UNCOMPILED_FS
;
137 * Sets up the starting offsets for the groups of binding table entries
138 * common to all pipeline stages.
140 * Unused groups are initialized to 0xd0d0d0d0 to make it obvious that they're
141 * unused but also make sure that addition of small offsets to them will
142 * trigger some of our asserts that surface indices are < BRW_MAX_SURFACES.
145 assign_common_binding_table_offsets(const struct gen_device_info
*devinfo
,
146 const struct nir_shader
*nir
,
147 struct brw_stage_prog_data
*prog_data
,
148 uint32_t next_binding_table_offset
)
150 const struct shader_info
*info
= &nir
->info
;
152 if (info
->num_textures
) {
153 prog_data
->binding_table
.texture_start
= next_binding_table_offset
;
154 prog_data
->binding_table
.gather_texture_start
= next_binding_table_offset
;
155 next_binding_table_offset
+= info
->num_textures
;
157 prog_data
->binding_table
.texture_start
= 0xd0d0d0d0;
158 prog_data
->binding_table
.gather_texture_start
= 0xd0d0d0d0;
161 int num_ubos
= info
->num_ubos
+ (nir
->num_uniforms
> 0 ? 1 : 0);
164 //assert(info->num_ubos <= BRW_MAX_UBO);
165 prog_data
->binding_table
.ubo_start
= next_binding_table_offset
;
166 next_binding_table_offset
+= num_ubos
;
168 prog_data
->binding_table
.ubo_start
= 0xd0d0d0d0;
171 if (info
->num_ssbos
|| info
->num_abos
) {
172 //assert(info->num_abos <= BRW_MAX_ABO);
173 //assert(info->num_ssbos <= BRW_MAX_SSBO);
174 prog_data
->binding_table
.ssbo_start
= next_binding_table_offset
;
175 next_binding_table_offset
+= info
->num_abos
+ info
->num_ssbos
;
177 prog_data
->binding_table
.ssbo_start
= 0xd0d0d0d0;
180 prog_data
->binding_table
.shader_time_start
= 0xd0d0d0d0;
182 if (info
->num_images
) {
183 prog_data
->binding_table
.image_start
= next_binding_table_offset
;
184 next_binding_table_offset
+= info
->num_images
;
186 prog_data
->binding_table
.image_start
= 0xd0d0d0d0;
189 /* This may or may not be used depending on how the compile goes. */
190 prog_data
->binding_table
.pull_constants_start
= next_binding_table_offset
;
191 next_binding_table_offset
++;
193 /* Plane 0 is just the regular texture section */
194 prog_data
->binding_table
.plane_start
[0] = prog_data
->binding_table
.texture_start
;
196 prog_data
->binding_table
.plane_start
[1] = next_binding_table_offset
;
197 next_binding_table_offset
+= info
->num_textures
;
199 prog_data
->binding_table
.plane_start
[2] = next_binding_table_offset
;
200 next_binding_table_offset
+= info
->num_textures
;
202 /* prog_data->base.binding_table.size will be set by brw_mark_surface_used. */
204 //assert(next_binding_table_offset <= BRW_MAX_SURFACES);
205 return next_binding_table_offset
;
209 iris_setup_uniforms(const struct brw_compiler
*compiler
,
212 struct brw_stage_prog_data
*prog_data
)
214 prog_data
->nr_params
= nir
->num_uniforms
;
215 prog_data
->param
= rzalloc_array(mem_ctx
, uint32_t, prog_data
->nr_params
);
217 nir_foreach_variable(var
, &nir
->uniforms
) {
218 const unsigned components
= glsl_get_components(var
->type
);
220 for (unsigned i
= 0; i
< components
; i
++) {
221 prog_data
->param
[var
->data
.driver_location
] =
222 var
->data
.driver_location
;
226 // XXX: vs clip planes?
227 brw_nir_analyze_ubo_ranges(compiler
, nir
, NULL
, prog_data
->ubo_ranges
);
231 iris_setup_push_uniform_range(const struct brw_compiler
*compiler
,
232 struct brw_stage_prog_data
*prog_data
)
234 if (prog_data
->nr_params
) {
235 for (int i
= 3; i
> 0; i
--)
236 prog_data
->ubo_ranges
[i
] = prog_data
->ubo_ranges
[i
- 1];
238 prog_data
->ubo_ranges
[0] = (struct brw_ubo_range
) {
241 .length
= DIV_ROUND_UP(prog_data
->nr_params
, 8),
247 iris_compile_vs(struct iris_context
*ice
,
248 struct iris_uncompiled_shader
*ish
,
249 const struct brw_vs_prog_key
*key
)
251 struct iris_screen
*screen
= (struct iris_screen
*)ice
->ctx
.screen
;
252 const struct brw_compiler
*compiler
= screen
->compiler
;
253 const struct gen_device_info
*devinfo
= &screen
->devinfo
;
254 void *mem_ctx
= ralloc_context(NULL
);
255 struct brw_vs_prog_data
*vs_prog_data
=
256 rzalloc(mem_ctx
, struct brw_vs_prog_data
);
257 struct brw_vue_prog_data
*vue_prog_data
= &vs_prog_data
->base
;
258 struct brw_stage_prog_data
*prog_data
= &vue_prog_data
->base
;
260 assert(ish
->base
.type
== PIPE_SHADER_IR_NIR
);
262 nir_shader
*nir
= ish
->base
.ir
.nir
;
265 assign_common_binding_table_offsets(devinfo
, nir
, prog_data
, 0);
267 iris_setup_uniforms(compiler
, mem_ctx
, nir
, prog_data
);
269 brw_compute_vue_map(devinfo
,
270 &vue_prog_data
->vue_map
, nir
->info
.outputs_written
,
271 nir
->info
.separate_shader
);
273 char *error_str
= NULL
;
274 const unsigned *program
=
275 brw_compile_vs(compiler
, &ice
->dbg
, mem_ctx
, key
, vs_prog_data
,
276 nir
, -1, &error_str
);
277 if (program
== NULL
) {
278 dbg_printf("Failed to compile vertex shader: %s\n", error_str
);
279 ralloc_free(mem_ctx
);
283 iris_setup_push_uniform_range(compiler
, prog_data
);
286 ice
->vtbl
.create_so_decl_list(&ish
->base
.stream_output
,
287 &vue_prog_data
->vue_map
);
289 iris_upload_and_bind_shader(ice
, IRIS_CACHE_VS
, key
, program
, prog_data
,
292 ralloc_free(mem_ctx
);
297 iris_update_compiled_vs(struct iris_context
*ice
)
299 struct iris_uncompiled_shader
*ish
=
300 ice
->shaders
.uncompiled
[MESA_SHADER_VERTEX
];
302 struct brw_vs_prog_key key
= { .program_string_id
= ish
->program_id
};
303 ice
->vtbl
.populate_vs_key(ice
, &key
);
305 if (iris_bind_cached_shader(ice
, IRIS_CACHE_VS
, &key
))
308 UNUSED
bool success
= iris_compile_vs(ice
, ish
, &key
);
/**
 * Update the current tessellation control shader variant.
 *
 * TODO(review): the body was lost/empty in this revision — TCS
 * compilation appears unimplemented here; confirm against upstream.
 */
static void
iris_update_compiled_tcs(struct iris_context *ice)
{
   // XXX: TCS
}
318 iris_compile_tes(struct iris_context
*ice
,
319 struct iris_uncompiled_shader
*ish
,
320 const struct brw_tes_prog_key
*key
)
322 struct iris_screen
*screen
= (struct iris_screen
*)ice
->ctx
.screen
;
323 const struct brw_compiler
*compiler
= screen
->compiler
;
324 const struct gen_device_info
*devinfo
= &screen
->devinfo
;
325 void *mem_ctx
= ralloc_context(NULL
);
326 struct brw_tes_prog_data
*tes_prog_data
=
327 rzalloc(mem_ctx
, struct brw_tes_prog_data
);
328 struct brw_vue_prog_data
*vue_prog_data
= &tes_prog_data
->base
;
329 struct brw_stage_prog_data
*prog_data
= &vue_prog_data
->base
;
331 assert(ish
->base
.type
== PIPE_SHADER_IR_NIR
);
333 nir_shader
*nir
= ish
->base
.ir
.nir
;
335 assign_common_binding_table_offsets(devinfo
, nir
, prog_data
, 0);
337 iris_setup_uniforms(compiler
, mem_ctx
, nir
, prog_data
);
339 struct brw_vue_map input_vue_map
;
340 brw_compute_tess_vue_map(&input_vue_map
, key
->inputs_read
,
341 key
->patch_inputs_read
);
343 char *error_str
= NULL
;
344 const unsigned *program
=
345 brw_compile_tes(compiler
, &ice
->dbg
, mem_ctx
, key
, &input_vue_map
,
346 tes_prog_data
, nir
, NULL
, -1, &error_str
);
347 if (program
== NULL
) {
348 dbg_printf("Failed to compile evaluation shader: %s\n", error_str
);
349 ralloc_free(mem_ctx
);
353 iris_setup_push_uniform_range(compiler
, prog_data
);
356 ice
->vtbl
.create_so_decl_list(&ish
->base
.stream_output
,
357 &vue_prog_data
->vue_map
);
359 iris_upload_and_bind_shader(ice
, IRIS_CACHE_TES
, key
, program
, prog_data
,
362 ralloc_free(mem_ctx
);
367 iris_update_compiled_tes(struct iris_context
*ice
)
369 struct iris_uncompiled_shader
*ish
=
370 ice
->shaders
.uncompiled
[MESA_SHADER_TESS_EVAL
];
375 struct brw_tes_prog_key key
= { .program_string_id
= ish
->program_id
};
376 ice
->vtbl
.populate_tes_key(ice
, &key
);
378 if (iris_bind_cached_shader(ice
, IRIS_CACHE_TES
, &key
))
381 UNUSED
bool success
= iris_compile_tes(ice
, ish
, &key
);
385 iris_compile_gs(struct iris_context
*ice
,
386 struct iris_uncompiled_shader
*ish
,
387 const struct brw_gs_prog_key
*key
)
389 struct iris_screen
*screen
= (struct iris_screen
*)ice
->ctx
.screen
;
390 const struct brw_compiler
*compiler
= screen
->compiler
;
391 const struct gen_device_info
*devinfo
= &screen
->devinfo
;
392 void *mem_ctx
= ralloc_context(NULL
);
393 struct brw_gs_prog_data
*gs_prog_data
=
394 rzalloc(mem_ctx
, struct brw_gs_prog_data
);
395 struct brw_vue_prog_data
*vue_prog_data
= &gs_prog_data
->base
;
396 struct brw_stage_prog_data
*prog_data
= &vue_prog_data
->base
;
398 assert(ish
->base
.type
== PIPE_SHADER_IR_NIR
);
400 nir_shader
*nir
= ish
->base
.ir
.nir
;
402 assign_common_binding_table_offsets(devinfo
, nir
, prog_data
, 0);
404 iris_setup_uniforms(compiler
, mem_ctx
, nir
, prog_data
);
406 brw_compute_vue_map(devinfo
,
407 &vue_prog_data
->vue_map
, nir
->info
.outputs_written
,
408 nir
->info
.separate_shader
);
410 char *error_str
= NULL
;
411 const unsigned *program
=
412 brw_compile_gs(compiler
, &ice
->dbg
, mem_ctx
, key
, gs_prog_data
, nir
,
413 NULL
, -1, &error_str
);
414 if (program
== NULL
) {
415 dbg_printf("Failed to compile geometry shader: %s\n", error_str
);
416 ralloc_free(mem_ctx
);
420 iris_setup_push_uniform_range(compiler
, prog_data
);
423 ice
->vtbl
.create_so_decl_list(&ish
->base
.stream_output
,
424 &vue_prog_data
->vue_map
);
426 iris_upload_and_bind_shader(ice
, IRIS_CACHE_GS
, key
, program
, prog_data
,
429 ralloc_free(mem_ctx
);
435 iris_update_compiled_gs(struct iris_context
*ice
)
437 struct iris_uncompiled_shader
*ish
=
438 ice
->shaders
.uncompiled
[MESA_SHADER_GEOMETRY
];
443 struct brw_gs_prog_key key
= { .program_string_id
= ish
->program_id
};
444 ice
->vtbl
.populate_gs_key(ice
, &key
);
446 if (iris_bind_cached_shader(ice
, IRIS_CACHE_GS
, &key
))
449 UNUSED
bool success
= iris_compile_gs(ice
, ish
, &key
);
453 iris_compile_fs(struct iris_context
*ice
,
454 struct iris_uncompiled_shader
*ish
,
455 const struct brw_wm_prog_key
*key
,
456 struct brw_vue_map
*vue_map
)
458 struct iris_screen
*screen
= (struct iris_screen
*)ice
->ctx
.screen
;
459 const struct brw_compiler
*compiler
= screen
->compiler
;
460 const struct gen_device_info
*devinfo
= &screen
->devinfo
;
461 void *mem_ctx
= ralloc_context(NULL
);
462 struct brw_wm_prog_data
*fs_prog_data
=
463 rzalloc(mem_ctx
, struct brw_wm_prog_data
);
464 struct brw_stage_prog_data
*prog_data
= &fs_prog_data
->base
;
466 assert(ish
->base
.type
== PIPE_SHADER_IR_NIR
);
468 nir_shader
*nir
= ish
->base
.ir
.nir
;
471 assign_common_binding_table_offsets(devinfo
, nir
, prog_data
,
472 MAX2(key
->nr_color_regions
, 1));
474 iris_setup_uniforms(compiler
, mem_ctx
, nir
, prog_data
);
476 char *error_str
= NULL
;
477 const unsigned *program
=
478 brw_compile_fs(compiler
, &ice
->dbg
, mem_ctx
, key
, fs_prog_data
,
479 nir
, NULL
, -1, -1, -1, true, false, vue_map
, &error_str
);
480 if (program
== NULL
) {
481 dbg_printf("Failed to compile fragment shader: %s\n", error_str
);
482 ralloc_free(mem_ctx
);
486 //brw_alloc_stage_scratch(brw, &brw->wm.base, prog_data.base.total_scratch);
488 iris_setup_push_uniform_range(compiler
, prog_data
);
490 iris_upload_and_bind_shader(ice
, IRIS_CACHE_FS
, key
, program
, prog_data
,
493 ralloc_free(mem_ctx
);
498 iris_update_compiled_fs(struct iris_context
*ice
)
500 struct iris_uncompiled_shader
*ish
=
501 ice
->shaders
.uncompiled
[MESA_SHADER_FRAGMENT
];
502 struct brw_wm_prog_key key
= { .program_string_id
= ish
->program_id
};
503 ice
->vtbl
.populate_fs_key(ice
, &key
);
505 if (iris_bind_cached_shader(ice
, IRIS_CACHE_FS
, &key
))
508 UNUSED
bool success
=
509 iris_compile_fs(ice
, ish
, &key
, ice
->shaders
.last_vue_map
);
512 static struct iris_compiled_shader
*
513 last_vue_shader(struct iris_context
*ice
)
515 if (ice
->shaders
.prog
[MESA_SHADER_GEOMETRY
])
516 return ice
->shaders
.prog
[MESA_SHADER_GEOMETRY
];
518 if (ice
->shaders
.prog
[MESA_SHADER_TESS_EVAL
])
519 return ice
->shaders
.prog
[MESA_SHADER_TESS_EVAL
];
521 return ice
->shaders
.prog
[MESA_SHADER_VERTEX
];
525 update_last_vue_map(struct iris_context
*ice
,
526 struct brw_stage_prog_data
*prog_data
)
528 struct brw_vue_prog_data
*vue_prog_data
= (void *) prog_data
;
529 struct brw_vue_map
*vue_map
= &vue_prog_data
->vue_map
;
530 struct brw_vue_map
*old_map
= ice
->shaders
.last_vue_map
;
531 const uint64_t changed_slots
=
532 (old_map
? old_map
->slots_valid
: 0ull) ^ vue_map
->slots_valid
;
534 if (changed_slots
& VARYING_BIT_VIEWPORT
) {
535 // XXX: could use ctx->Const.MaxViewports for old API efficiency
536 ice
->state
.num_viewports
=
537 (vue_map
->slots_valid
& VARYING_BIT_VIEWPORT
) ? IRIS_MAX_VIEWPORTS
: 1;
538 ice
->state
.dirty
|= IRIS_DIRTY_CLIP
|
539 IRIS_DIRTY_SF_CL_VIEWPORT
|
540 IRIS_DIRTY_SCISSOR_RECT
|
541 IRIS_DIRTY_UNCOMPILED_FS
;
545 if (changed_slots
|| (old_map
&& old_map
->separate
!= vue_map
->separate
)) {
546 ice
->state
.dirty
|= IRIS_DIRTY_SBE
;
549 ice
->shaders
.last_vue_map
= &vue_prog_data
->vue_map
;
552 static struct brw_vue_prog_data
*
553 get_vue_prog_data(struct iris_context
*ice
, gl_shader_stage stage
)
555 if (!ice
->shaders
.prog
[stage
])
558 return (void *) ice
->shaders
.prog
[stage
]->prog_data
;
562 iris_update_compiled_shaders(struct iris_context
*ice
)
564 const uint64_t dirty
= ice
->state
.dirty
;
566 struct brw_vue_prog_data
*old_prog_datas
[4];
567 if (!(dirty
& IRIS_DIRTY_URB
)) {
568 for (int i
= MESA_SHADER_VERTEX
; i
<= MESA_SHADER_GEOMETRY
; i
++)
569 old_prog_datas
[i
] = get_vue_prog_data(ice
, i
);
572 if (dirty
& IRIS_DIRTY_UNCOMPILED_VS
)
573 iris_update_compiled_vs(ice
);
574 if (dirty
& IRIS_DIRTY_UNCOMPILED_TCS
)
575 iris_update_compiled_tcs(ice
);
576 if (dirty
& IRIS_DIRTY_UNCOMPILED_TES
)
577 iris_update_compiled_tes(ice
);
578 if (dirty
& IRIS_DIRTY_UNCOMPILED_GS
)
579 iris_update_compiled_gs(ice
);
581 struct iris_compiled_shader
*shader
= last_vue_shader(ice
);
582 update_last_vue_map(ice
, shader
->prog_data
);
583 if (ice
->state
.streamout
!= shader
->streamout
) {
584 ice
->state
.streamout
= shader
->streamout
;
585 ice
->state
.dirty
|= IRIS_DIRTY_SO_DECL_LIST
| IRIS_DIRTY_STREAMOUT
;
588 if (dirty
& IRIS_DIRTY_UNCOMPILED_FS
)
589 iris_update_compiled_fs(ice
);
592 if (!(dirty
& IRIS_DIRTY_URB
)) {
593 for (int i
= MESA_SHADER_VERTEX
; i
<= MESA_SHADER_GEOMETRY
; i
++) {
594 struct brw_vue_prog_data
*old
= old_prog_datas
[i
];
595 struct brw_vue_prog_data
*new = get_vue_prog_data(ice
, i
);
596 if (!!old
!= !!new ||
597 (new && new->urb_entry_size
!= old
->urb_entry_size
)) {
598 ice
->state
.dirty
|= IRIS_DIRTY_URB
;
606 iris_init_program_functions(struct pipe_context
*ctx
)
608 ctx
->create_vs_state
= iris_create_shader_state
;
609 ctx
->create_tcs_state
= iris_create_shader_state
;
610 ctx
->create_tes_state
= iris_create_shader_state
;
611 ctx
->create_gs_state
= iris_create_shader_state
;
612 ctx
->create_fs_state
= iris_create_shader_state
;
614 ctx
->delete_vs_state
= iris_delete_shader_state
;
615 ctx
->delete_tcs_state
= iris_delete_shader_state
;
616 ctx
->delete_tes_state
= iris_delete_shader_state
;
617 ctx
->delete_gs_state
= iris_delete_shader_state
;
618 ctx
->delete_fs_state
= iris_delete_shader_state
;
620 ctx
->bind_vs_state
= iris_bind_vs_state
;
621 ctx
->bind_tcs_state
= iris_bind_tcs_state
;
622 ctx
->bind_tes_state
= iris_bind_tes_state
;
623 ctx
->bind_gs_state
= iris_bind_gs_state
;
624 ctx
->bind_fs_state
= iris_bind_fs_state
;