radeonsi: Remove use.sgpr* intrinsics, use load instructions instead
[mesa.git] / src / gallium / drivers / radeonsi / radeonsi_shader.c
1
2 #include "gallivm/lp_bld_tgsi_action.h"
3 #include "gallivm/lp_bld_const.h"
4 #include "gallivm/lp_bld_intr.h"
5 #include "gallivm/lp_bld_tgsi.h"
6 #include "radeon_llvm.h"
7 #include "radeon_llvm_emit.h"
8 #include "tgsi/tgsi_info.h"
9 #include "tgsi/tgsi_parse.h"
10 #include "tgsi/tgsi_scan.h"
11 #include "tgsi/tgsi_dump.h"
12
13 #include "radeonsi_pipe.h"
14 #include "radeonsi_shader.h"
15 #include "sid.h"
16
17 #include <assert.h>
18 #include <errno.h>
19 #include <stdio.h>
20
21 /*
22 static ps_remap_inputs(
23 struct tgsi_llvm_context * tl_ctx,
24 unsigned tgsi_index,
25 unsigned tgsi_chan)
26 {
27 :
28 }
29
30 struct si_input
31 {
32 struct list_head head;
33 unsigned tgsi_index;
34 unsigned tgsi_chan;
35 unsigned order;
36 };
37 */
38
39
/* Per-shader compilation state.
 *
 * NOTE(review): si_shader_context() recovers this struct by casting the
 * lp_build_tgsi_context pointer handed out by gallivm, which relies on
 * radeon_bld (and its soa.bld_base member) sitting at offset 0 -- keep
 * radeon_bld as the first member.
 */
struct si_shader_context
{
	struct radeon_llvm_context radeon_bld;	/* must stay first (see note above) */
	struct r600_context *rctx;		/* owning pipe context */
	struct tgsi_parse_context parse;	/* parser state reused by the epilogue */
	struct tgsi_token * tokens;		/* TGSI token stream being compiled */
	struct si_pipe_shader *shader;		/* shader object receiving the results */
	unsigned type; /* TGSI_PROCESSOR_* specifies the type of shader. */
/*	unsigned num_inputs; */
/*	struct list_head inputs; */
/*	unsigned * input_mappings *//* From TGSI to SI hw */
/*	struct tgsi_shader_info info;*/
};
53
/* Recover the enclosing si_shader_context from the gallivm build context.
 * Valid because the bld_base lives at offset 0 of si_shader_context. */
static struct si_shader_context * si_shader_context(
	struct lp_build_tgsi_context * bld_base)
{
	void * ctx = bld_base;

	return (struct si_shader_context *)ctx;
}
59
60
/* Base offsets for the interpolation parameter groups.
 * NOTE(review): not referenced below -- presumably reserved for future
 * interpolation-mode handling; confirm before removing. */
#define PERSPECTIVE_BASE 0
#define LINEAR_BASE 9

/* Offsets within an interpolation group, by sample location. */
#define SAMPLE_OFFSET 0
#define CENTER_OFFSET 2
#define CENTROID_OFSET 4	/* NOTE(review): typo for CENTROID_OFFSET; kept to avoid breaking users */

#define USE_SGPR_MAX_SUFFIX_LEN 5
/* LLVM address spaces used to model SI memory accesses (see use_sgpr()). */
#define CONST_ADDR_SPACE 2
#define USER_SGPR_ADDR_SPACE 8
71
/* Kind of value held in a user SGPR; selects the LLVM type that
 * use_sgpr() loads. */
enum sgpr_type {
	SGPR_CONST_PTR_F32,	/* pointer to float in the constant address space */
	SGPR_CONST_PTR_V4I32,	/* pointer to <4 x i32> in the constant address space */
	SGPR_CONST_PTR_V8I32,	/* pointer to <8 x i32> in the constant address space */
	SGPR_I32,		/* single 32-bit integer */
	SGPR_I64		/* 64-bit integer (occupies an aligned SGPR pair) */
};
79
80 /**
81 * Build an LLVM bytecode indexed load using LLVMBuildGEP + LLVMBuildLoad
82 *
83 * @param offset The offset parameter specifies the number of
84 * elements to offset, not the number of bytes or dwords. An element is the
85 * the type pointed to by the base_ptr parameter (e.g. int is the element of
86 * an int* pointer)
87 *
88 * When LLVM lowers the load instruction, it will convert the element offset
89 * into a dword offset automatically.
90 *
91 */
92 static LLVMValueRef build_indexed_load(
93 struct gallivm_state * gallivm,
94 LLVMValueRef base_ptr,
95 LLVMValueRef offset)
96 {
97 LLVMValueRef computed_ptr = LLVMBuildGEP(
98 gallivm->builder, base_ptr, &offset, 1, "");
99
100 return LLVMBuildLoad(gallivm->builder, computed_ptr, "");
101 }
102
103 /**
104 * Load a value stored in one of the user SGPRs
105 *
106 * @param sgpr This is the sgpr to load the value from. If you need to load a
107 * value that is stored in consecutive SGPR registers (e.g. a 64-bit pointer),
108 * then you should pass the index of the first SGPR that holds the value. For
109 * example, if you want to load a pointer that is stored in SGPRs 2 and 3, then
110 * use pass 2 for the sgpr parameter.
111 *
112 * The value of the sgpr parameter must also be aligned to the width of the type
113 * being loaded, so that the sgpr parameter is divisible by the dword width of the
114 * type. For example, if the value being loaded is two dwords wide, then the sgpr
115 * parameter must be divisible by two.
116 */
117 static LLVMValueRef use_sgpr(
118 struct gallivm_state * gallivm,
119 enum sgpr_type type,
120 unsigned sgpr)
121 {
122 LLVMValueRef sgpr_index;
123 LLVMTypeRef ret_type;
124 LLVMValueRef ptr;
125
126 sgpr_index = lp_build_const_int32(gallivm, sgpr);
127
128 switch (type) {
129 case SGPR_CONST_PTR_F32:
130 assert(sgpr % 2 == 0);
131 ret_type = LLVMFloatTypeInContext(gallivm->context);
132 ret_type = LLVMPointerType(ret_type, CONST_ADDR_SPACE);
133 break;
134
135 case SGPR_I32:
136 ret_type = LLVMInt32TypeInContext(gallivm->context);
137 break;
138
139 case SGPR_I64:
140 assert(sgpr % 2 == 0);
141 ret_type= LLVMInt64TypeInContext(gallivm->context);
142 break;
143
144 case SGPR_CONST_PTR_V4I32:
145 assert(sgpr % 2 == 0);
146 ret_type = LLVMInt32TypeInContext(gallivm->context);
147 ret_type = LLVMVectorType(ret_type, 4);
148 ret_type = LLVMPointerType(ret_type, CONST_ADDR_SPACE);
149 break;
150
151 case SGPR_CONST_PTR_V8I32:
152 assert(sgpr % 2 == 0);
153 ret_type = LLVMInt32TypeInContext(gallivm->context);
154 ret_type = LLVMVectorType(ret_type, 8);
155 ret_type = LLVMPointerType(ret_type, CONST_ADDR_SPACE);
156 break;
157
158 default:
159 assert(!"Unsupported SGPR type in use_sgpr()");
160 return NULL;
161 }
162
163 ret_type = LLVMPointerType(ret_type, USER_SGPR_ADDR_SPACE);
164 ptr = LLVMBuildIntToPtr(gallivm->builder, sgpr_index, ret_type, "");
165 return LLVMBuildLoad(gallivm->builder, ptr, "");
166 }
167
/**
 * Declare a vertex shader input: fetch the attribute via the
 * llvm.SI.vs.load.input intrinsic and store each of its four components
 * into the radeon_llvm input array.
 */
static void declare_input_vs(
	struct si_shader_context * si_shader_ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	LLVMValueRef t_list_ptr;
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef attribute_offset;
	LLVMValueRef buffer_index_reg;
	LLVMValueRef args[3];
	LLVMTypeRef vec4_type;
	LLVMValueRef input;
	struct lp_build_context * uint = &si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct lp_build_context * base = &si_shader_ctx->radeon_bld.soa.bld_base.base;
	struct r600_context *rctx = si_shader_ctx->rctx;
	struct pipe_vertex_element *velem = &rctx->vertex_elements->elements[input_index];
	unsigned chan;

	/* Load the T list (one <4 x i32> descriptor per vertex buffer). */
	/* XXX: Communicate with the rest of the driver about which SGPR the T#
	 * list pointer is going to be stored in. Hard code to SGPR[6:7] for
	 * now */
	t_list_ptr = use_sgpr(base->gallivm, SGPR_CONST_PTR_V4I32, 6);

	/* Element offset into the T list: the vertex buffer this attribute
	 * reads from. */
	t_offset = lp_build_const_int32(base->gallivm, velem->vertex_buffer_index);

	t_list = build_indexed_load(base->gallivm, t_list_ptr, t_offset);

	/* Build the attribute offset (the element's src_offset within the
	 * vertex). */
	attribute_offset = lp_build_const_int32(base->gallivm, velem->src_offset);

	/* Load the buffer index, which is always stored in VGPR0
	 * for Vertex Shaders */
	buffer_index_reg = lp_build_intrinsic(base->gallivm->builder,
		"llvm.SI.vs.load.buffer.index", uint->elem_type, NULL, 0);

	vec4_type = LLVMVectorType(base->elem_type, 4);
	args[0] = t_list;
	args[1] = attribute_offset;
	args[2] = buffer_index_reg;
	input = lp_build_intrinsic(base->gallivm->builder,
		"llvm.SI.vs.load.input", vec4_type, args, 3);

	/* Break up the vec4 into individual components */
	for (chan = 0; chan < 4; chan++) {
		LLVMValueRef llvm_chan = lp_build_const_int32(base->gallivm, chan);
		/* XXX: Use a helper function for this. There is one in
		 * tgsi_llvm.c. */
		si_shader_ctx->radeon_bld.inputs[radeon_llvm_reg_index_soa(input_index, chan)] =
			LLVMBuildExtractElement(base->gallivm->builder,
				input, llvm_chan, "");
	}
}
222
/**
 * Declare a fragment shader input: select an llvm.SI.fs.interp.* intrinsic
 * from the declaration's interpolation mode and emit one call per channel,
 * storing the results in the radeon_llvm input array.
 */
static void declare_input_fs(
	struct si_shader_context * si_shader_ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	const char * intr_name;
	unsigned chan;
	struct lp_build_context * base =
		&si_shader_ctx->radeon_bld.soa.bld_base.base;
	struct gallivm_state * gallivm = base->gallivm;

	/* This value is:
	 * [15:0] NewPrimMask (Bit mask for each quad. It is set if the
	 *                     quad begins a new primitive. Bit 0 always needs
	 *                     to be unset)
	 * [31:16] ParamOffset
	 *
	 */
	/* XXX: This register number must be identical to the S_00B02C_USER_SGPR
	 * register field value
	 */
	LLVMValueRef params = use_sgpr(base->gallivm, SGPR_I32, 6);


	/* XXX: Is this the input_index? */
	LLVMValueRef attr_number = lp_build_const_int32(gallivm, input_index);

	/* XXX: Handle all possible interpolation modes */
	switch (decl->Interp.Interpolate) {
	case TGSI_INTERPOLATE_COLOR:
		/* Colors interpolate flat when flat shading is enabled,
		 * perspective-correct otherwise. */
		if (si_shader_ctx->rctx->rasterizer->flatshade) {
			intr_name = "llvm.SI.fs.interp.constant";
		} else {
			if (decl->Interp.Centroid)
				intr_name = "llvm.SI.fs.interp.persp.centroid";
			else
				intr_name = "llvm.SI.fs.interp.persp.center";
		}
		break;
	case TGSI_INTERPOLATE_CONSTANT:
		intr_name = "llvm.SI.fs.interp.constant";
		break;
	case TGSI_INTERPOLATE_LINEAR:
		if (decl->Interp.Centroid)
			intr_name = "llvm.SI.fs.interp.linear.centroid";
		else
			intr_name = "llvm.SI.fs.interp.linear.center";
		break;
	case TGSI_INTERPOLATE_PERSPECTIVE:
		if (decl->Interp.Centroid)
			intr_name = "llvm.SI.fs.interp.persp.centroid";
		else
			intr_name = "llvm.SI.fs.interp.persp.center";
		break;
	default:
		/* Unknown mode: skip this input entirely. */
		fprintf(stderr, "Warning: Unhandled interpolation mode.\n");
		return;
	}

	/* XXX: Could there be more than TGSI_NUM_CHANNELS (4) ? */
	for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
		LLVMValueRef args[3];
		LLVMValueRef llvm_chan = lp_build_const_int32(gallivm, chan);
		unsigned soa_index = radeon_llvm_reg_index_soa(input_index, chan);
		LLVMTypeRef input_type = LLVMFloatTypeInContext(gallivm->context);
		args[0] = llvm_chan;
		args[1] = attr_number;
		args[2] = params;
		si_shader_ctx->radeon_bld.inputs[soa_index] =
			lp_build_intrinsic(gallivm->builder, intr_name,
				input_type, args, 3);
	}
}
296
297 static void declare_input(
298 struct radeon_llvm_context * radeon_bld,
299 unsigned input_index,
300 const struct tgsi_full_declaration *decl)
301 {
302 struct si_shader_context * si_shader_ctx =
303 si_shader_context(&radeon_bld->soa.bld_base);
304 if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
305 declare_input_vs(si_shader_ctx, input_index, decl);
306 } else if (si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT) {
307 declare_input_fs(si_shader_ctx, input_index, decl);
308 } else {
309 fprintf(stderr, "Warning: Unsupported shader type,\n");
310 }
311 }
312
313 static LLVMValueRef fetch_constant(
314 struct lp_build_tgsi_context * bld_base,
315 const struct tgsi_full_src_register *reg,
316 enum tgsi_opcode_type type,
317 unsigned swizzle)
318 {
319 struct lp_build_context * base = &bld_base->base;
320
321 LLVMValueRef const_ptr;
322 LLVMValueRef offset;
323
324 /* XXX: Assume the pointer to the constant buffer is being stored in
325 * SGPR[0:1] */
326 const_ptr = use_sgpr(base->gallivm, SGPR_CONST_PTR_F32, 0);
327
328 /* XXX: This assumes that the constant buffer is not packed, so
329 * CONST[0].x will have an offset of 0 and CONST[1].x will have an
330 * offset of 4. */
331 offset = lp_build_const_int32(base->gallivm,
332 (reg->Register.Index * 4) + swizzle);
333
334 return build_indexed_load(base->gallivm, const_ptr, offset);
335 }
336
/* XXX: This is partially implemented for VS only at this point. It is not complete */
/*
 * Shader epilogue: re-walk the token stream looking for OUTPUT declarations
 * and emit one llvm.SI.export intrinsic per output register.  The export of
 * the VS position / PS color is buffered in last_args so it can be re-emitted
 * at the end with the "done" bit set.
 */
static void si_llvm_emit_epilogue(struct lp_build_tgsi_context * bld_base)
{
	struct si_shader_context * si_shader_ctx = si_shader_context(bld_base);
	struct r600_shader * shader = &si_shader_ctx->shader->shader;
	struct lp_build_context * base = &bld_base->base;
	struct lp_build_context * uint =
		&si_shader_ctx->radeon_bld.soa.bld_base.uint_bld;
	struct tgsi_parse_context *parse = &si_shader_ctx->parse;
	LLVMValueRef last_args[9] = { 0 };

	while (!tgsi_parse_end_of_tokens(parse)) {
		/* XXX: component_bits controls which components of the output
		 * registers actually get exported. (e.g bit 0 means export
		 * X component, bit 1 means export Y component, etc.) I'm
		 * hard coding this to 0xf for now. In the future, we might
		 * want to do something else. */
		unsigned component_bits = 0xf;
		unsigned chan;
		struct tgsi_full_declaration *d =
			&parse->FullToken.FullDeclaration;
		LLVMValueRef args[9];
		unsigned target;
		unsigned index;
		/* NOTE(review): these counters reset on every declaration
		 * token, so a second COLOR/GENERIC declaration restarts at
		 * param/MRT 0 -- verify this is intended. */
		unsigned color_count = 0;
		unsigned param_count = 0;
		int i;

		tgsi_parse_token(parse);
		if (parse->FullToken.Token.Type != TGSI_TOKEN_TYPE_DECLARATION)
			continue;

		/* Record input/output metadata for the driver state objects. */
		switch (d->Declaration.File) {
		case TGSI_FILE_INPUT:
			i = shader->ninput++;
			shader->input[i].name = d->Semantic.Name;
			shader->input[i].sid = d->Semantic.Index;
			shader->input[i].interpolate = d->Interp.Interpolate;
			shader->input[i].centroid = d->Interp.Centroid;
			break;
		case TGSI_FILE_OUTPUT:
			i = shader->noutput++;
			shader->output[i].name = d->Semantic.Name;
			shader->output[i].sid = d->Semantic.Index;
			shader->output[i].interpolate = d->Interp.Interpolate;
			break;
		}

		if (d->Declaration.File != TGSI_FILE_OUTPUT)
			continue;

		/* NOTE(review): `i` below refers to the single noutput slot
		 * recorded above; for multi-register ranges every index reuses
		 * the same slot -- confirm ranges are always 1 register. */
		for (index = d->Range.First; index <= d->Range.Last; index++) {
			for (chan = 0; chan < 4; chan++ ) {
				LLVMValueRef out_ptr =
					si_shader_ctx->radeon_bld.soa.outputs
					[index][chan];
				/* +5 because the first output value will be
				 * the 6th argument to the intrinsic. */
				args[chan + 5]= LLVMBuildLoad(
					base->gallivm->builder, out_ptr, "");
			}

			/* XXX: We probably need to keep track of the output
			 * values, so we know what we are passing to the next
			 * stage. */

			/* Select the correct target */
			switch(d->Semantic.Name) {
			case TGSI_SEMANTIC_POSITION:
				target = V_008DFC_SQ_EXP_POS;
				break;
			case TGSI_SEMANTIC_COLOR:
				/* Colors are params for the VS, MRTs for the PS. */
				if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX) {
					target = V_008DFC_SQ_EXP_PARAM + param_count;
					shader->output[i].param_offset = param_count;
					param_count++;
				} else {
					target = V_008DFC_SQ_EXP_MRT + color_count;
					color_count++;
				}
				break;
			case TGSI_SEMANTIC_GENERIC:
				target = V_008DFC_SQ_EXP_PARAM + param_count;
				shader->output[i].param_offset = param_count;
				param_count++;
				break;
			default:
				target = 0;
				fprintf(stderr,
					"Warning: SI unhandled output type:%d\n",
					d->Semantic.Name);
			}

			/* Specify which components to enable */
			args[0] = lp_build_const_int32(base->gallivm,
					component_bits);

			/* Specify whether the EXEC mask represents the valid mask */
			args[1] = lp_build_const_int32(base->gallivm, 0);

			/* Specify whether this is the last export */
			args[2] = lp_build_const_int32(base->gallivm, 0);

			/* Specify the target we are exporting */
			args[3] = lp_build_const_int32(base->gallivm, target);

			/* Set COMPR flag to zero to export data as 32-bit */
			args[4] = uint->zero;

			/* The VS position export / PS color export must be the
			 * program's final export, so defer it into last_args.
			 * If one was already buffered, flush the old one first. */
			if (si_shader_ctx->type == TGSI_PROCESSOR_VERTEX ?
			    (d->Semantic.Name == TGSI_SEMANTIC_POSITION) :
			    (d->Semantic.Name == TGSI_SEMANTIC_COLOR)) {
				if (last_args[0]) {
					lp_build_intrinsic(base->gallivm->builder,
						"llvm.SI.export",
						LLVMVoidTypeInContext(base->gallivm->context),
						last_args, 9);
				}

				memcpy(last_args, args, sizeof(args));
			} else {
				lp_build_intrinsic(base->gallivm->builder,
					"llvm.SI.export",
					LLVMVoidTypeInContext(base->gallivm->context),
					args, 9);
			}

		}
	}

	/* Emit the deferred export with the "done"/valid-mask bits set.
	 * NOTE(review): if no position/color output was seen, last_args is
	 * still mostly zero here -- verify that case cannot occur. */

	/* Specify whether the EXEC mask represents the valid mask */
	last_args[1] = lp_build_const_int32(base->gallivm,
			si_shader_ctx->type == TGSI_PROCESSOR_FRAGMENT);

	/* Specify that this is the last export */
	last_args[2] = lp_build_const_int32(base->gallivm, 1);

	lp_build_intrinsic(base->gallivm->builder,
		"llvm.SI.export",
		LLVMVoidTypeInContext(base->gallivm->context),
		last_args, 9);

	/* XXX: Look up what this function does */
/*	ctx->shader->output[i].spi_sid = r600_spi_sid(&ctx->shader->output[i]);*/
}
482
/**
 * Build the argument list for the llvm.SI.sample intrinsic:
 * [0] destination write mask, [1] packed coordinates, [2] resource
 * descriptor, [3] sampler state.
 */
static void tex_fetch_args(
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	LLVMValueRef ptr;
	LLVMValueRef offset;

	/* WriteMask */
	emit_data->args[0] = lp_build_const_int32(bld_base->base.gallivm,
			emit_data->inst->Dst[0].Register.WriteMask);

	/* Coordinates */
	/* XXX: Not all sample instructions need 4 address arguments. */
	emit_data->args[1] = lp_build_emit_fetch(bld_base, emit_data->inst,
			0, LP_CHAN_ALL);

	/* Resource descriptor (<8 x i32>); table assumed in SGPR[4:5].
	 * NOTE(review): build_indexed_load() documents its offset as an
	 * element count, so scaling by 8 (and by 4 below) looks like a
	 * dword-style offset instead -- verify against the backend lowering. */
	ptr = use_sgpr(bld_base->base.gallivm, SGPR_CONST_PTR_V8I32, 4);
	offset = lp_build_const_int32(bld_base->base.gallivm,
			8 * emit_data->inst->Src[1].Register.Index);
	emit_data->args[2] = build_indexed_load(bld_base->base.gallivm,
			ptr, offset);

	/* Sampler state (<4 x i32>); table assumed in SGPR[2:3]. */
	ptr = use_sgpr(bld_base->base.gallivm, SGPR_CONST_PTR_V4I32, 2);
	offset = lp_build_const_int32(bld_base->base.gallivm,
			4 * emit_data->inst->Src[1].Register.Index);
	emit_data->args[3] = build_indexed_load(bld_base->base.gallivm,
			ptr, offset);

	/* Dimensions */
	/* XXX: We might want to pass this information to the shader at some
	 * point. */
	/* emit_data->args[4] = lp_build_const_int32(bld_base->base.gallivm,
			emit_data->inst->Texture.Texture);
	*/

	emit_data->arg_count = 4;
	/* XXX: To optimize, we could use a float or v2f32, if the last bits of
	 * the writemask are clear */
	emit_data->dst_type = LLVMVectorType(
			LLVMFloatTypeInContext(bld_base->base.gallivm->context),
			4);
}
526
/* TGSI TEX is lowered to the llvm.SI.sample intrinsic; arguments are
 * assembled by tex_fetch_args() above. */
static const struct lp_build_tgsi_action tex_action = {
	.fetch_args = tex_fetch_args,
	.emit = lp_build_tgsi_intrinsic,
	.intr_name = "llvm.SI.sample"
};
532
533
534 int si_pipe_shader_create(
535 struct pipe_context *ctx,
536 struct si_pipe_shader *shader)
537 {
538 struct r600_context *rctx = (struct r600_context*)ctx;
539 struct si_shader_context si_shader_ctx;
540 struct tgsi_shader_info shader_info;
541 struct lp_build_tgsi_context * bld_base;
542 LLVMModuleRef mod;
543 unsigned char * inst_bytes;
544 unsigned inst_byte_count;
545 unsigned i;
546
547 radeon_llvm_context_init(&si_shader_ctx.radeon_bld);
548 bld_base = &si_shader_ctx.radeon_bld.soa.bld_base;
549
550 tgsi_scan_shader(shader->tokens, &shader_info);
551 bld_base->info = &shader_info;
552 bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = fetch_constant;
553 bld_base->emit_epilogue = si_llvm_emit_epilogue;
554
555 bld_base->op_actions[TGSI_OPCODE_TEX] = tex_action;
556
557 si_shader_ctx.radeon_bld.load_input = declare_input;
558 si_shader_ctx.tokens = shader->tokens;
559 tgsi_parse_init(&si_shader_ctx.parse, si_shader_ctx.tokens);
560 si_shader_ctx.shader = shader;
561 si_shader_ctx.type = si_shader_ctx.parse.FullHeader.Processor.Processor;
562 si_shader_ctx.rctx = rctx;
563
564 shader->shader.nr_cbufs = rctx->nr_cbufs;
565
566 lp_build_tgsi_llvm(bld_base, shader->tokens);
567
568 radeon_llvm_finalize_module(&si_shader_ctx.radeon_bld);
569
570 mod = bld_base->base.gallivm->module;
571 tgsi_dump(shader->tokens, 0);
572 LLVMDumpModule(mod);
573 radeon_llvm_compile(mod, &inst_bytes, &inst_byte_count, "SI", 1 /* dump */);
574 fprintf(stderr, "SI CODE:\n");
575 for (i = 0; i < inst_byte_count; i+=4 ) {
576 fprintf(stderr, "%02x%02x%02x%02x\n", inst_bytes[i + 3],
577 inst_bytes[i + 2], inst_bytes[i + 1],
578 inst_bytes[i]);
579 }
580
581 shader->num_sgprs = util_le32_to_cpu(*(uint32_t*)inst_bytes);
582 shader->num_vgprs = util_le32_to_cpu(*(uint32_t*)(inst_bytes + 4));
583 shader->spi_ps_input_ena = util_le32_to_cpu(*(uint32_t*)(inst_bytes + 8));
584
585 tgsi_parse_free(&si_shader_ctx.parse);
586
587 /* copy new shader */
588 if (shader->bo == NULL) {
589 uint32_t *ptr;
590
591 shader->bo = (struct r600_resource*)
592 pipe_buffer_create(ctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE, inst_byte_count);
593 if (shader->bo == NULL) {
594 return -ENOMEM;
595 }
596 ptr = (uint32_t*)rctx->ws->buffer_map(shader->bo->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
597 if (0 /*R600_BIG_ENDIAN*/) {
598 for (i = 0; i < (inst_byte_count-12)/4; ++i) {
599 ptr[i] = util_bswap32(*(uint32_t*)(inst_bytes+12 + i*4));
600 }
601 } else {
602 memcpy(ptr, inst_bytes + 12, inst_byte_count - 12);
603 }
604 rctx->ws->buffer_unmap(shader->bo->cs_buf);
605 }
606
607 free(inst_bytes);
608
609 return 0;
610 }
611
612 void si_pipe_shader_destroy(struct pipe_context *ctx, struct si_pipe_shader *shader)
613 {
614 pipe_resource_reference((struct pipe_resource**)&shader->bo, NULL);
615
616 memset(&shader->shader,0,sizeof(struct r600_shader));
617 }