/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
 /*
  * Authors:
  *   Keith Whitwell <keithw@vmware.com>
  */
#include <pthread.h>
#include "main/imports.h"
#include "main/glspirv.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "program/prog_to_nir.h"
#include "program/program.h"
#include "program/programopt.h"
#include "tnl/tnl.h"
#include "util/ralloc.h"
#include "compiler/glsl/ir.h"
#include "compiler/glsl/glsl_to_nir.h"
#include "compiler/nir/nir_serialize.h"

#include "brw_program.h"
#include "brw_context.h"
#include "compiler/brw_nir.h"
#include "brw_defines.h"
#include "intel_batchbuffer.h"
static bool
brw_nir_lower_uniforms(nir_shader *nir, bool is_scalar)
{
   if (is_scalar) {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
                               type_size_scalar_bytes);
      return nir_lower_io(nir, nir_var_uniform, type_size_scalar_bytes, 0);
   } else {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
                               type_size_vec4_bytes);
      return nir_lower_io(nir, nir_var_uniform, type_size_vec4_bytes, 0);
   }
}
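/* Note on the two layouts above: type_size_scalar_bytes packs uniforms
 * tightly (a vec3 occupies 12 bytes), while type_size_vec4_bytes rounds
 * every uniform up to a full 16-byte vec4 slot (a vec3 occupies 16 bytes).
 * The layout chosen here has to match what the scalar or vec4 back-end
 * expects when it later uploads push constants.
 */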
nir_shader *
brw_create_nir(struct brw_context *brw,
               const struct gl_shader_program *shader_prog,
               struct gl_program *prog,
               gl_shader_stage stage,
               bool is_scalar)
{
   struct gl_context *ctx = &brw->ctx;
   const nir_shader_compiler_options *options =
      ctx->Const.ShaderCompilerOptions[stage].NirOptions;
   nir_shader *nir;

   /* First, lower the GLSL/Mesa IR or SPIR-V to NIR */
   if (shader_prog) {
      if (shader_prog->data->spirv) {
         nir = _mesa_spirv_to_nir(ctx, shader_prog, stage, options);
         nir_lower_deref_instrs(nir, ~0);
      } else {
         nir = glsl_to_nir(shader_prog, stage, options);
         nir_lower_deref_instrs(nir, ~0);
      }
      assert(nir);

      nir_remove_dead_variables(nir, nir_var_shader_in | nir_var_shader_out);
      nir_lower_returns(nir);
      nir_validate_shader(nir);
      NIR_PASS_V(nir, nir_lower_io_to_temporaries,
                 nir_shader_get_entrypoint(nir), true, false);
   } else {
      nir = prog_to_nir(prog, options);
      nir_lower_deref_instrs(nir, ~0);
      NIR_PASS_V(nir, nir_lower_regs_to_ssa); /* turn registers into SSA */
   }
   nir_validate_shader(nir);

   /* Lower PatchVerticesIn from system value to uniform.  This needs to
    * happen before brw_preprocess_nir, since that will lower system values
    * to intrinsics.
    *
    * We only do this for TES if no TCS is present, since otherwise we know
    * the number of vertices in the patch at link time and we can lower it
    * directly to a constant.  We do this in nir_lower_patch_vertices, which
    * needs to run after brw_nir_preprocess has turned the system values
    * into intrinsics.
    */
   const bool lower_patch_vertices_in_to_uniform =
      (stage == MESA_SHADER_TESS_CTRL && brw->screen->devinfo.gen >= 8) ||
      (stage == MESA_SHADER_TESS_EVAL &&
       !shader_prog->_LinkedShaders[MESA_SHADER_TESS_CTRL]);

   if (lower_patch_vertices_in_to_uniform)
      brw_nir_lower_patch_vertices_in_to_uniform(nir);

   nir = brw_preprocess_nir(brw->screen->compiler, nir);

   if (stage == MESA_SHADER_TESS_EVAL && !lower_patch_vertices_in_to_uniform) {
      assert(shader_prog->_LinkedShaders[MESA_SHADER_TESS_CTRL]);
      struct gl_linked_shader *linked_tcs =
         shader_prog->_LinkedShaders[MESA_SHADER_TESS_CTRL];
      uint32_t patch_vertices = linked_tcs->Program->info.tess.tcs_vertices_out;
      nir_lower_tes_patch_vertices(nir, patch_vertices);
   }

   if (stage == MESA_SHADER_FRAGMENT) {
      static const struct nir_lower_wpos_ytransform_options wpos_options = {
         .state_tokens = {STATE_INTERNAL, STATE_FB_WPOS_Y_TRANSFORM, 0, 0, 0},
         .fs_coord_pixel_center_integer = 1,
         .fs_coord_origin_upper_left = 1,
      };

      bool progress = false;
      NIR_PASS(progress, nir, nir_lower_wpos_ytransform, &wpos_options);
      if (progress) {
         _mesa_add_state_reference(prog->Parameters,
                                   wpos_options.state_tokens);
      }
   }

   NIR_PASS_V(nir, brw_nir_lower_uniforms, is_scalar);

   return nir;
}
void
brw_shader_gather_info(nir_shader *nir, struct gl_program *prog)
{
   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   /* Copy the info we just generated back into the gl_program */
   const char *prog_name = prog->info.name;
   const char *prog_label = prog->info.label;
   prog->info = nir->info;
   prog->info.name = prog_name;
   prog->info.label = prog_label;
}
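/* The struct copy above would otherwise replace the gl_program's name and
 * label with pointers into NIR-owned storage, whose lifetime is presumably
 * tied to the nir_shader; stashing and restoring them keeps the gl_program
 * self-contained.
 */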
static unsigned
get_new_program_id(struct intel_screen *screen)
{
   return p_atomic_inc_return(&screen->program_id);
}
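/* Program IDs are handed out with an atomic increment because a single
 * intel_screen can be shared by several contexts on different threads;
 * the IDs only need to be unique, not consecutive.
 */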
static struct gl_program *brwNewProgram(struct gl_context *ctx, GLenum target,
                                        GLuint id, bool is_arb_asm)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_program *prog = rzalloc(NULL, struct brw_program);

   if (prog) {
      prog->id = get_new_program_id(brw->screen);

      return _mesa_init_gl_program(&prog->program, target, id, is_arb_asm);
   }

   return NULL;
}
static void brwDeleteProgram( struct gl_context *ctx,
                              struct gl_program *prog )
{
   struct brw_context *brw = brw_context(ctx);

   /* Beware!  prog's refcount has reached zero, and it's about to be freed.
    *
    * In brw_upload_pipeline_state(), we compare brw->programs[i] to
    * ctx->FooProgram._Current, and flag BRW_NEW_FOO_PROGRAM if the
    * pointer has changed.
    *
    * We cannot leave brw->programs[i] as a dangling pointer to the dead
    * program.  malloc() may allocate the same memory for a new gl_program,
    * causing us to see matching pointers...but totally different programs.
    *
    * We cannot set brw->programs[i] to NULL, either.  If we've deleted the
    * active program, Mesa may set ctx->FooProgram._Current to NULL.  That
    * would cause us to see matching pointers (NULL == NULL), and fail to
    * detect that a program has changed since our last draw.
    *
    * So, set it to a bogus gl_program pointer that will never match,
    * causing us to properly reevaluate the state on our next draw.
    *
    * Getting this wrong causes heisenbugs which are very hard to catch,
    * as you need a very specific allocation pattern to hit the problem.
    */
   static const struct gl_program deleted_program;

   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      if (brw->programs[i] == prog)
         brw->programs[i] = (struct gl_program *) &deleted_program;
   }

   _mesa_delete_program( ctx, prog );
}
static GLboolean
brwProgramStringNotify(struct gl_context *ctx,
                       GLenum target,
                       struct gl_program *prog)
{
   assert(target == GL_VERTEX_PROGRAM_ARB || !prog->arb.IsPositionInvariant);

   struct brw_context *brw = brw_context(ctx);
   const struct brw_compiler *compiler = brw->screen->compiler;

   switch (target) {
   case GL_FRAGMENT_PROGRAM_ARB: {
      struct brw_program *newFP = brw_program(prog);
      const struct brw_program *curFP =
         brw_program_const(brw->programs[MESA_SHADER_FRAGMENT]);

      if (newFP == curFP)
         brw->ctx.NewDriverState |= BRW_NEW_FRAGMENT_PROGRAM;
      newFP->id = get_new_program_id(brw->screen);

      prog->nir = brw_create_nir(brw, NULL, prog, MESA_SHADER_FRAGMENT, true);

      brw_shader_gather_info(prog->nir, prog);

      brw_fs_precompile(ctx, prog);
      break;
   }
   case GL_VERTEX_PROGRAM_ARB: {
      struct brw_program *newVP = brw_program(prog);
      const struct brw_program *curVP =
         brw_program_const(brw->programs[MESA_SHADER_VERTEX]);

      if (newVP == curVP)
         brw->ctx.NewDriverState |= BRW_NEW_VERTEX_PROGRAM;
      if (newVP->program.arb.IsPositionInvariant) {
         _mesa_insert_mvp_code(ctx, &newVP->program);
      }
      newVP->id = get_new_program_id(brw->screen);

      /* Also tell tnl about it:
       */
      _tnl_program_string(ctx, target, prog);

      prog->nir = brw_create_nir(brw, NULL, prog, MESA_SHADER_VERTEX,
                                 compiler->scalar_stage[MESA_SHADER_VERTEX]);

      brw_shader_gather_info(prog->nir, prog);

      brw_vs_precompile(ctx, prog);
      break;
   }
   default:
      /*
       * driver->ProgramStringNotify is only called for ARB programs, fixed
       * function vertex programs, and ir_to_mesa (which isn't used by the
       * i965 back-end).  Therefore, even after geometry shaders are added,
       * this function should only ever be called with a target of
       * GL_VERTEX_PROGRAM_ARB or GL_FRAGMENT_PROGRAM_ARB.
       */
      unreachable("Unexpected target in brwProgramStringNotify");
   }

   return true;
}
static void
brw_memory_barrier(struct gl_context *ctx, GLbitfield barriers)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   unsigned bits = PIPE_CONTROL_DATA_CACHE_FLUSH | PIPE_CONTROL_CS_STALL;
   assert(devinfo->gen >= 7 && devinfo->gen <= 11);

   if (barriers & (GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT |
                   GL_ELEMENT_ARRAY_BARRIER_BIT |
                   GL_COMMAND_BARRIER_BIT))
      bits |= PIPE_CONTROL_VF_CACHE_INVALIDATE;

   if (barriers & GL_UNIFORM_BARRIER_BIT)
      bits |= (PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
               PIPE_CONTROL_CONST_CACHE_INVALIDATE);

   if (barriers & GL_TEXTURE_FETCH_BARRIER_BIT)
      bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;

   if (barriers & (GL_TEXTURE_UPDATE_BARRIER_BIT |
                   GL_PIXEL_BUFFER_BARRIER_BIT))
      bits |= (PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
               PIPE_CONTROL_RENDER_TARGET_FLUSH);

   if (barriers & GL_FRAMEBUFFER_BARRIER_BIT)
      bits |= (PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
               PIPE_CONTROL_RENDER_TARGET_FLUSH);

   /* Typed surface messages are handled by the render cache on IVB, so we
    * need to flush it too.
    */
   if (devinfo->gen == 7 && !devinfo->is_haswell)
      bits |= PIPE_CONTROL_RENDER_TARGET_FLUSH;

   brw_emit_pipe_control_flush(brw, bits);
}
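/* The unconditional DATA_CACHE_FLUSH | CS_STALL above covers the write side:
 * shader SSBO/image/atomic writes land in the data cache, and the CS stall
 * keeps the flush ordered against later commands.  The per-barrier-bit logic
 * then only adds whichever invalidations the read side of the barrier needs.
 */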
static void
brw_framebuffer_fetch_barrier(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (!ctx->Extensions.EXT_shader_framebuffer_fetch) {
      if (devinfo->gen >= 6) {
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                     PIPE_CONTROL_CS_STALL);
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
      } else {
         brw_emit_pipe_control_flush(brw,
                                     PIPE_CONTROL_RENDER_TARGET_FLUSH);
      }
   }
}
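/* Two separate PIPE_CONTROLs are used on gen6+ because the render-target
 * flush (with its CS stall) must fully complete before the texture-cache
 * invalidate takes effect; combining them in one packet would not guarantee
 * that ordering.
 */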
void
brw_get_scratch_bo(struct brw_context *brw,
                   struct brw_bo **scratch_bo, int size)
{
   struct brw_bo *old_bo = *scratch_bo;

   if (old_bo && old_bo->size < size) {
      brw_bo_unreference(old_bo);
      old_bo = NULL;
   }

   if (!old_bo) {
      *scratch_bo =
         brw_bo_alloc(brw->bufmgr, "scratch bo", size, BRW_MEMZONE_SCRATCH);
   }
}
/**
 * Reserve enough scratch space for the given stage to hold \p per_thread_size
 * bytes times the number of threads the hardware can run for that stage.
 */
void
brw_alloc_stage_scratch(struct brw_context *brw,
                        struct brw_stage_state *stage_state,
                        unsigned per_thread_size)
{
   if (stage_state->per_thread_scratch >= per_thread_size)
      return;

   stage_state->per_thread_scratch = per_thread_size;

   if (stage_state->scratch_bo)
      brw_bo_unreference(stage_state->scratch_bo);

   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   unsigned thread_count;
   switch(stage_state->stage) {
   case MESA_SHADER_VERTEX:
      thread_count = devinfo->max_vs_threads;
      break;
   case MESA_SHADER_TESS_CTRL:
      thread_count = devinfo->max_tcs_threads;
      break;
   case MESA_SHADER_TESS_EVAL:
      thread_count = devinfo->max_tes_threads;
      break;
   case MESA_SHADER_GEOMETRY:
      thread_count = devinfo->max_gs_threads;
      break;
   case MESA_SHADER_FRAGMENT:
      thread_count = devinfo->max_wm_threads;
      break;
   case MESA_SHADER_COMPUTE: {
      unsigned subslices = MAX2(brw->screen->subslice_total, 1);

      /* The documentation for 3DSTATE_PS "Scratch Space Base Pointer" says:
       *
       * "Scratch Space per slice is computed based on 4 sub-slices.  SW must
       *  allocate scratch space enough so that each slice has 4 slices
       *  allotted."
       *
       * According to the other driver team, this applies to compute shaders
       * as well.  This is not currently documented at all.
       *
       * brw->screen->subslice_total is the TOTAL number of subslices
       * and we wish to view that there are 4 subslices per slice
       * instead of the actual number of subslices per slice.
       */
      if (devinfo->gen >= 9)
         subslices = 4 * brw->screen->devinfo.num_slices;

      unsigned scratch_ids_per_subslice;
      if (devinfo->is_haswell) {
         /* WaCSScratchSize:hsw
          *
          * Haswell's scratch space address calculation appears to be sparse
          * rather than tightly packed.  The Thread ID has bits indicating
          * which subslice, EU within a subslice, and thread within an EU it
          * is.  There's a maximum of two slices and two subslices, so these
          * can be stored with a single bit.  Even though there are only 10 EUs
          * per subslice, this is stored in 4 bits, so there's an effective
          * maximum value of 16 EUs.  Similarly, although there are only 7
          * threads per EU, this is stored in a 3 bit number, giving an
          * effective maximum value of 8 threads per EU.
          *
          * This means that we need to use 16 * 8 instead of 10 * 7 for the
          * number of threads per subslice.
          */
         scratch_ids_per_subslice = 16 * 8;
      } else if (devinfo->is_cherryview) {
         /* Cherryview devices have either 6 or 8 EUs per subslice, and each
          * EU has 7 threads.  The 6 EU devices appear to calculate thread IDs
          * as if it had 8 EUs.
          */
         scratch_ids_per_subslice = 8 * 7;
      } else {
         scratch_ids_per_subslice = devinfo->max_cs_threads;
      }

      thread_count = scratch_ids_per_subslice * subslices;
      break;
   }
   default:
      unreachable("Unsupported stage!");
   }

   stage_state->scratch_bo =
      brw_bo_alloc(brw->bufmgr, "shader scratch space",
                   per_thread_size * thread_count, BRW_MEMZONE_SCRATCH);
}
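/* Worked example (illustrative numbers): on a Haswell part with 2 subslices,
 * scratch_ids_per_subslice is 16 * 8 = 128 rather than the 10 * 7 = 70 real
 * threads, so a request for 2 KB of scratch per thread allocates
 * 2048 * 128 * 2 = 512 KB instead of the ~280 KB the actual thread count
 * would suggest.  The sparse thread-ID encoding makes the padding necessary.
 */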
void brwInitFragProgFuncs( struct dd_function_table *functions )
{
   assert(functions->ProgramStringNotify == _tnl_program_string);

   functions->NewProgram = brwNewProgram;
   functions->DeleteProgram = brwDeleteProgram;
   functions->ProgramStringNotify = brwProgramStringNotify;

   functions->LinkShader = brw_link_shader;

   functions->MemoryBarrier = brw_memory_barrier;
   functions->FramebufferFetchBarrier = brw_framebuffer_fetch_barrier;
}
struct shader_times {
   uint64_t time;
   uint64_t written;
   uint64_t reset;
};
void
brw_init_shader_time(struct brw_context *brw)
{
   const int max_entries = 2048;
   brw->shader_time.bo =
      brw_bo_alloc(brw->bufmgr, "shader time",
                   max_entries * BRW_SHADER_TIME_STRIDE * 3,
                   BRW_MEMZONE_OTHER);
   brw->shader_time.names = rzalloc_array(brw, const char *, max_entries);
   brw->shader_time.ids = rzalloc_array(brw, int, max_entries);
   brw->shader_time.types = rzalloc_array(brw, enum shader_time_shader_type,
                                          max_entries);
   brw->shader_time.cumulative = rzalloc_array(brw, struct shader_times,
                                               max_entries);
   brw->shader_time.max_entries = max_entries;
}
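/* BO layout assumed by brw_collect_shader_time() below: each entry owns
 * three consecutive BRW_SHADER_TIME_STRIDE-byte records (accumulated time,
 * written count, reset count), so entry i starts at byte offset
 * i * 3 * BRW_SHADER_TIME_STRIDE and the BO holds max_entries * 3 records.
 */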
static int
compare_time(const void *a, const void *b)
{
   uint64_t * const *a_val = a;
   uint64_t * const *b_val = b;

   /* We don't just subtract because we're turning the value to an int. */
   if (**a_val < **b_val)
      return -1;
   else if (**a_val == **b_val)
      return 0;
   else
      return 1;
}
static void
print_shader_time_line(const char *stage, const char *name,
                       int shader_num, uint64_t time, uint64_t total)
{
   fprintf(stderr, "%-6s%-18s", stage, name);

   if (shader_num != 0)
      fprintf(stderr, "%4d: ", shader_num);
   else
      fprintf(stderr, "    : ");

   fprintf(stderr, "%16lld (%7.2f Gcycles)      %4.1f%%\n",
           (long long)time,
           (double)time / 1000000000.0,
           (double)time / total * 100.0);
}
static void
brw_report_shader_time(struct brw_context *brw)
{
   if (!brw->shader_time.bo || !brw->shader_time.num_entries)
      return;

   uint64_t scaled[brw->shader_time.num_entries];
   uint64_t *sorted[brw->shader_time.num_entries];
   uint64_t total_by_type[ST_CS + 1];
   memset(total_by_type, 0, sizeof(total_by_type));
   uint64_t total = 0;
   for (int i = 0; i < brw->shader_time.num_entries; i++) {
      uint64_t written = 0, reset = 0;
      enum shader_time_shader_type type = brw->shader_time.types[i];

      sorted[i] = &scaled[i];

      switch (type) {
      case ST_VS:
      case ST_TCS:
      case ST_TES:
      case ST_GS:
      case ST_FS8:
      case ST_FS16:
      case ST_CS:
         written = brw->shader_time.cumulative[i].written;
         reset = brw->shader_time.cumulative[i].reset;
         break;

      default:
         /* I sometimes want to print things that aren't the 3 shader times.
          * Just print the sum in that case.
          */
         written = 1;
         reset = 0;
         break;
      }

      uint64_t time = brw->shader_time.cumulative[i].time;
      if (written) {
         scaled[i] = time / written * (written + reset);
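         /* "written" counts reports that actually accumulated time, while
          * "reset" counts reports that were thrown away (e.g. an invalid
          * timestamp delta).  Scaling by (written + reset) / written
          * extrapolates the measured time across the discarded reports.
          */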
      } else {
         scaled[i] = time;
      }

      switch (type) {
      case ST_VS:
      case ST_TCS:
      case ST_TES:
      case ST_GS:
      case ST_FS8:
      case ST_FS16:
      case ST_CS:
         total_by_type[type] += scaled[i];
         break;
      default:
         break;
      }

      total += scaled[i];
   }

   if (total == 0) {
      fprintf(stderr, "No shader time collected yet\n");
      return;
   }
   qsort(sorted, brw->shader_time.num_entries, sizeof(sorted[0]), compare_time);
   fprintf(stderr, "\n");
   fprintf(stderr, "type          ID                  cycles spent                   %% of total\n");
   for (int s = 0; s < brw->shader_time.num_entries; s++) {
      const char *stage;
      /* Work back from the sorted pointers times to a time to print. */
      int i = sorted[s] - scaled;

      if (scaled[i] == 0)
         continue;

      int shader_num = brw->shader_time.ids[i];
      const char *shader_name = brw->shader_time.names[i];

      switch (brw->shader_time.types[i]) {
      case ST_VS:
         stage = "vs";
         break;
      case ST_TCS:
         stage = "tcs";
         break;
      case ST_TES:
         stage = "tes";
         break;
      case ST_GS:
         stage = "gs";
         break;
      case ST_FS8:
         stage = "fs8";
         break;
      case ST_FS16:
         stage = "fs16";
         break;
      case ST_CS:
         stage = "cs";
         break;
      default:
         stage = "other";
         break;
      }

      print_shader_time_line(stage, shader_name, shader_num,
                             scaled[i], total);
   }
   fprintf(stderr, "\n");
   print_shader_time_line("total", "vs", 0, total_by_type[ST_VS], total);
   print_shader_time_line("total", "tcs", 0, total_by_type[ST_TCS], total);
   print_shader_time_line("total", "tes", 0, total_by_type[ST_TES], total);
   print_shader_time_line("total", "gs", 0, total_by_type[ST_GS], total);
   print_shader_time_line("total", "fs8", 0, total_by_type[ST_FS8], total);
   print_shader_time_line("total", "fs16", 0, total_by_type[ST_FS16], total);
   print_shader_time_line("total", "cs", 0, total_by_type[ST_CS], total);
}
void
brw_collect_shader_time(struct brw_context *brw)
{
   if (!brw->shader_time.bo)
      return;

   /* This probably stalls on the last rendering.  We could fix that by
    * delaying reading the reports, but it doesn't look like it's a big
    * overhead compared to the cost of tracking the time in the first place.
    */
   void *bo_map = brw_bo_map(brw, brw->shader_time.bo, MAP_READ | MAP_WRITE);

   for (int i = 0; i < brw->shader_time.num_entries; i++) {
      uint32_t *times = bo_map + i * 3 * BRW_SHADER_TIME_STRIDE;

      brw->shader_time.cumulative[i].time += times[BRW_SHADER_TIME_STRIDE * 0 / 4];
      brw->shader_time.cumulative[i].written += times[BRW_SHADER_TIME_STRIDE * 1 / 4];
      brw->shader_time.cumulative[i].reset += times[BRW_SHADER_TIME_STRIDE * 2 / 4];
   }

   /* Zero the BO out to clear it out for our next collection.
    */
   memset(bo_map, 0, brw->shader_time.bo->size);
   brw_bo_unmap(brw->shader_time.bo);
}
void
brw_collect_and_report_shader_time(struct brw_context *brw)
{
   brw_collect_shader_time(brw);

   if (brw->shader_time.report_time == 0 ||
       get_time() - brw->shader_time.report_time >= 1.0) {
      brw_report_shader_time(brw);
      brw->shader_time.report_time = get_time();
   }
}
/**
 * Chooses an index in the shader_time buffer and sets up tracking information
 * for our printouts.
 *
 * Note that this holds on to references to the underlying programs, which may
 * change their lifetimes compared to normal operation.
 */
int
brw_get_shader_time_index(struct brw_context *brw, struct gl_program *prog,
                          enum shader_time_shader_type type, bool is_glsl_sh)
{
   int shader_time_index = brw->shader_time.num_entries++;
   assert(shader_time_index < brw->shader_time.max_entries);
   brw->shader_time.types[shader_time_index] = type;

   const char *name;
   if (prog->Id == 0) {
      name = "ff";
   } else if (is_glsl_sh) {
      name = prog->info.label ?
         ralloc_strdup(brw->shader_time.names, prog->info.label) : "glsl";
   } else {
      name = "prog";
   }

   brw->shader_time.names[shader_time_index] = name;
   brw->shader_time.ids[shader_time_index] = prog->Id;

   return shader_time_index;
}
void
brw_destroy_shader_time(struct brw_context *brw)
{
   brw_bo_unreference(brw->shader_time.bo);
   brw->shader_time.bo = NULL;
}
void
brw_stage_prog_data_free(const void *p)
{
   struct brw_stage_prog_data *prog_data = (struct brw_stage_prog_data *)p;

   ralloc_free(prog_data->param);
   ralloc_free(prog_data->pull_param);
}
void
brw_dump_arb_asm(const char *stage, struct gl_program *prog)
{
   fprintf(stderr, "ARB_%s_program %d ir for native %s shader\n",
           stage, prog->Id, stage);
   _mesa_print_program(prog);
}
void
brw_setup_tex_for_precompile(struct brw_context *brw,
                             struct brw_sampler_prog_key_data *tex,
                             struct gl_program *prog)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const bool has_shader_channel_select = devinfo->is_haswell || devinfo->gen >= 8;
   unsigned sampler_count = util_last_bit(prog->SamplersUsed);
   for (unsigned i = 0; i < sampler_count; i++) {
      if (!has_shader_channel_select && (prog->ShadowSamplers & (1 << i))) {
         /* Assume DEPTH_TEXTURE_MODE is the default: X, X, X, 1 */
         tex->swizzles[i] =
            MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_ONE);
      } else {
         /* Color sampler: assume no swizzling. */
         tex->swizzles[i] = SWIZZLE_XYZW;
      }
   }
}
/**
 * Sets up the starting offsets for the groups of binding table entries
 * common to all pipeline stages.
 *
 * Unused groups are initialized to 0xd0d0d0d0 to make it obvious that they're
 * unused but also make sure that addition of small offsets to them will
 * trigger some of our asserts that surface indices are < BRW_MAX_SURFACES.
 */
uint32_t
brw_assign_common_binding_table_offsets(const struct gen_device_info *devinfo,
                                        const struct gl_program *prog,
                                        struct brw_stage_prog_data *stage_prog_data,
                                        uint32_t next_binding_table_offset)
{
   int num_textures = util_last_bit(prog->SamplersUsed);

   stage_prog_data->binding_table.texture_start = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   if (prog->info.num_ubos) {
      assert(prog->info.num_ubos <= BRW_MAX_UBO);
      stage_prog_data->binding_table.ubo_start = next_binding_table_offset;
      next_binding_table_offset += prog->info.num_ubos;
   } else {
      stage_prog_data->binding_table.ubo_start = 0xd0d0d0d0;
   }

   if (prog->info.num_ssbos || prog->info.num_abos) {
      assert(prog->info.num_abos <= BRW_MAX_ABO);
      assert(prog->info.num_ssbos <= BRW_MAX_SSBO);
      stage_prog_data->binding_table.ssbo_start = next_binding_table_offset;
      next_binding_table_offset += prog->info.num_abos + prog->info.num_ssbos;
   } else {
      stage_prog_data->binding_table.ssbo_start = 0xd0d0d0d0;
   }

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      stage_prog_data->binding_table.shader_time_start = next_binding_table_offset;
      next_binding_table_offset++;
   } else {
      stage_prog_data->binding_table.shader_time_start = 0xd0d0d0d0;
   }

   if (prog->info.uses_texture_gather) {
      if (devinfo->gen >= 8) {
         stage_prog_data->binding_table.gather_texture_start =
            stage_prog_data->binding_table.texture_start;
      } else {
         stage_prog_data->binding_table.gather_texture_start = next_binding_table_offset;
         next_binding_table_offset += num_textures;
      }
   } else {
      stage_prog_data->binding_table.gather_texture_start = 0xd0d0d0d0;
   }

   if (prog->info.num_images) {
      stage_prog_data->binding_table.image_start = next_binding_table_offset;
      next_binding_table_offset += prog->info.num_images;
   } else {
      stage_prog_data->binding_table.image_start = 0xd0d0d0d0;
   }

   /* This may or may not be used depending on how the compile goes. */
   stage_prog_data->binding_table.pull_constants_start = next_binding_table_offset;
   next_binding_table_offset++;

   /* Plane 0 is just the regular texture section */
   stage_prog_data->binding_table.plane_start[0] = stage_prog_data->binding_table.texture_start;

   stage_prog_data->binding_table.plane_start[1] = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   stage_prog_data->binding_table.plane_start[2] = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   /* prog_data->base.binding_table.size will be set by brw_mark_surface_used. */

   assert(next_binding_table_offset <= BRW_MAX_SURFACES);
   return next_binding_table_offset;
}
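/* Example layout (illustrative): a fragment program with 2 textures, 1 UBO,
 * and no SSBOs, images, gathers, or shader-time debugging, starting from
 * offset 0, ends up with texture_start = 0, ubo_start = 2, pull_constants_start
 * = 3, plane_start[1] = 4, plane_start[2] = 6, and the function returns 8;
 * every unused group stays at the 0xd0d0d0d0 poison value.
 */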
void
brw_program_serialize_nir(struct gl_context *ctx, struct gl_program *prog)
{
   struct blob writer;
   blob_init(&writer);
   nir_serialize(&writer, prog->nir);
   prog->driver_cache_blob = ralloc_size(NULL, writer.size);
   memcpy(prog->driver_cache_blob, writer.data, writer.size);
   prog->driver_cache_blob_size = writer.size;
   blob_finish(&writer);
}
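/* The blob is copied into plain ralloc storage so the temporary blob buffer
 * can be freed immediately; the copy presumably also serves as the driver's
 * payload for the on-disk shader cache, and brw_program_deserialize_nir()
 * below rebuilds prog->nir from it on demand.
 */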
void
brw_program_deserialize_nir(struct gl_context *ctx, struct gl_program *prog,
                            gl_shader_stage stage)
{
   if (!prog->nir) {
      assert(prog->driver_cache_blob && prog->driver_cache_blob_size > 0);
      const struct nir_shader_compiler_options *options =
         ctx->Const.ShaderCompilerOptions[stage].NirOptions;
      struct blob_reader reader;
      blob_reader_init(&reader, prog->driver_cache_blob,
                       prog->driver_cache_blob_size);
      prog->nir = nir_deserialize(NULL, options, &reader);
   }

   if (prog->driver_cache_blob) {
      ralloc_free(prog->driver_cache_blob);
      prog->driver_cache_blob = NULL;
      prog->driver_cache_blob_size = 0;
   }
}