/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "ir.h"
#include "ir_uniform.h"
#include "linker.h"
#include "program/hash_table.h"
#include "main/macros.h"
32 * Atomic counter as seen by the program.
34 struct active_atomic_counter
{
40 * Atomic counter buffer referenced by the program. There is a one
41 * to one correspondence between these and the objects that can be
42 * queried using glGetActiveAtomicCounterBufferiv().
44 struct active_atomic_buffer
{
45 active_atomic_buffer()
46 : counters(0), num_counters(0), stage_references(), size(0)
49 ~active_atomic_buffer()
54 void push_back(unsigned id
, ir_variable
*var
)
56 counters
= (active_atomic_counter
*)
57 realloc(counters
, sizeof(active_atomic_counter
) * (num_counters
+ 1));
59 counters
[num_counters
].id
= id
;
60 counters
[num_counters
].var
= var
;
64 active_atomic_counter
*counters
;
65 unsigned num_counters
;
66 unsigned stage_references
[MESA_SHADER_TYPES
];
71 cmp_actives(const void *a
, const void *b
)
73 const active_atomic_counter
*const first
= (active_atomic_counter
*) a
;
74 const active_atomic_counter
*const second
= (active_atomic_counter
*) b
;
76 return int(first
->var
->atomic
.offset
) - int(second
->var
->atomic
.offset
);
80 check_atomic_counters_overlap(const ir_variable
*x
, const ir_variable
*y
)
82 return ((x
->atomic
.offset
>= y
->atomic
.offset
&&
83 x
->atomic
.offset
< y
->atomic
.offset
+ y
->type
->atomic_size()) ||
84 (y
->atomic
.offset
>= x
->atomic
.offset
&&
85 y
->atomic
.offset
< x
->atomic
.offset
+ x
->type
->atomic_size()));
88 active_atomic_buffer
*
89 find_active_atomic_counters(struct gl_context
*ctx
,
90 struct gl_shader_program
*prog
,
91 unsigned *num_buffers
)
93 active_atomic_buffer
*const buffers
=
94 new active_atomic_buffer
[ctx
->Const
.MaxAtomicBufferBindings
];
98 for (unsigned i
= 0; i
< MESA_SHADER_TYPES
; ++i
) {
99 struct gl_shader
*sh
= prog
->_LinkedShaders
[i
];
103 foreach_list(node
, sh
->ir
) {
104 ir_variable
*var
= ((ir_instruction
*)node
)->as_variable();
106 if (var
&& var
->type
->contains_atomic()) {
108 bool found
= prog
->UniformHash
->get(id
, var
->name
);
110 active_atomic_buffer
*buf
= &buffers
[var
->binding
];
112 /* If this is the first time the buffer is used, increment
113 * the counter of buffers used.
118 buf
->push_back(id
, var
);
120 buf
->stage_references
[i
]++;
121 buf
->size
= MAX2(buf
->size
, var
->atomic
.offset
+
122 var
->type
->atomic_size());
127 for (unsigned i
= 0; i
< ctx
->Const
.MaxAtomicBufferBindings
; i
++) {
128 if (buffers
[i
].size
== 0)
131 qsort(buffers
[i
].counters
, buffers
[i
].num_counters
,
132 sizeof(active_atomic_counter
),
135 for (unsigned j
= 1; j
< buffers
[i
].num_counters
; j
++) {
136 /* If an overlapping counter found, it must be a reference to the
137 * same counter from a different shader stage.
139 if (check_atomic_counters_overlap(buffers
[i
].counters
[j
-1].var
,
140 buffers
[i
].counters
[j
].var
)
141 && strcmp(buffers
[i
].counters
[j
-1].var
->name
,
142 buffers
[i
].counters
[j
].var
->name
) != 0) {
143 linker_error(prog
, "Atomic counter %s declared at offset %d "
144 "which is already in use.",
145 buffers
[i
].counters
[j
].var
->name
,
146 buffers
[i
].counters
[j
].var
->atomic
.offset
);
155 link_assign_atomic_counter_resources(struct gl_context
*ctx
,
156 struct gl_shader_program
*prog
)
158 unsigned num_buffers
;
159 active_atomic_buffer
*abs
=
160 find_active_atomic_counters(ctx
, prog
, &num_buffers
);
162 prog
->AtomicBuffers
= rzalloc_array(prog
, gl_active_atomic_buffer
,
164 prog
->NumAtomicBuffers
= num_buffers
;
167 for (unsigned binding
= 0;
168 binding
< ctx
->Const
.MaxAtomicBufferBindings
;
171 /* If the binding was not used, skip.
173 if (abs
[binding
].size
== 0)
176 active_atomic_buffer
&ab
= abs
[binding
];
177 gl_active_atomic_buffer
&mab
= prog
->AtomicBuffers
[i
];
179 /* Assign buffer-specific fields. */
180 mab
.Binding
= binding
;
181 mab
.MinimumSize
= ab
.size
;
182 mab
.Uniforms
= rzalloc_array(prog
->AtomicBuffers
, GLuint
,
184 mab
.NumUniforms
= ab
.num_counters
;
186 /* Assign counter-specific fields. */
187 for (unsigned j
= 0; j
< ab
.num_counters
; j
++) {
188 ir_variable
*const var
= ab
.counters
[j
].var
;
189 const unsigned id
= ab
.counters
[j
].id
;
190 gl_uniform_storage
*const storage
= &prog
->UniformStorage
[id
];
192 mab
.Uniforms
[j
] = id
;
193 var
->atomic
.buffer_index
= i
;
194 storage
->atomic_buffer_index
= i
;
195 storage
->offset
= var
->atomic
.offset
;
196 storage
->array_stride
= (var
->type
->is_array() ?
197 var
->type
->element_type()->atomic_size() : 0);
200 /* Assign stage-specific fields. */
201 for (unsigned j
= 0; j
< MESA_SHADER_TYPES
; ++j
)
202 mab
.StageReferences
[j
] =
203 (ab
.stage_references
[j
] ? GL_TRUE
: GL_FALSE
);
209 assert(i
== num_buffers
);
213 link_check_atomic_counter_resources(struct gl_context
*ctx
,
214 struct gl_shader_program
*prog
)
216 STATIC_ASSERT(MESA_SHADER_TYPES
== 3);
217 static const char *shader_names
[MESA_SHADER_TYPES
] = {
218 "vertex", "geometry", "fragment"
220 const unsigned max_atomic_counters
[MESA_SHADER_TYPES
] = {
221 ctx
->Const
.VertexProgram
.MaxAtomicCounters
,
222 ctx
->Const
.GeometryProgram
.MaxAtomicCounters
,
223 ctx
->Const
.FragmentProgram
.MaxAtomicCounters
225 const unsigned max_atomic_buffers
[MESA_SHADER_TYPES
] = {
226 ctx
->Const
.VertexProgram
.MaxAtomicBuffers
,
227 ctx
->Const
.GeometryProgram
.MaxAtomicBuffers
,
228 ctx
->Const
.FragmentProgram
.MaxAtomicBuffers
230 unsigned num_buffers
;
231 active_atomic_buffer
*const abs
=
232 find_active_atomic_counters(ctx
, prog
, &num_buffers
);
233 unsigned atomic_counters
[MESA_SHADER_TYPES
] = {};
234 unsigned atomic_buffers
[MESA_SHADER_TYPES
] = {};
235 unsigned total_atomic_counters
= 0;
236 unsigned total_atomic_buffers
= 0;
238 /* Sum the required resources. Note that this counts buffers and
239 * counters referenced by several shader stages multiple times
240 * against the combined limit -- That's the behavior the spec
243 for (unsigned i
= 0; i
< ctx
->Const
.MaxAtomicBufferBindings
; i
++) {
244 if (abs
[i
].size
== 0)
247 for (unsigned j
= 0; j
< MESA_SHADER_TYPES
; ++j
) {
248 const unsigned n
= abs
[i
].stage_references
[j
];
251 atomic_counters
[j
] += n
;
252 total_atomic_counters
+= n
;
254 total_atomic_buffers
++;
259 /* Check that they are within the supported limits. */
260 for (unsigned i
= 0; i
< MESA_SHADER_TYPES
; i
++) {
261 if (atomic_counters
[i
] > max_atomic_counters
[i
])
262 linker_error(prog
, "Too many %s shader atomic counters",
265 if (atomic_buffers
[i
] > max_atomic_buffers
[i
])
266 linker_error(prog
, "Too many %s shader atomic counter buffers",
270 if (total_atomic_counters
> ctx
->Const
.MaxCombinedAtomicCounters
)
271 linker_error(prog
, "Too many combined atomic counters");
273 if (total_atomic_buffers
> ctx
->Const
.MaxCombinedAtomicBuffers
)
274 linker_error(prog
, "Too many combined atomic buffers");