/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <sys/errno.h>

#include "main/condrender.h"
#include "main/mtypes.h"
#include "main/state.h"
#include "brw_context.h"
#include "brw_draw.h"
#include "brw_state.h"
#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "brw_defines.h"

static void
prepare_indirect_gpgpu_walker(struct brw_context *brw)
{
   GLintptr indirect_offset = brw->compute.num_work_groups_offset;
   drm_intel_bo *bo = brw->compute.num_work_groups_bo;
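
   /* Copy the X/Y/Z group counts from the indirect buffer object into the
    * GPGPU_DISPATCHDIM* registers that GPGPU_WALKER reads when its
    * indirect-parameter bit is set.  The three counts are consecutive
    * 32-bit values starting at indirect_offset.
    */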
   brw_load_register_mem(brw, GEN7_GPGPU_DISPATCHDIMX, bo,
                         I915_GEM_DOMAIN_VERTEX, 0,
                         indirect_offset + 0);
   brw_load_register_mem(brw, GEN7_GPGPU_DISPATCHDIMY, bo,
                         I915_GEM_DOMAIN_VERTEX, 0,
                         indirect_offset + 4);
   brw_load_register_mem(brw, GEN7_GPGPU_DISPATCHDIMZ, bo,
                         I915_GEM_DOMAIN_VERTEX, 0,
                         indirect_offset + 8);

   if (brw->gen > 7)
      return;
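
   /* Gen7 has no direct way to skip an empty indirect dispatch, so build the
    * MI predicate below: it is set if any of the three group counts is zero,
    * then inverted, so the predicated GPGPU_WALKER only executes when all
    * three dimensions are nonzero (see GEN7_GPGPU_PREDICATE_ENABLE below).
    */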

   /* Clear upper 32-bits of SRC0 and all 64-bits of SRC1 */
   BEGIN_BATCH(7);
   OUT_BATCH(MI_LOAD_REGISTER_IMM | (7 - 2));
   OUT_BATCH(MI_PREDICATE_SRC0 + 4);
   OUT_BATCH(0u);
   OUT_BATCH(MI_PREDICATE_SRC1 + 0);
   OUT_BATCH(0u);
   OUT_BATCH(MI_PREDICATE_SRC1 + 4);
   OUT_BATCH(0u);
   ADVANCE_BATCH();

   /* Load compute_dispatch_indirect_x_size into SRC0 */
   brw_load_register_mem(brw, MI_PREDICATE_SRC0, bo,
                         I915_GEM_DOMAIN_INSTRUCTION, 0,
                         indirect_offset + 0);

   /* predicate = (compute_dispatch_indirect_x_size == 0); */
   BEGIN_BATCH(1);
   OUT_BATCH(GEN7_MI_PREDICATE |
             MI_PREDICATE_LOADOP_LOAD |
             MI_PREDICATE_COMBINEOP_SET |
             MI_PREDICATE_COMPAREOP_SRCS_EQUAL);
   ADVANCE_BATCH();

   /* Load compute_dispatch_indirect_y_size into SRC0 */
   brw_load_register_mem(brw, MI_PREDICATE_SRC0, bo,
                         I915_GEM_DOMAIN_INSTRUCTION, 0,
                         indirect_offset + 4);

   /* predicate |= (compute_dispatch_indirect_y_size == 0); */
   BEGIN_BATCH(1);
   OUT_BATCH(GEN7_MI_PREDICATE |
             MI_PREDICATE_LOADOP_LOAD |
             MI_PREDICATE_COMBINEOP_OR |
             MI_PREDICATE_COMPAREOP_SRCS_EQUAL);
   ADVANCE_BATCH();

   /* Load compute_dispatch_indirect_z_size into SRC0 */
   brw_load_register_mem(brw, MI_PREDICATE_SRC0, bo,
                         I915_GEM_DOMAIN_INSTRUCTION, 0,
                         indirect_offset + 8);

   /* predicate |= (compute_dispatch_indirect_z_size == 0); */
   BEGIN_BATCH(1);
   OUT_BATCH(GEN7_MI_PREDICATE |
             MI_PREDICATE_LOADOP_LOAD |
             MI_PREDICATE_COMBINEOP_OR |
             MI_PREDICATE_COMPAREOP_SRCS_EQUAL);
   ADVANCE_BATCH();

   /* predicate = !predicate; */
   BEGIN_BATCH(1);
   OUT_BATCH(GEN7_MI_PREDICATE |
             MI_PREDICATE_LOADOP_LOADINV |
             MI_PREDICATE_COMBINEOP_OR |
             MI_PREDICATE_COMPAREOP_FALSE);
   ADVANCE_BATCH();
}

static void
brw_emit_gpgpu_walker(struct brw_context *brw)
{
   const struct brw_cs_prog_data *prog_data =
      brw_cs_prog_data(brw->cs.base.prog_data);

   const GLuint *num_groups = brw->compute.num_work_groups;
   uint32_t indirect_flag;
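
   /* An indirect dispatch is signalled by a non-NULL num_work_groups_bo.  In
    * that case the walker reads its dimensions from the GPGPU_DISPATCHDIM*
    * registers loaded by prepare_indirect_gpgpu_walker(), and on Gen7 it is
    * additionally predicated so that empty dispatches are skipped.
    */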
   if (brw->compute.num_work_groups_bo == NULL) {
      indirect_flag = 0;
   } else {
      indirect_flag =
         GEN7_GPGPU_INDIRECT_PARAMETER_ENABLE |
         (brw->gen == 7 ? GEN7_GPGPU_PREDICATE_ENABLE : 0);
      prepare_indirect_gpgpu_walker(brw);
   }

   const unsigned simd_size = prog_data->simd_size;
   unsigned group_size = prog_data->local_size[0] *
      prog_data->local_size[1] * prog_data->local_size[2];
   unsigned thread_width_max =
      (group_size + simd_size - 1) / simd_size;
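
   /* The right execution mask enables only as many channels of the last
    * thread in a group as are actually needed: start with simd_size enabled
    * lanes and, when group_size is not a multiple of simd_size, shift out
    * the lanes beyond the remainder.
    */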
   uint32_t right_mask = 0xffffffffu >> (32 - simd_size);
   const unsigned right_non_aligned = group_size & (simd_size - 1);
   if (right_non_aligned != 0)
      right_mask >>= (simd_size - right_non_aligned);
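
   /* GPGPU_WALKER is 11 dwords on Gen7/7.5 and 15 on Gen8+, which adds the
    * two indirect-data fields and an extra MBZ dword after each of the
    * starting X and Y fields.
    */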
   uint32_t dwords = brw->gen < 8 ? 11 : 15;
   BEGIN_BATCH(dwords);
   OUT_BATCH(GPGPU_WALKER << 16 | (dwords - 2) | indirect_flag);
   OUT_BATCH(0);
   if (brw->gen >= 8) {
      OUT_BATCH(0);                  /* Indirect Data Length */
      OUT_BATCH(0);                  /* Indirect Data Start Address */
   }
   assert(thread_width_max <= brw->screen->devinfo.max_cs_threads);
   OUT_BATCH(SET_FIELD(simd_size / 16, GPGPU_WALKER_SIMD_SIZE) |
             SET_FIELD(thread_width_max - 1, GPGPU_WALKER_THREAD_WIDTH_MAX));
   OUT_BATCH(0);                     /* Thread Group ID Starting X */
   if (brw->gen >= 8)
      OUT_BATCH(0);                  /* MBZ */
   OUT_BATCH(num_groups[0]);         /* Thread Group ID X Dimension */
   OUT_BATCH(0);                     /* Thread Group ID Starting Y */
   if (brw->gen >= 8)
      OUT_BATCH(0);                  /* MBZ */
   OUT_BATCH(num_groups[1]);         /* Thread Group ID Y Dimension */
   OUT_BATCH(0);                     /* Thread Group ID Starting/Resume Z */
   OUT_BATCH(num_groups[2]);         /* Thread Group ID Z Dimension */
   OUT_BATCH(right_mask);            /* Right Execution Mask */
   OUT_BATCH(0xffffffff);            /* Bottom Execution Mask */
   ADVANCE_BATCH();

   BEGIN_BATCH(2);
   OUT_BATCH(MEDIA_STATE_FLUSH << 16 | (2 - 2));
   OUT_BATCH(0);
   ADVANCE_BATCH();
}

static void
brw_dispatch_compute_common(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   int estimated_buffer_space_needed;
   bool fail_next = false;

   if (!_mesa_check_conditional_render(ctx))
      return;

   if (ctx->NewState)
      _mesa_update_state(ctx);

   brw_validate_textures(brw);

   const int sampler_state_size = 16; /* 16 bytes */
   estimated_buffer_space_needed = 512; /* batchbuffer commands */
   estimated_buffer_space_needed += (BRW_MAX_TEX_UNIT *
                                     (sampler_state_size +
                                      sizeof(struct gen5_sampler_default_color)));
   estimated_buffer_space_needed += 1024; /* push constants */
   estimated_buffer_space_needed += 512; /* misc. pad */

   /* Flush the batch if it's approaching full, so that we don't wrap while
    * we've got validated state that needs to be in the same batch as the
    * primitives.
    */
   intel_batchbuffer_require_space(brw, estimated_buffer_space_needed,
                                   RENDER_RING);
   intel_batchbuffer_save_state(brw);

 retry:
   brw->no_batch_wrap = true;
   brw_upload_compute_state(brw);

   brw_emit_gpgpu_walker(brw);

   brw->no_batch_wrap = false;
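
   /* If the batch we just built does not fit in the GPU aperture, roll back
    * to the state saved above, flush, and rebuild it once in a fresh batch
    * before giving up and reporting the failure.
    */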
   if (dri_bufmgr_check_aperture_space(&brw->batch.bo, 1)) {
      if (!fail_next) {
         intel_batchbuffer_reset_to_saved(brw);
         intel_batchbuffer_flush(brw);
         fail_next = true;
         goto retry;
      } else {
         if (intel_batchbuffer_flush(brw) == -ENOSPC) {
            static bool warned = false;

            if (!warned) {
               fprintf(stderr, "i965: Single compute shader dispatch "
                       "exceeded available aperture space\n");
               warned = true;
            }
         }
      }
   }

   /* Now that we know we haven't run out of aperture space, we can safely
    * reset the dirty bits.
    */
   brw_compute_state_finished(brw);

   if (brw->always_flush_batch)
      intel_batchbuffer_flush(brw);

   brw_program_cache_check_size(brw);

   /* Note: since compute shaders can't write to framebuffers, there's no need
    * to call brw_postdraw_set_buffers_need_resolve().
    */
}

static void
brw_dispatch_compute(struct gl_context *ctx, const GLuint *num_groups)
{
   struct brw_context *brw = brw_context(ctx);

   brw->compute.num_work_groups_bo = NULL;
   brw->compute.num_work_groups = num_groups;
   ctx->NewDriverState |= BRW_NEW_CS_WORK_GROUPS;

   brw_dispatch_compute_common(ctx);
}

static void
brw_dispatch_compute_indirect(struct gl_context *ctx, GLintptr indirect)
{
   struct brw_context *brw = brw_context(ctx);
   static const GLuint indirect_group_counts[3] = { 0, 0, 0 };
   struct gl_buffer_object *indirect_buffer = ctx->DispatchIndirectBuffer;
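
   /* Resolve the GL buffer object bound to GL_DISPATCH_INDIRECT_BUFFER to
    * the drm BO backing the three GLuint group counts at `indirect`, so the
    * walker can read them on the GPU rather than on the CPU.
    */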
   drm_intel_bo *bo =
      intel_bufferobj_buffer(brw,
                             intel_buffer_object(indirect_buffer),
                             indirect, 3 * sizeof(GLuint));

   brw->compute.num_work_groups_bo = bo;
   brw->compute.num_work_groups_offset = indirect;
   brw->compute.num_work_groups = indirect_group_counts;
   ctx->NewDriverState |= BRW_NEW_CS_WORK_GROUPS;

   brw_dispatch_compute_common(ctx);
}

void
brw_init_compute_functions(struct dd_function_table *functions)
{
   functions->DispatchCompute = brw_dispatch_compute;
   functions->DispatchComputeIndirect = brw_dispatch_compute_indirect;
}