/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <sys/errno.h>

#include "main/condrender.h"
#include "main/mtypes.h"
#include "main/state.h"
#include "brw_context.h"
#include "brw_draw.h"
#include "brw_state.h"
#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "brw_defines.h"

static void
prepare_indirect_gpgpu_walker(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   GLintptr indirect_offset = brw->compute.num_work_groups_offset;
   struct brw_bo *bo = brw->compute.num_work_groups_bo;
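
   /* The application's glDispatchComputeIndirect() parameters live in this
    * BO: three tightly packed GLuints giving the X/Y/Z workgroup counts.
    */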
   brw_load_register_mem(brw, GEN7_GPGPU_DISPATCHDIMX, bo, indirect_offset + 0);
   brw_load_register_mem(brw, GEN7_GPGPU_DISPATCHDIMY, bo, indirect_offset + 4);
   brw_load_register_mem(brw, GEN7_GPGPU_DISPATCHDIMZ, bo, indirect_offset + 8);

   if (devinfo->gen > 7)
      return;
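
   /* Gen7 has no way for GPGPU_WALKER to skip a zero-sized dispatch on its
    * own, so build up an MI_PREDICATE that is true only when all three
    * dimensions are non-zero:
    *
    *    predicate = !(x_size == 0 || y_size == 0 || z_size == 0)
    *
    * The walker is then emitted with GEN7_GPGPU_PREDICATE_ENABLE set.
    */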

   /* Clear upper 32-bits of SRC0 and all 64-bits of SRC1 */
   BEGIN_BATCH(7);
   OUT_BATCH(MI_LOAD_REGISTER_IMM | (7 - 2));
   OUT_BATCH(MI_PREDICATE_SRC0 + 4);
   OUT_BATCH(0u);
   OUT_BATCH(MI_PREDICATE_SRC1 + 0);
   OUT_BATCH(0u);
   OUT_BATCH(MI_PREDICATE_SRC1 + 4);
   OUT_BATCH(0u);
   ADVANCE_BATCH();

   /* Load compute_dispatch_indirect_x_size into SRC0 */
   brw_load_register_mem(brw, MI_PREDICATE_SRC0, bo, indirect_offset + 0);

   /* predicate = (compute_dispatch_indirect_x_size == 0); */
   BEGIN_BATCH(1);
   OUT_BATCH(GEN7_MI_PREDICATE |
             MI_PREDICATE_LOADOP_LOAD |
             MI_PREDICATE_COMBINEOP_SET |
             MI_PREDICATE_COMPAREOP_SRCS_EQUAL);
   ADVANCE_BATCH();

   /* Load compute_dispatch_indirect_y_size into SRC0 */
   brw_load_register_mem(brw, MI_PREDICATE_SRC0, bo, indirect_offset + 4);

   /* predicate |= (compute_dispatch_indirect_y_size == 0); */
   BEGIN_BATCH(1);
   OUT_BATCH(GEN7_MI_PREDICATE |
             MI_PREDICATE_LOADOP_LOAD |
             MI_PREDICATE_COMBINEOP_OR |
             MI_PREDICATE_COMPAREOP_SRCS_EQUAL);
   ADVANCE_BATCH();

   /* Load compute_dispatch_indirect_z_size into SRC0 */
   brw_load_register_mem(brw, MI_PREDICATE_SRC0, bo, indirect_offset + 8);

   /* predicate |= (compute_dispatch_indirect_z_size == 0); */
   BEGIN_BATCH(1);
   OUT_BATCH(GEN7_MI_PREDICATE |
             MI_PREDICATE_LOADOP_LOAD |
             MI_PREDICATE_COMBINEOP_OR |
             MI_PREDICATE_COMPAREOP_SRCS_EQUAL);
   ADVANCE_BATCH();

   /* predicate = !predicate; */
   BEGIN_BATCH(1);
   OUT_BATCH(GEN7_MI_PREDICATE |
             MI_PREDICATE_LOADOP_LOADINV |
             MI_PREDICATE_COMBINEOP_OR |
             MI_PREDICATE_COMPAREOP_FALSE);
   ADVANCE_BATCH();
}

static void
brw_emit_gpgpu_walker(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const struct brw_cs_prog_data *prog_data =
      brw_cs_prog_data(brw->cs.base.prog_data);

   const GLuint *num_groups = brw->compute.num_work_groups;
   uint32_t indirect_flag;

   if (brw->compute.num_work_groups_bo == NULL) {
      indirect_flag = 0;
   } else {
      indirect_flag =
         GEN7_GPGPU_INDIRECT_PARAMETER_ENABLE |
         (devinfo->gen == 7 ? GEN7_GPGPU_PREDICATE_ENABLE : 0);
      prepare_indirect_gpgpu_walker(brw);
   }

   const unsigned simd_size = prog_data->simd_size;
   unsigned group_size = prog_data->local_size[0] *
      prog_data->local_size[1] * prog_data->local_size[2];
   unsigned thread_width_max =
      (group_size + simd_size - 1) / simd_size;

   uint32_t right_mask = 0xffffffffu >> (32 - simd_size);
   const unsigned right_non_aligned = group_size & (simd_size - 1);
   if (right_non_aligned != 0)
      right_mask >>= (simd_size - right_non_aligned);
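
   /* right_mask now enables only the live channels in the final, possibly
    * partial, SIMD thread: e.g. with simd_size = 16 and group_size = 20,
    * thread_width_max = 2 and the second thread runs only 20 % 16 = 4
    * invocations, so right_mask = 0x0000000f.
    */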

   /* GPGPU_WALKER is 11 dwords on gen7 and 15 on gen8+, which adds the
    * indirect-data and MBZ dwords emitted below.
    */
   uint32_t dwords = devinfo->gen < 8 ? 11 : 15;
   BEGIN_BATCH(dwords);
   OUT_BATCH(GPGPU_WALKER << 16 | (dwords - 2) | indirect_flag);
   OUT_BATCH(0);
   if (devinfo->gen >= 8) {
      OUT_BATCH(0);            /* Indirect Data Length */
      OUT_BATCH(0);            /* Indirect Data Start Address */
   }
   assert(thread_width_max <= brw->screen->devinfo.max_cs_threads);
   OUT_BATCH(SET_FIELD(simd_size / 16, GPGPU_WALKER_SIMD_SIZE) |
             SET_FIELD(thread_width_max - 1, GPGPU_WALKER_THREAD_WIDTH_MAX));
   OUT_BATCH(0);                  /* Thread Group ID Starting X */
   if (devinfo->gen >= 8)
      OUT_BATCH(0);               /* MBZ */
   OUT_BATCH(num_groups[0]);      /* Thread Group ID X Dimension */
   OUT_BATCH(0);                  /* Thread Group ID Starting Y */
   if (devinfo->gen >= 8)
      OUT_BATCH(0);               /* MBZ */
   OUT_BATCH(num_groups[1]);      /* Thread Group ID Y Dimension */
   OUT_BATCH(0);                  /* Thread Group ID Starting/Resume Z */
   OUT_BATCH(num_groups[2]);      /* Thread Group ID Z Dimension */
   OUT_BATCH(right_mask);         /* Right Execution Mask */
   OUT_BATCH(0xffffffff);         /* Bottom Execution Mask */
   ADVANCE_BATCH();

   BEGIN_BATCH(2);
   OUT_BATCH(MEDIA_STATE_FLUSH << 16 | (2 - 2));
   OUT_BATCH(0);
   ADVANCE_BATCH();
}

static void
brw_dispatch_compute_common(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   bool fail_next = false;

   if (!_mesa_check_conditional_render(ctx))
      return;

   if (ctx->NewState)
      _mesa_update_state(ctx);

   brw_validate_textures(brw);

   brw_predraw_resolve_inputs(brw);

   /* Flush the batch if the batch/state buffers are nearly full.  We can
    * grow them if needed, but this is not free, so we'd like to avoid it.
    */
   intel_batchbuffer_require_space(brw, 600, RENDER_RING);
   brw_require_statebuffer_space(brw, 2500);
   intel_batchbuffer_save_state(brw);
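
   /* If the dispatch below overflows the aperture, we roll the batch back
    * to the state saved here, flush what we have, and retry the dispatch
    * once in a fresh batch.
    */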

 retry:
   brw->batch.no_wrap = true;
   brw_upload_compute_state(brw);

   brw_emit_gpgpu_walker(brw);

   brw->batch.no_wrap = false;

   if (!brw_batch_has_aperture_space(brw, 0)) {
      if (!fail_next) {
         intel_batchbuffer_reset_to_saved(brw);
         intel_batchbuffer_flush(brw);
         fail_next = true;
         goto retry;
      } else {
         int ret = intel_batchbuffer_flush(brw);
         WARN_ONCE(ret == -ENOSPC,
                   "i965: Single compute shader dispatch "
                   "exceeded available aperture space\n");
      }
   }

   /* Now that we know we haven't run out of aperture space, we can safely
    * reset the dirty bits.
    */
   brw_compute_state_finished(brw);

   if (brw->always_flush_batch)
      intel_batchbuffer_flush(brw);

   brw_program_cache_check_size(brw);

   /* Note: since compute shaders can't write to framebuffers, there's no need
    * to call brw_postdraw_set_buffers_need_resolve().
    */
}

static void
brw_dispatch_compute(struct gl_context *ctx, const GLuint *num_groups)
{
   struct brw_context *brw = brw_context(ctx);

   brw->compute.num_work_groups_bo = NULL;
   brw->compute.num_work_groups = num_groups;
   ctx->NewDriverState |= BRW_NEW_CS_WORK_GROUPS;

   brw_dispatch_compute_common(ctx);
}

static void
brw_dispatch_compute_indirect(struct gl_context *ctx, GLintptr indirect)
{
   struct brw_context *brw = brw_context(ctx);
   static const GLuint indirect_group_counts[3] = { 0, 0, 0 };
   struct gl_buffer_object *indirect_buffer = ctx->DispatchIndirectBuffer;
   struct brw_bo *bo =
      intel_bufferobj_buffer(brw,
                             intel_buffer_object(indirect_buffer),
                             indirect, 3 * sizeof(GLuint), false);
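
   /* For an indirect dispatch the CPU never sees the group counts, so point
    * num_work_groups at zeros; flagging BRW_NEW_CS_WORK_GROUPS lets state
    * upload source gl_NumWorkGroups from the BO instead.
    */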

   brw->compute.num_work_groups_bo = bo;
   brw->compute.num_work_groups_offset = indirect;
   brw->compute.num_work_groups = indirect_group_counts;
   ctx->NewDriverState |= BRW_NEW_CS_WORK_GROUPS;

   brw_dispatch_compute_common(ctx);
}

void
brw_init_compute_functions(struct dd_function_table *functions)
{
   functions->DispatchCompute = brw_dispatch_compute;
   functions->DispatchComputeIndirect = brw_dispatch_compute_indirect;
}