mesa.git: src/mesa/drivers/dri/i965/brw_compute.c
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <sys/errno.h>

#include "main/condrender.h"
#include "main/mtypes.h"
#include "main/state.h"
#include "brw_context.h"
#include "brw_draw.h"
#include "brw_state.h"
#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "brw_defines.h"

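/* Emit the GPGPU_WALKER command that dispatches the compute work groups,
 * taking the group counts either directly from brw->compute.num_work_groups
 * or from the indirect buffer object set up by DispatchComputeIndirect.
 */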
static void
brw_emit_gpgpu_walker(struct brw_context *brw)
{
   const struct brw_cs_prog_data *prog_data = brw->cs.prog_data;

   const GLuint *num_groups = brw->compute.num_work_groups;
   uint32_t indirect_flag;

   if (brw->compute.num_work_groups_bo == NULL) {
      indirect_flag = 0;
   } else {
      GLintptr indirect_offset = brw->compute.num_work_groups_offset;
      drm_intel_bo *bo = brw->compute.num_work_groups_bo;

      indirect_flag = GEN7_GPGPU_INDIRECT_PARAMETER_ENABLE;

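      /* Load the X/Y/Z group counts from the indirect buffer object into the
       * GPGPU_DISPATCHDIM registers, so the walker reads its dimensions on
       * the GPU rather than from the command itself.
       */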
      brw_load_register_mem(brw, GEN7_GPGPU_DISPATCHDIMX, bo,
                            I915_GEM_DOMAIN_VERTEX, 0,
                            indirect_offset + 0);
      brw_load_register_mem(brw, GEN7_GPGPU_DISPATCHDIMY, bo,
                            I915_GEM_DOMAIN_VERTEX, 0,
                            indirect_offset + 4);
      brw_load_register_mem(brw, GEN7_GPGPU_DISPATCHDIMZ, bo,
                            I915_GEM_DOMAIN_VERTEX, 0,
                            indirect_offset + 8);
   }

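   /* A work group runs as ceil(group_size / simd_size) hardware threads,
    * each covering simd_size invocations.
    */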
   const unsigned simd_size = prog_data->simd_size;
   unsigned group_size = prog_data->local_size[0] *
      prog_data->local_size[1] * prog_data->local_size[2];
   unsigned thread_width_max =
      (group_size + simd_size - 1) / simd_size;

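   /* Build the execution mask for the rightmost thread: when the group size
    * is not a multiple of the SIMD size, the surplus channels of that last
    * thread must be masked off.
    */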
   uint32_t right_mask = 0xffffffffu >> (32 - simd_size);
   const unsigned right_non_aligned = group_size & (simd_size - 1);
   if (right_non_aligned != 0)
      right_mask >>= (simd_size - right_non_aligned);

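   /* GPGPU_WALKER is 11 DWords on Gen7/7.5 and 15 on Gen8+, which adds the
    * indirect data length/start fields and two MBZ DWords.
    */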
   uint32_t dwords = brw->gen < 8 ? 11 : 15;
   BEGIN_BATCH(dwords);
   OUT_BATCH(GPGPU_WALKER << 16 | (dwords - 2) | indirect_flag);
   OUT_BATCH(0);
   if (brw->gen >= 8) {
      OUT_BATCH(0);                     /* Indirect Data Length */
      OUT_BATCH(0);                     /* Indirect Data Start Address */
   }
   assert(thread_width_max <= brw->max_cs_threads);
   OUT_BATCH(SET_FIELD(simd_size / 16, GPGPU_WALKER_SIMD_SIZE) |
             SET_FIELD(thread_width_max - 1, GPGPU_WALKER_THREAD_WIDTH_MAX));
   OUT_BATCH(0);                        /* Thread Group ID Starting X */
   if (brw->gen >= 8)
      OUT_BATCH(0);                     /* MBZ */
   OUT_BATCH(num_groups[0]);            /* Thread Group ID X Dimension */
   OUT_BATCH(0);                        /* Thread Group ID Starting Y */
   if (brw->gen >= 8)
      OUT_BATCH(0);                     /* MBZ */
   OUT_BATCH(num_groups[1]);            /* Thread Group ID Y Dimension */
   OUT_BATCH(0);                        /* Thread Group ID Starting/Resume Z */
   OUT_BATCH(num_groups[2]);            /* Thread Group ID Z Dimension */
   OUT_BATCH(right_mask);               /* Right Execution Mask */
   OUT_BATCH(0xffffffff);               /* Bottom Execution Mask */
   ADVANCE_BATCH();

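   /* Follow the walker with a MEDIA_STATE_FLUSH so the media pipeline state
    * is flushed before subsequent commands.
    */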
   BEGIN_BATCH(2);
   OUT_BATCH(MEDIA_STATE_FLUSH << 16 | (2 - 2));
   OUT_BATCH(0);
   ADVANCE_BATCH();
}


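/* Shared dispatch path for the direct and indirect entry points: validates
 * state, uploads compute state, and emits the walker, retrying once in a
 * fresh batch if the dispatch doesn't fit.
 */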
static void
brw_dispatch_compute_common(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   int estimated_buffer_space_needed;
   bool fail_next = false;

   if (!_mesa_check_conditional_render(ctx))
      return;

   if (ctx->NewState)
      _mesa_update_state(ctx);

   brw_validate_textures(brw);

   const int sampler_state_size = 16; /* 16 bytes */
   estimated_buffer_space_needed = 512; /* batchbuffer commands */
   estimated_buffer_space_needed += (BRW_MAX_TEX_UNIT *
                                     (sampler_state_size +
                                      sizeof(struct gen5_sampler_default_color)));
   estimated_buffer_space_needed += 1024; /* push constants */
   estimated_buffer_space_needed += 512; /* misc. pad */

   /* Flush the batch if it's approaching full, so that we don't wrap while
    * we've got validated state that needs to be in the same batch as the
    * primitives.
    */
   intel_batchbuffer_require_space(brw, estimated_buffer_space_needed,
                                   RENDER_RING);
   intel_batchbuffer_save_state(brw);

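   /* If the uploaded state plus the walker don't fit in the aperture, roll
    * back to the saved batch state, flush, and retry once in a fresh batch.
    */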
 retry:
   brw->no_batch_wrap = true;
   brw_upload_compute_state(brw);

   brw_emit_gpgpu_walker(brw);

   brw->no_batch_wrap = false;

   if (dri_bufmgr_check_aperture_space(&brw->batch.bo, 1)) {
      if (!fail_next) {
         intel_batchbuffer_reset_to_saved(brw);
         intel_batchbuffer_flush(brw);
         fail_next = true;
         goto retry;
      } else {
         if (intel_batchbuffer_flush(brw) == -ENOSPC) {
            static bool warned = false;

            if (!warned) {
               fprintf(stderr, "i965: Single compute shader dispatch "
                       "exceeded available aperture space\n");
               warned = true;
            }
         }
      }
   }

   /* Now that we know we haven't run out of aperture space, we can safely
    * reset the dirty bits.
    */
   brw_compute_state_finished(brw);

   if (brw->always_flush_batch)
      intel_batchbuffer_flush(brw);

   brw_state_cache_check_size(brw);

   /* Note: since compute shaders can't write to framebuffers, there's no need
    * to call brw_postdraw_set_buffers_need_resolve().
    */
}

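/* glDispatchCompute(): the group counts come straight from the API call, so
 * no indirect buffer object is involved.
 */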
static void
brw_dispatch_compute(struct gl_context *ctx, const GLuint *num_groups)
{
   struct brw_context *brw = brw_context(ctx);

   brw->compute.num_work_groups_bo = NULL;
   brw->compute.num_work_groups = num_groups;
   ctx->NewDriverState |= BRW_NEW_CS_WORK_GROUPS;

   brw_dispatch_compute_common(ctx);
}

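/* glDispatchComputeIndirect(): the group counts live in the bound
 * GL_DISPATCH_INDIRECT_BUFFER at byte offset 'indirect'; the zero-filled
 * array merely gives brw->compute.num_work_groups a valid CPU-side value.
 */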
static void
brw_dispatch_compute_indirect(struct gl_context *ctx, GLintptr indirect)
{
   struct brw_context *brw = brw_context(ctx);
   static const GLuint indirect_group_counts[3] = { 0, 0, 0 };
   struct gl_buffer_object *indirect_buffer = ctx->DispatchIndirectBuffer;
   drm_intel_bo *bo =
      intel_bufferobj_buffer(brw,
                             intel_buffer_object(indirect_buffer),
                             indirect, 3 * sizeof(GLuint));

   brw->compute.num_work_groups_bo = bo;
   brw->compute.num_work_groups_offset = indirect;
   brw->compute.num_work_groups = indirect_group_counts;
   ctx->NewDriverState |= BRW_NEW_CS_WORK_GROUPS;

   brw_dispatch_compute_common(ctx);
}

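/* Hook the compute dispatch entry points into the driver's function table. */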
void
brw_init_compute_functions(struct dd_function_table *functions)
{
   functions->DispatchCompute = brw_dispatch_compute;
   functions->DispatchComputeIndirect = brw_dispatch_compute_indirect;
}