src/gallium/drivers/swr/swr_draw.cpp
/****************************************************************************
 * Copyright (C) 2015 Intel Corporation. All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 ***************************************************************************/

#include "swr_screen.h"
#include "swr_context.h"
#include "swr_resource.h"
#include "swr_fence.h"
#include "swr_query.h"
#include "jit_api.h"

#include "util/u_draw.h"
#include "util/u_prim.h"

/*
 * Convert mesa PIPE_PRIM_X to SWR enum PRIMITIVE_TOPOLOGY
 */
static INLINE enum PRIMITIVE_TOPOLOGY
swr_convert_prim_topology(const unsigned mode)
{
   switch (mode) {
   case PIPE_PRIM_POINTS:
      return TOP_POINT_LIST;
   case PIPE_PRIM_LINES:
      return TOP_LINE_LIST;
   case PIPE_PRIM_LINE_LOOP:
      return TOP_LINE_LOOP;
   case PIPE_PRIM_LINE_STRIP:
      return TOP_LINE_STRIP;
   case PIPE_PRIM_TRIANGLES:
      return TOP_TRIANGLE_LIST;
   case PIPE_PRIM_TRIANGLE_STRIP:
      return TOP_TRIANGLE_STRIP;
   case PIPE_PRIM_TRIANGLE_FAN:
      return TOP_TRIANGLE_FAN;
   case PIPE_PRIM_QUADS:
      return TOP_QUAD_LIST;
   case PIPE_PRIM_QUAD_STRIP:
      return TOP_QUAD_STRIP;
   case PIPE_PRIM_POLYGON:
      return TOP_TRIANGLE_FAN; /* XXX TOP_POLYGON; */
   case PIPE_PRIM_LINES_ADJACENCY:
      return TOP_LINE_LIST_ADJ;
   case PIPE_PRIM_LINE_STRIP_ADJACENCY:
      return TOP_LISTSTRIP_ADJ;
   case PIPE_PRIM_TRIANGLES_ADJACENCY:
      return TOP_TRI_LIST_ADJ;
   case PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY:
      return TOP_TRI_STRIP_ADJ;
   default:
      assert(0 && "Unknown topology");
      return TOP_UNKNOWN;
   }
}
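
/* Unsupported modes fall through to TOP_UNKNOWN; swr_draw_vbo() below feeds
 * the result straight into the SWR draw calls, so the assert above is the
 * only guard, and only in debug builds. */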


/*
 * Draw vertex arrays, with optional indexing, optional instancing.
 */
static void
swr_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct swr_context *ctx = swr_context(pipe);

   if (!swr_check_render_cond(pipe))
      return;

   if (info->indirect) {
      util_draw_indirect(pipe, info);
      return;
   }

   /* Update derived state, pass draw info to update function */
   if (ctx->dirty)
      swr_update_derived(pipe, info);

   swr_update_draw_context(ctx);

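   /* If the bound vertex shader writes stream output, lazily JIT-compile a
    * streamout shader for this primitive mode and bind it before drawing. */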
   if (ctx->vs->pipe.stream_output.num_outputs) {
      if (!ctx->vs->soFunc[info->mode]) {
         STREAMOUT_COMPILE_STATE state = {0};
         struct pipe_stream_output_info *so = &ctx->vs->pipe.stream_output;

         state.numVertsPerPrim = u_vertices_per_prim(info->mode);

         uint32_t offsets[MAX_SO_STREAMS] = {0};
         uint32_t num = 0;

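         /* Walk the outputs in order, emitting one decl per output. A gap
          * between a buffer's running offset and the next output's
          * dst_offset becomes an explicit "hole" decl whose componentMask
          * covers the skipped dwords; e.g. a running offset of 4 and a
          * dst_offset of 6 give a hole mask of (1 << (6 - 4)) - 1 = 0x3. */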
         for (uint32_t i = 0; i < so->num_outputs; i++) {
            assert(so->output[i].stream == 0); // @todo
            uint32_t output_buffer = so->output[i].output_buffer;
            if (so->output[i].dst_offset != offsets[output_buffer]) {
               /* Hole in the buffer layout - emit a decl to fill it */
               state.stream.decl[num].bufferIndex = output_buffer;
               state.stream.decl[num].hole = true;
               state.stream.decl[num].componentMask =
                  (1 << (so->output[i].dst_offset - offsets[output_buffer]))
                  - 1;
               num++;
               offsets[output_buffer] = so->output[i].dst_offset;
            }

            state.stream.decl[num].bufferIndex = output_buffer;
            state.stream.decl[num].attribSlot = so->output[i].register_index - 1;
            state.stream.decl[num].componentMask =
               ((1 << so->output[i].num_components) - 1)
               << so->output[i].start_component;
            state.stream.decl[num].hole = false;
            num++;

            offsets[output_buffer] += so->output[i].num_components;
         }

         state.stream.numDecls = num;

         HANDLE hJitMgr = swr_screen(pipe->screen)->hJitMgr;
         ctx->vs->soFunc[info->mode] = JitCompileStreamout(hJitMgr, state);
         debug_printf("so shader %p\n", ctx->vs->soFunc[info->mode]);
         assert(ctx->vs->soFunc[info->mode] && "Error: SoShader = NULL");
      }

      SwrSetSoFunc(ctx->swrContext, ctx->vs->soFunc[info->mode], 0);
   }

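   /* The cut index and primitive-restart enable are baked into the fetch
    * shader's JIT state, so the fetch shader must be recompiled whenever
    * either one changes. */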
   struct swr_vertex_element_state *velems = ctx->velems;
   if (!velems->fsFunc
       || (velems->fsState.cutIndex != info->restart_index)
       || (velems->fsState.bEnableCutIndex != info->primitive_restart)) {

      velems->fsState.cutIndex = info->restart_index;
      velems->fsState.bEnableCutIndex = info->primitive_restart;

      /* Create Fetch Shader */
      HANDLE hJitMgr = swr_screen(ctx->pipe.screen)->hJitMgr;
      velems->fsFunc = JitCompileFetch(hJitMgr, velems->fsState);

      debug_printf("fetch shader %p\n", velems->fsFunc);
      assert(velems->fsFunc && "Error: FetchShader = NULL");
   }

   SwrSetFetchFunc(ctx->swrContext, velems->fsFunc);

   if (info->indexed)
      SwrDrawIndexedInstanced(ctx->swrContext,
                              swr_convert_prim_topology(info->mode),
                              info->count,
                              info->instance_count,
                              info->start,
                              info->index_bias,
                              info->start_instance);
   else
      SwrDrawInstanced(ctx->swrContext,
                       swr_convert_prim_topology(info->mode),
                       info->count,
                       info->instance_count,
                       info->start,
                       info->start_instance);
}


static void
swr_flush(struct pipe_context *pipe,
          struct pipe_fence_handle **fence,
          unsigned flags)
{
   struct swr_context *ctx = swr_context(pipe);
   struct swr_screen *screen = swr_screen(pipe->screen);
   struct pipe_surface *cb = ctx->framebuffer.cbufs[0];

   /* If the current renderTarget is the display surface, store tiles back to
    * the surface, in preparation for present (swr_flush_frontbuffer).
    * Other renderTargets are stored back when the attachment changes or in
    * swr_surface_destroy. */
   if (cb && swr_resource(cb->texture)->display_target)
      swr_store_dirty_resource(pipe, cb->texture, SWR_TILE_RESOLVED);

   if (fence)
      swr_fence_reference(pipe->screen, fence, screen->flush_fence);
}
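
/* swr_finish() below is the blocking variant: it flushes, then waits on the
 * resulting fence before releasing its reference. */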
void
swr_finish(struct pipe_context *pipe)
{
   struct pipe_fence_handle *fence = nullptr;

   swr_flush(pipe, &fence, 0);
   swr_fence_finish(pipe->screen, fence, 0);
   swr_fence_reference(pipe->screen, &fence, NULL);
}


/*
 * Store SWR HotTiles back to renderTarget surface.
 */
void
swr_store_render_target(struct pipe_context *pipe,
                        uint32_t attachment,
                        enum SWR_TILE_STATE post_tile_state)
{
   struct swr_context *ctx = swr_context(pipe);
   struct swr_draw_context *pDC = &ctx->swrDC;
   struct SWR_SURFACE_STATE *renderTarget = &pDC->renderTargets[attachment];

   /* Only proceed if there's a valid surface to store to */
   if (renderTarget->pBaseAddress) {
      /* Set viewport to full renderTarget width/height and disable scissor
       * before StoreTiles */
      boolean change_viewport =
         (ctx->derived.vp.x != 0.0f || ctx->derived.vp.y != 0.0f
          || ctx->derived.vp.width != renderTarget->width
          || ctx->derived.vp.height != renderTarget->height);
      if (change_viewport) {
         SWR_VIEWPORT vp = {0};
         vp.width = renderTarget->width;
         vp.height = renderTarget->height;
         SwrSetViewports(ctx->swrContext, 1, &vp, NULL);
      }

      boolean scissor_enable = ctx->derived.rastState.scissorEnable;
      if (scissor_enable) {
         ctx->derived.rastState.scissorEnable = FALSE;
         SwrSetRastState(ctx->swrContext, &ctx->derived.rastState);
      }

      swr_update_draw_context(ctx);
      SwrStoreTiles(ctx->swrContext,
                    (enum SWR_RENDERTARGET_ATTACHMENT)attachment,
                    post_tile_state);

      /* Restore viewport and scissor enable */
      if (change_viewport)
         SwrSetViewports(ctx->swrContext, 1, &ctx->derived.vp, &ctx->derived.vpm);
      if (scissor_enable) {
         ctx->derived.rastState.scissorEnable = scissor_enable;
         SwrSetRastState(ctx->swrContext, &ctx->derived.rastState);
      }
   }
}

void
swr_store_dirty_resource(struct pipe_context *pipe,
                         struct pipe_resource *resource,
                         enum SWR_TILE_STATE post_tile_state)
{
   /* Only store the resource if it has been written to */
   if (swr_resource(resource)->status & SWR_RESOURCE_WRITE) {
      struct swr_context *ctx = swr_context(pipe);
      struct swr_screen *screen = swr_screen(pipe->screen);
      struct swr_resource *spr = swr_resource(resource);

      swr_draw_context *pDC = &ctx->swrDC;
      SWR_SURFACE_STATE *renderTargets = pDC->renderTargets;
      for (uint32_t i = 0; i < SWR_NUM_ATTACHMENTS; i++)
         if (renderTargets[i].pBaseAddress == spr->swr.pBaseAddress) {
            swr_store_render_target(pipe, i, post_tile_state);

            /* Mesa treats depth/stencil as a single fused resource, so an
             * explicit stencil resource never arrives here. When storing
             * the depth attachment, also store stencil if present. */
            if (spr->has_stencil && (i == SWR_ATTACHMENT_DEPTH)) {
               swr_store_render_target(
                  pipe, SWR_ATTACHMENT_STENCIL, post_tile_state);
            }

            /* This fence signals StoreTiles completion */
            swr_fence_submit(ctx, screen->flush_fence);

            break;
         }
   }
}

void
swr_draw_init(struct pipe_context *pipe)
{
   pipe->draw_vbo = swr_draw_vbo;
   pipe->flush = swr_flush;
}
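
/* Illustrative sketch of the expected hookup (the actual call site lives in
 * the context-creation path, e.g. swr_create_context()):
 *
 *    struct pipe_context *pipe = &ctx->pipe;
 *    ...
 *    swr_draw_init(pipe);   // installs the draw_vbo and flush entry points
 */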