1 /****************************************************************************
2 * Copyright (C) 2015 Intel Corporation. All Rights Reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 ***************************************************************************/
23
24 // llvm redefines DEBUG
25 #pragma push_macro("DEBUG")
26 #undef DEBUG
27 #include "JitManager.h"
28 #include "llvm-c/Core.h"
29 #include "llvm/Support/CBindingWrapping.h"
30 #include "llvm/IR/LegacyPassManager.h"
31 #pragma pop_macro("DEBUG")
32
33 #include "state.h"
34 #include "gen_state_llvm.h"
35 #include "builder.h"
36 #include "functionpasses/passes.h"
37
38 #include "tgsi/tgsi_strings.h"
39 #include "util/u_format.h"
40 #include "util/u_prim.h"
41 #include "gallivm/lp_bld_init.h"
42 #include "gallivm/lp_bld_flow.h"
43 #include "gallivm/lp_bld_struct.h"
44 #include "gallivm/lp_bld_tgsi.h"
45
46 #include "swr_context.h"
47 #include "gen_swr_context_llvm.h"
48 #include "swr_resource.h"
49 #include "swr_state.h"
50 #include "swr_screen.h"
51
52 using namespace SwrJit;
53 using namespace llvm;
54
55 static unsigned
56 locate_linkage(ubyte name, ubyte index, struct tgsi_shader_info *info);
57
58 bool operator==(const swr_jit_fs_key &lhs, const swr_jit_fs_key &rhs)
59 {
60 return !memcmp(&lhs, &rhs, sizeof(lhs));
61 }
62
63 bool operator==(const swr_jit_vs_key &lhs, const swr_jit_vs_key &rhs)
64 {
65 return !memcmp(&lhs, &rhs, sizeof(lhs));
66 }
67
68 bool operator==(const swr_jit_fetch_key &lhs, const swr_jit_fetch_key &rhs)
69 {
70 return !memcmp(&lhs, &rhs, sizeof(lhs));
71 }
72
73 bool operator==(const swr_jit_gs_key &lhs, const swr_jit_gs_key &rhs)
74 {
75 return !memcmp(&lhs, &rhs, sizeof(lhs));
76 }
77
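// Shader variant keys are compared bit-for-bit with memcmp (see the operator==
// overloads above), so the swr_generate_*_key() helpers memset each key up front
// to keep padding and unused fields deterministic.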
78 static void
79 swr_generate_sampler_key(const struct lp_tgsi_info &info,
80 struct swr_context *ctx,
81 enum pipe_shader_type shader_type,
82 struct swr_jit_sampler_key &key)
83 {
84 key.nr_samplers = info.base.file_max[TGSI_FILE_SAMPLER] + 1;
85
86 for (unsigned i = 0; i < key.nr_samplers; i++) {
87 if (info.base.file_mask[TGSI_FILE_SAMPLER] & (1 << i)) {
88 lp_sampler_static_sampler_state(
89 &key.sampler[i].sampler_state,
90 ctx->samplers[shader_type][i]);
91 }
92 }
93
94 /*
95 * XXX If TGSI_FILE_SAMPLER_VIEW exists assume all texture opcodes
96 * are dx10-style? Can't really have mixed opcodes, at least not
97 * if we want to skip the holes here (without rescanning tgsi).
98 */
99 if (info.base.file_max[TGSI_FILE_SAMPLER_VIEW] != -1) {
100 key.nr_sampler_views =
101 info.base.file_max[TGSI_FILE_SAMPLER_VIEW] + 1;
102 for (unsigned i = 0; i < key.nr_sampler_views; i++) {
103 if (info.base.file_mask[TGSI_FILE_SAMPLER_VIEW] & (1u << (i & 31))) {
104 const struct pipe_sampler_view *view =
105 ctx->sampler_views[shader_type][i];
106 lp_sampler_static_texture_state(
107 &key.sampler[i].texture_state, view);
108 if (view) {
109 struct swr_resource *swr_res = swr_resource(view->texture);
110 const struct util_format_description *desc =
111 util_format_description(view->format);
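// A combined depth/stencil resource sampled through a view whose format has no
// depth component is a stencil read; override the texture format to S8_UINT so
// the stencil plane is fetched.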
112 if (swr_res->has_depth && swr_res->has_stencil &&
113 !util_format_has_depth(desc))
114 key.sampler[i].texture_state.format = PIPE_FORMAT_S8_UINT;
115 }
116 }
117 }
118 } else {
119 key.nr_sampler_views = key.nr_samplers;
120 for (unsigned i = 0; i < key.nr_sampler_views; i++) {
121 if (info.base.file_mask[TGSI_FILE_SAMPLER] & (1 << i)) {
122 const struct pipe_sampler_view *view =
123 ctx->sampler_views[shader_type][i];
124 lp_sampler_static_texture_state(
125 &key.sampler[i].texture_state, view);
126 if (view) {
127 struct swr_resource *swr_res = swr_resource(view->texture);
128 const struct util_format_description *desc =
129 util_format_description(view->format);
130 if (swr_res->has_depth && swr_res->has_stencil &&
131 !util_format_has_depth(desc))
132 key.sampler[i].texture_state.format = PIPE_FORMAT_S8_UINT;
133 }
134 }
135 }
136 }
137 }
138
139 void
140 swr_generate_fs_key(struct swr_jit_fs_key &key,
141 struct swr_context *ctx,
142 swr_fragment_shader *swr_fs)
143 {
144 memset(&key, 0, sizeof(key));
145
146 key.nr_cbufs = ctx->framebuffer.nr_cbufs;
147 key.light_twoside = ctx->rasterizer->light_twoside;
148 key.sprite_coord_enable = ctx->rasterizer->sprite_coord_enable;
149
150 struct tgsi_shader_info *pPrevShader;
151 if (ctx->gs)
152 pPrevShader = &ctx->gs->info.base;
153 else
154 pPrevShader = &ctx->vs->info.base;
155
156 memcpy(&key.vs_output_semantic_name,
157 &pPrevShader->output_semantic_name,
158 sizeof(key.vs_output_semantic_name));
159 memcpy(&key.vs_output_semantic_idx,
160 &pPrevShader->output_semantic_index,
161 sizeof(key.vs_output_semantic_idx));
162
163 swr_generate_sampler_key(swr_fs->info, ctx, PIPE_SHADER_FRAGMENT, key);
164
165 key.poly_stipple_enable = ctx->rasterizer->poly_stipple_enable &&
166 ctx->poly_stipple.prim_is_poly;
167 }
168
169 void
170 swr_generate_vs_key(struct swr_jit_vs_key &key,
171 struct swr_context *ctx,
172 swr_vertex_shader *swr_vs)
173 {
174 memset(&key, 0, sizeof(key));
175
176 key.clip_plane_mask =
177 swr_vs->info.base.clipdist_writemask ?
178 swr_vs->info.base.clipdist_writemask & ctx->rasterizer->clip_plane_enable :
179 ctx->rasterizer->clip_plane_enable;
180
181 swr_generate_sampler_key(swr_vs->info, ctx, PIPE_SHADER_VERTEX, key);
182 }
183
184 void
185 swr_generate_fetch_key(struct swr_jit_fetch_key &key,
186 struct swr_vertex_element_state *velems)
187 {
188 memset(&key, 0, sizeof(key));
189
190 key.fsState = velems->fsState;
191 }
192
193 void
194 swr_generate_gs_key(struct swr_jit_gs_key &key,
195 struct swr_context *ctx,
196 swr_geometry_shader *swr_gs)
197 {
198 memset(&key, 0, sizeof(key));
199
200 struct tgsi_shader_info *pPrevShader = &ctx->vs->info.base;
201
202 memcpy(&key.vs_output_semantic_name,
203 &pPrevShader->output_semantic_name,
204 sizeof(key.vs_output_semantic_name));
205 memcpy(&key.vs_output_semantic_idx,
206 &pPrevShader->output_semantic_index,
207 sizeof(key.vs_output_semantic_idx));
208
209 swr_generate_sampler_key(swr_gs->info, ctx, PIPE_SHADER_GEOMETRY, key);
210 }
211
212 struct BuilderSWR : public Builder {
213 BuilderSWR(JitManager *pJitMgr, const char *pName)
214 : Builder(pJitMgr)
215 {
216 pJitMgr->SetupNewModule();
217 gallivm = gallivm_create(pName, wrap(&JM()->mContext));
218 pJitMgr->mpCurrentModule = unwrap(gallivm->module);
219 }
220
221 ~BuilderSWR() {
222 gallivm_free_ir(gallivm);
223 }
224
225 void WriteVS(Value *pVal, Value *pVsContext, Value *pVtxOutput,
226 unsigned slot, unsigned channel);
227
228 struct gallivm_state *gallivm;
229 PFN_VERTEX_FUNC CompileVS(struct swr_context *ctx, swr_jit_vs_key &key);
230 PFN_PIXEL_KERNEL CompileFS(struct swr_context *ctx, swr_jit_fs_key &key);
231 PFN_GS_FUNC CompileGS(struct swr_context *ctx, swr_jit_gs_key &key);
232
233 LLVMValueRef
234 swr_gs_llvm_fetch_input(const struct lp_build_tgsi_gs_iface *gs_iface,
235 struct lp_build_tgsi_context * bld_base,
236 boolean is_vindex_indirect,
237 LLVMValueRef vertex_index,
238 boolean is_aindex_indirect,
239 LLVMValueRef attrib_index,
240 LLVMValueRef swizzle_index);
241 void
242 swr_gs_llvm_emit_vertex(const struct lp_build_tgsi_gs_iface *gs_base,
243 struct lp_build_tgsi_context * bld_base,
244 LLVMValueRef (*outputs)[4],
245 LLVMValueRef emitted_vertices_vec);
246
247 void
248 swr_gs_llvm_end_primitive(const struct lp_build_tgsi_gs_iface *gs_base,
249 struct lp_build_tgsi_context * bld_base,
250 LLVMValueRef verts_per_prim_vec,
251 LLVMValueRef emitted_prims_vec);
252
253 void
254 swr_gs_llvm_epilogue(const struct lp_build_tgsi_gs_iface *gs_base,
255 struct lp_build_tgsi_context * bld_base,
256 LLVMValueRef total_emitted_vertices_vec,
257 LLVMValueRef emitted_prims_vec);
258
259 };
260
261 struct swr_gs_llvm_iface {
262 struct lp_build_tgsi_gs_iface base;
263 struct tgsi_shader_info *info;
264
265 BuilderSWR *pBuilder;
266
267 Value *pGsCtx;
268 SWR_GS_STATE *pGsState;
269 uint32_t num_outputs;
270 uint32_t num_verts_per_prim;
271
272 Value *pVtxAttribMap;
273 };
274
275 // trampoline functions so we can use the builder's LLVM construction methods
276 static LLVMValueRef
277 swr_gs_llvm_fetch_input(const struct lp_build_tgsi_gs_iface *gs_iface,
278 struct lp_build_tgsi_context * bld_base,
279 boolean is_vindex_indirect,
280 LLVMValueRef vertex_index,
281 boolean is_aindex_indirect,
282 LLVMValueRef attrib_index,
283 LLVMValueRef swizzle_index)
284 {
285 swr_gs_llvm_iface *iface = (swr_gs_llvm_iface*)gs_iface;
286
287 return iface->pBuilder->swr_gs_llvm_fetch_input(gs_iface, bld_base,
288 is_vindex_indirect,
289 vertex_index,
290 is_aindex_indirect,
291 attrib_index,
292 swizzle_index);
293 }
294
295 static void
296 swr_gs_llvm_emit_vertex(const struct lp_build_tgsi_gs_iface *gs_base,
297 struct lp_build_tgsi_context * bld_base,
298 LLVMValueRef (*outputs)[4],
299 LLVMValueRef emitted_vertices_vec)
300 {
301 swr_gs_llvm_iface *iface = (swr_gs_llvm_iface*)gs_base;
302
303 iface->pBuilder->swr_gs_llvm_emit_vertex(gs_base, bld_base,
304 outputs,
305 emitted_vertices_vec);
306 }
307
308 static void
309 swr_gs_llvm_end_primitive(const struct lp_build_tgsi_gs_iface *gs_base,
310 struct lp_build_tgsi_context * bld_base,
311 LLVMValueRef verts_per_prim_vec,
312 LLVMValueRef emitted_prims_vec)
313 {
314 swr_gs_llvm_iface *iface = (swr_gs_llvm_iface*)gs_base;
315
316 iface->pBuilder->swr_gs_llvm_end_primitive(gs_base, bld_base,
317 verts_per_prim_vec,
318 emitted_prims_vec);
319 }
320
321 static void
322 swr_gs_llvm_epilogue(const struct lp_build_tgsi_gs_iface *gs_base,
323 struct lp_build_tgsi_context * bld_base,
324 LLVMValueRef total_emitted_vertices_vec,
325 LLVMValueRef emitted_prims_vec)
326 {
327 swr_gs_llvm_iface *iface = (swr_gs_llvm_iface*)gs_base;
328
329 iface->pBuilder->swr_gs_llvm_epilogue(gs_base, bld_base,
330 total_emitted_vertices_vec,
331 emitted_prims_vec);
332 }
333
334 LLVMValueRef
335 BuilderSWR::swr_gs_llvm_fetch_input(const struct lp_build_tgsi_gs_iface *gs_iface,
336 struct lp_build_tgsi_context * bld_base,
337 boolean is_vindex_indirect,
338 LLVMValueRef vertex_index,
339 boolean is_aindex_indirect,
340 LLVMValueRef attrib_index,
341 LLVMValueRef swizzle_index)
342 {
343 swr_gs_llvm_iface *iface = (swr_gs_llvm_iface*)gs_iface;
344 Value *vert_index = unwrap(vertex_index);
345 Value *attr_index = unwrap(attrib_index);
346
347 IRB()->SetInsertPoint(unwrap(LLVMGetInsertBlock(gallivm->builder)));
348
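// If either the vertex index or the attribute index is indirect (differs per
// SIMD lane), gather the input one lane at a time; otherwise a single vector
// load suffices.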
349 if (is_vindex_indirect || is_aindex_indirect) {
350 int i;
351 Value *res = unwrap(bld_base->base.zero);
352 struct lp_type type = bld_base->base.type;
353
354 for (i = 0; i < type.length; i++) {
355 Value *vert_chan_index = vert_index;
356 Value *attr_chan_index = attr_index;
357
358 if (is_vindex_indirect) {
359 vert_chan_index = VEXTRACT(vert_index, C(i));
360 }
361 if (is_aindex_indirect) {
362 attr_chan_index = VEXTRACT(attr_index, C(i));
363 }
364
365 Value *attrib =
366 LOAD(GEP(iface->pVtxAttribMap, {C(0), attr_chan_index}));
367
368 Value *pVertex = LOAD(iface->pGsCtx, {0, SWR_GS_CONTEXT_pVerts});
369 Value *pInputVertStride = LOAD(iface->pGsCtx, {0, SWR_GS_CONTEXT_inputVertStride});
370
371 Value *pVector = ADD(MUL(vert_chan_index, pInputVertStride), attrib);
372 Value *pInput = LOAD(GEP(pVertex, {pVector, unwrap(swizzle_index)}));
373
374 Value *value = VEXTRACT(pInput, C(i));
375 res = VINSERT(res, value, C(i));
376 }
377
378 return wrap(res);
379 } else {
380 Value *attrib = LOAD(GEP(iface->pVtxAttribMap, {C(0), attr_index}));
381
382 Value *pVertex = LOAD(iface->pGsCtx, {0, SWR_GS_CONTEXT_pVerts});
383 Value *pInputVertStride = LOAD(iface->pGsCtx, {0, SWR_GS_CONTEXT_inputVertStride});
384
385 Value *pVector = ADD(MUL(vert_index, pInputVertStride), attrib);
386
387 Value *pInput = LOAD(GEP(pVertex, {pVector, unwrap(swizzle_index)}));
388
389 return wrap(pInput);
390 }
391 }
392
393 // GS output stream layout
394 #define VERTEX_COUNT_SIZE 32
395 #define CONTROL_HEADER_SIZE (8*32)
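// Byte layout of each per-lane GS output stream (see emit_vertex, end_primitive
// and the epilogue below):
//   [0 .. VERTEX_COUNT_SIZE)                       emitted-vertex count (DWORD, written in the epilogue)
//   [VERTEX_COUNT_SIZE .. +CONTROL_HEADER_SIZE)    cut bits, one bit per emitted vertex marking primitive ends
//   [VERTEX_COUNT_SIZE+CONTROL_HEADER_SIZE .. )    packed vertices, 4 floats per slot * SWR_VTX_NUM_SLOTS each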
396
397 void
398 BuilderSWR::swr_gs_llvm_emit_vertex(const struct lp_build_tgsi_gs_iface *gs_base,
399 struct lp_build_tgsi_context * bld_base,
400 LLVMValueRef (*outputs)[4],
401 LLVMValueRef emitted_vertices_vec)
402 {
403 swr_gs_llvm_iface *iface = (swr_gs_llvm_iface*)gs_base;
404
405 IRB()->SetInsertPoint(unwrap(LLVMGetInsertBlock(gallivm->builder)));
406
407 const uint32_t headerSize = VERTEX_COUNT_SIZE + CONTROL_HEADER_SIZE;
408 const uint32_t attribSize = 4 * sizeof(float);
409 const uint32_t vertSize = attribSize * SWR_VTX_NUM_SLOTS;
410 Value *pVertexOffset = MUL(unwrap(emitted_vertices_vec), VIMMED1(vertSize));
411
412 Value *vMask = LOAD(iface->pGsCtx, {0, SWR_GS_CONTEXT_mask});
413 Value *vMask1 = TRUNC(vMask, VectorType::get(mInt1Ty, mVWidth));
414
415 Value *pStack = STACKSAVE();
416 Value *pTmpPtr = ALLOCA(mFP32Ty, C(4)); // used for dummy write for lane masking
417
418 for (uint32_t attrib = 0; attrib < iface->num_outputs; ++attrib) {
419 uint32_t attribSlot = attrib;
420 uint32_t sgvChannel = 0;
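// Map the TGSI output slot to an SWR vertex slot: PSIZE, LAYER and
// VIEWPORT_INDEX share the single SGV slot (in different components), POSITION
// gets its own slot, and everything else goes to the generic attribute slots
// (shifted down by one when position is also written, presumably because
// position does not take up a generic slot).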
421 if (iface->info->output_semantic_name[attrib] == TGSI_SEMANTIC_PSIZE) {
422 attribSlot = VERTEX_SGV_SLOT;
423 sgvChannel = VERTEX_SGV_POINT_SIZE_COMP;
424 } else if (iface->info->output_semantic_name[attrib] == TGSI_SEMANTIC_LAYER) {
425 attribSlot = VERTEX_SGV_SLOT;
426 sgvChannel = VERTEX_SGV_RTAI_COMP;
427 } else if (iface->info->output_semantic_name[attrib] == TGSI_SEMANTIC_VIEWPORT_INDEX) {
428 attribSlot = VERTEX_SGV_SLOT;
429 sgvChannel = VERTEX_SGV_VAI_COMP;
430 } else if (iface->info->output_semantic_name[attrib] == TGSI_SEMANTIC_POSITION) {
431 attribSlot = VERTEX_POSITION_SLOT;
432 } else {
433 attribSlot = VERTEX_ATTRIB_START_SLOT + attrib;
434 if (iface->info->writes_position) {
435 attribSlot--;
436 }
437 }
438
439 Value *pOutputOffset = ADD(pVertexOffset, VIMMED1(headerSize + attribSize * attribSlot)); // + sgvChannel ?
440
441 for (uint32_t lane = 0; lane < mVWidth; ++lane) {
442 Value *pLaneOffset = VEXTRACT(pOutputOffset, C(lane));
443 Value *pStream = LOAD(iface->pGsCtx, {0, SWR_GS_CONTEXT_pStreams, lane});
444 Value *pStreamOffset = GEP(pStream, pLaneOffset);
445 pStreamOffset = BITCAST(pStreamOffset, mFP32PtrTy);
446
447 Value *pLaneMask = VEXTRACT(vMask1, C(lane));
448 pStreamOffset = SELECT(pLaneMask, pStreamOffset, pTmpPtr);
449
450 for (uint32_t channel = 0; channel < 4; ++channel) {
451 Value *vData;
452
453 if (attribSlot == VERTEX_SGV_SLOT)
454 vData = LOAD(unwrap(outputs[attrib][0]));
455 else
456 vData = LOAD(unwrap(outputs[attrib][channel]));
457
458 if (attribSlot != VERTEX_SGV_SLOT ||
459 sgvChannel == channel) {
460 vData = VEXTRACT(vData, C(lane));
461 STORE(vData, pStreamOffset);
462 }
463 pStreamOffset = GEP(pStreamOffset, C(1));
464 }
465 }
466 }
467
468 STACKRESTORE(pStack);
469 }
470
471 void
472 BuilderSWR::swr_gs_llvm_end_primitive(const struct lp_build_tgsi_gs_iface *gs_base,
473 struct lp_build_tgsi_context * bld_base,
474 LLVMValueRef verts_per_prim_vec,
475 LLVMValueRef emitted_prims_vec)
476 {
477 swr_gs_llvm_iface *iface = (swr_gs_llvm_iface*)gs_base;
478
479 IRB()->SetInsertPoint(unwrap(LLVMGetInsertBlock(gallivm->builder)));
480
481 Value *vMask = LOAD(iface->pGsCtx, { 0, SWR_GS_CONTEXT_mask });
482 Value *vMask1 = TRUNC(vMask, VectorType::get(mInt1Ty, 8));
483
484 uint32_t vertsPerPrim = iface->num_verts_per_prim;
485
486 Value *vCount =
487 ADD(MUL(unwrap(emitted_prims_vec), VIMMED1(vertsPerPrim)),
488 unwrap(verts_per_prim_vec));
489
490 struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
491 vCount = LOAD(unwrap(bld->total_emitted_vertices_vec_ptr));
492
493 struct lp_exec_mask *exec_mask = &bld->exec_mask;
494 Value *mask = unwrap(lp_build_mask_value(bld->mask));
495 if (exec_mask->has_mask)
496 mask = AND(mask, unwrap(exec_mask->exec_mask));
497
498 Value *cmpMask = VMASK(ICMP_NE(unwrap(verts_per_prim_vec), VIMMED1(0)));
499 mask = AND(mask, cmpMask);
500 vMask1 = TRUNC(mask, VectorType::get(mInt1Ty, 8));
501
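// A primitive end is recorded by setting the bit for its last emitted vertex:
// vertex number (vCount - 1) maps to byte VERTEX_COUNT_SIZE + (vCount - 1) / 8,
// bit position (vCount - 1) % 8.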
502 vCount = SUB(vCount, VIMMED1(1));
503 Value *vOffset = ADD(UDIV(vCount, VIMMED1(8)), VIMMED1(VERTEX_COUNT_SIZE));
504 Value *vValue = SHL(VIMMED1(1), UREM(vCount, VIMMED1(8)));
505
506 vValue = TRUNC(vValue, VectorType::get(mInt8Ty, 8));
507
508 Value *pStack = STACKSAVE();
509 Value *pTmpPtr = ALLOCA(mInt8Ty, C(4)); // used for dummy read/write for lane masking
510
511 for (uint32_t lane = 0; lane < mVWidth; ++lane) {
512 Value *vLaneOffset = VEXTRACT(vOffset, C(lane));
513 Value *pStream = LOAD(iface->pGsCtx, {0, SWR_GS_CONTEXT_pStreams, lane});
514 Value *pStreamOffset = GEP(pStream, vLaneOffset);
515
516 Value *pLaneMask = VEXTRACT(vMask1, C(lane));
517 pStreamOffset = SELECT(pLaneMask, pStreamOffset, pTmpPtr);
518
519 Value *vVal = LOAD(pStreamOffset);
520 vVal = OR(vVal, VEXTRACT(vValue, C(lane)));
521 STORE(vVal, pStreamOffset);
522 }
523
524 STACKRESTORE(pStack);
525 }
526
527 void
528 BuilderSWR::swr_gs_llvm_epilogue(const struct lp_build_tgsi_gs_iface *gs_base,
529 struct lp_build_tgsi_context * bld_base,
530 LLVMValueRef total_emitted_vertices_vec,
531 LLVMValueRef emitted_prims_vec)
532 {
533 swr_gs_llvm_iface *iface = (swr_gs_llvm_iface*)gs_base;
534
535 IRB()->SetInsertPoint(unwrap(LLVMGetInsertBlock(gallivm->builder)));
536
537 // Store emit count to each output stream in the first DWORD
538 for (uint32_t lane = 0; lane < mVWidth; ++lane)
539 {
540 Value* pStream = LOAD(iface->pGsCtx, {0, SWR_GS_CONTEXT_pStreams, lane});
541 pStream = BITCAST(pStream, mInt32PtrTy);
542 Value* pLaneCount = VEXTRACT(unwrap(total_emitted_vertices_vec), C(lane));
543 STORE(pLaneCount, pStream);
544 }
545 }
546
547 PFN_GS_FUNC
548 BuilderSWR::CompileGS(struct swr_context *ctx, swr_jit_gs_key &key)
549 {
550 SWR_GS_STATE *pGS = &ctx->gs->gsState;
551 struct tgsi_shader_info *info = &ctx->gs->info.base;
552
553 memset(pGS, 0, sizeof(*pGS));
554
555 pGS->gsEnable = true;
556
557 pGS->numInputAttribs = info->num_inputs;
558 pGS->outputTopology =
559 swr_convert_prim_topology(info->properties[TGSI_PROPERTY_GS_OUTPUT_PRIM]);
560 pGS->maxNumVerts = info->properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES];
561 pGS->instanceCount = info->properties[TGSI_PROPERTY_GS_INVOCATIONS];
562
563 // XXX: single stream for now...
564 pGS->isSingleStream = true;
565 pGS->singleStreamID = 0;
566
567 pGS->vertexAttribOffset = VERTEX_ATTRIB_START_SLOT; // TODO: optimize
568 pGS->srcVertexAttribOffset = VERTEX_ATTRIB_START_SLOT; // TODO: optimize
569 pGS->inputVertStride = pGS->numInputAttribs + pGS->vertexAttribOffset;
570 pGS->outputVertexSize = SWR_VTX_NUM_SLOTS;
571 pGS->controlDataSize = 8; // GS outputs max of 8 32B units
572 pGS->controlDataOffset = VERTEX_COUNT_SIZE;
573 pGS->outputVertexOffset = pGS->controlDataOffset + CONTROL_HEADER_SIZE;
574
575 pGS->allocationSize =
576 VERTEX_COUNT_SIZE + // vertex count
577 CONTROL_HEADER_SIZE + // control header
578 (SWR_VTX_NUM_SLOTS * 16) * // sizeof vertex
579 pGS->maxNumVerts; // num verts
580
581 struct swr_geometry_shader *gs = ctx->gs;
582
583 LLVMValueRef inputs[PIPE_MAX_SHADER_INPUTS][TGSI_NUM_CHANNELS];
584 LLVMValueRef outputs[PIPE_MAX_SHADER_OUTPUTS][TGSI_NUM_CHANNELS];
585
586 memset(outputs, 0, sizeof(outputs));
587
588 AttrBuilder attrBuilder;
589 attrBuilder.addStackAlignmentAttr(JM()->mVWidth * sizeof(float));
590
591 std::vector<Type *> gsArgs{PointerType::get(Gen_swr_draw_context(JM()), 0),
592 PointerType::get(mInt8Ty, 0),
593 PointerType::get(Gen_SWR_GS_CONTEXT(JM()), 0)};
594 FunctionType *vsFuncType =
595 FunctionType::get(Type::getVoidTy(JM()->mContext), gsArgs, false);
596
597 // create new geometry shader function
598 auto pFunction = Function::Create(vsFuncType,
599 GlobalValue::ExternalLinkage,
600 "GS",
601 JM()->mpCurrentModule);
602 #if HAVE_LLVM < 0x0500
603 AttributeSet attrSet = AttributeSet::get(
604 JM()->mContext, AttributeSet::FunctionIndex, attrBuilder);
605 pFunction->addAttributes(AttributeSet::FunctionIndex, attrSet);
606 #else
607 pFunction->addAttributes(AttributeList::FunctionIndex, attrBuilder);
608 #endif
609
610 BasicBlock *block = BasicBlock::Create(JM()->mContext, "entry", pFunction);
611 IRB()->SetInsertPoint(block);
612 LLVMPositionBuilderAtEnd(gallivm->builder, wrap(block));
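// Keep the SWR Builder (IRB()) and gallivm's C-API builder pointed at the same
// block: the TGSI translation below emits IR through gallivm, while the Builder
// helper macros emit through IRB().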
613
614 auto argitr = pFunction->arg_begin();
615 Value *hPrivateData = &*argitr++;
616 hPrivateData->setName("hPrivateData");
617 Value *pWorkerData = &*argitr++;
618 pWorkerData->setName("pWorkerData");
619 Value *pGsCtx = &*argitr++;
620 pGsCtx->setName("gsCtx");
621
622 Value *consts_ptr =
623 GEP(hPrivateData, {C(0), C(swr_draw_context_constantGS)});
624 consts_ptr->setName("gs_constants");
625 Value *const_sizes_ptr =
626 GEP(hPrivateData, {0, swr_draw_context_num_constantsGS});
627 const_sizes_ptr->setName("num_gs_constants");
628
629 struct lp_build_sampler_soa *sampler =
630 swr_sampler_soa_create(key.sampler, PIPE_SHADER_GEOMETRY);
631
632 struct lp_bld_tgsi_system_values system_values;
633 memset(&system_values, 0, sizeof(system_values));
634 system_values.prim_id = wrap(LOAD(pGsCtx, {0, SWR_GS_CONTEXT_PrimitiveID}));
635 system_values.instance_id = wrap(LOAD(pGsCtx, {0, SWR_GS_CONTEXT_InstanceID}));
636
637 std::vector<Constant*> mapConstants;
638 Value *vtxAttribMap = ALLOCA(ArrayType::get(mInt32Ty, PIPE_MAX_SHADER_INPUTS));
639 for (unsigned slot = 0; slot < info->num_inputs; slot++) {
640 ubyte semantic_name = info->input_semantic_name[slot];
641 ubyte semantic_idx = info->input_semantic_index[slot];
642
643 unsigned vs_slot = locate_linkage(semantic_name, semantic_idx, &ctx->vs->info.base);
644
645 vs_slot += VERTEX_ATTRIB_START_SLOT;
646
647 if (ctx->vs->info.base.output_semantic_name[0] == TGSI_SEMANTIC_POSITION)
648 vs_slot--;
649
650 if (semantic_name == TGSI_SEMANTIC_POSITION)
651 vs_slot = VERTEX_POSITION_SLOT;
652
653 STORE(C(vs_slot), vtxAttribMap, {0, slot});
654 mapConstants.push_back(C(vs_slot));
655 }
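// vtxAttribMap maps each GS input slot to the SWR vertex slot the VS wrote it
// to; swr_gs_llvm_fetch_input() indexes this table to pull inputs out of the
// incoming vertex data.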
656
657 struct lp_build_mask_context mask;
658 Value *mask_val = LOAD(pGsCtx, {0, SWR_GS_CONTEXT_mask}, "gsMask");
659 lp_build_mask_begin(&mask, gallivm,
660 lp_type_float_vec(32, 32 * 8), wrap(mask_val));
661
662 // zero out cut buffer so we can load/modify/store bits
663 for (uint32_t lane = 0; lane < mVWidth; ++lane)
664 {
665 Value* pStream = LOAD(pGsCtx, {0, SWR_GS_CONTEXT_pStreams, lane});
666 MEMSET(pStream, C((char)0), VERTEX_COUNT_SIZE + CONTROL_HEADER_SIZE, sizeof(float) * KNOB_SIMD_WIDTH);
667 }
668
669 struct swr_gs_llvm_iface gs_iface;
670 gs_iface.base.fetch_input = ::swr_gs_llvm_fetch_input;
671 gs_iface.base.emit_vertex = ::swr_gs_llvm_emit_vertex;
672 gs_iface.base.end_primitive = ::swr_gs_llvm_end_primitive;
673 gs_iface.base.gs_epilogue = ::swr_gs_llvm_epilogue;
674 gs_iface.pBuilder = this;
675 gs_iface.pGsCtx = pGsCtx;
676 gs_iface.pGsState = pGS;
677 gs_iface.num_outputs = gs->info.base.num_outputs;
678 gs_iface.num_verts_per_prim =
679 u_vertices_per_prim((pipe_prim_type)info->properties[TGSI_PROPERTY_GS_OUTPUT_PRIM]);
680 gs_iface.info = info;
681 gs_iface.pVtxAttribMap = vtxAttribMap;
682
683 lp_build_tgsi_soa(gallivm,
684 gs->pipe.tokens,
685 lp_type_float_vec(32, 32 * 8),
686 &mask,
687 wrap(consts_ptr),
688 wrap(const_sizes_ptr),
689 &system_values,
690 inputs,
691 outputs,
692 wrap(hPrivateData), // (sampler context)
693 NULL, // thread data
694 sampler,
695 &gs->info.base,
696 &gs_iface.base);
697
698 lp_build_mask_end(&mask);
699
700 sampler->destroy(sampler);
701
702 IRB()->SetInsertPoint(unwrap(LLVMGetInsertBlock(gallivm->builder)));
703
704 RET_VOID();
705
706 gallivm_verify_function(gallivm, wrap(pFunction));
707 gallivm_compile_module(gallivm);
708
709 PFN_GS_FUNC pFunc =
710 (PFN_GS_FUNC)gallivm_jit_function(gallivm, wrap(pFunction));
711
712 debug_printf("geom shader %p\n", pFunc);
713 assert(pFunc && "Error: GeomShader = NULL");
714
715 JM()->mIsModuleFinalized = true;
716
717 return pFunc;
718 }
719
720 PFN_GS_FUNC
721 swr_compile_gs(struct swr_context *ctx, swr_jit_gs_key &key)
722 {
723 BuilderSWR builder(
724 reinterpret_cast<JitManager *>(swr_screen(ctx->pipe.screen)->hJitMgr),
725 "GS");
726 PFN_GS_FUNC func = builder.CompileGS(ctx, key);
727
728 ctx->gs->map.insert(std::make_pair(key, make_unique<VariantGS>(builder.gallivm, func)));
729 return func;
730 }
731
732 void
733 BuilderSWR::WriteVS(Value *pVal, Value *pVsContext, Value *pVtxOutput, unsigned slot, unsigned channel)
734 {
735 #if USE_SIMD16_FRONTEND && !USE_SIMD16_VS
736 // interleave the simdvertex components into the dest simd16vertex
737 // slot16offset = slot8offset * 2
738 // comp16offset = comp8offset * 2 + alternateOffset
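// e.g. with AlternateOffset == 1 (the second simd8 half), slot 1 / channel 2 of
// the simdvertex lands at slot 2, component 5 of the simd16vertex.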
739
740 Value *offset = LOAD(pVsContext, { 0, SWR_VS_CONTEXT_AlternateOffset });
741 Value *pOut = GEP(pVtxOutput, { C(0), C(0), C(slot * 2), offset } );
742 STORE(pVal, pOut, {channel * 2});
743 #else
744 Value *pOut = GEP(pVtxOutput, {0, 0, slot});
745 STORE(pVal, pOut, {0, channel});
746 #endif
747 }
748
749 PFN_VERTEX_FUNC
750 BuilderSWR::CompileVS(struct swr_context *ctx, swr_jit_vs_key &key)
751 {
752 struct swr_vertex_shader *swr_vs = ctx->vs;
753
754 LLVMValueRef inputs[PIPE_MAX_SHADER_INPUTS][TGSI_NUM_CHANNELS];
755 LLVMValueRef outputs[PIPE_MAX_SHADER_OUTPUTS][TGSI_NUM_CHANNELS];
756
757 memset(outputs, 0, sizeof(outputs));
758
759 AttrBuilder attrBuilder;
760 attrBuilder.addStackAlignmentAttr(JM()->mVWidth * sizeof(float));
761
762 std::vector<Type *> vsArgs{PointerType::get(Gen_swr_draw_context(JM()), 0),
763 PointerType::get(mInt8Ty, 0),
764 PointerType::get(Gen_SWR_VS_CONTEXT(JM()), 0)};
765 FunctionType *vsFuncType =
766 FunctionType::get(Type::getVoidTy(JM()->mContext), vsArgs, false);
767
768 // create new vertex shader function
769 auto pFunction = Function::Create(vsFuncType,
770 GlobalValue::ExternalLinkage,
771 "VS",
772 JM()->mpCurrentModule);
773 #if HAVE_LLVM < 0x0500
774 AttributeSet attrSet = AttributeSet::get(
775 JM()->mContext, AttributeSet::FunctionIndex, attrBuilder);
776 pFunction->addAttributes(AttributeSet::FunctionIndex, attrSet);
777 #else
778 pFunction->addAttributes(AttributeList::FunctionIndex, attrBuilder);
779 #endif
780
781 BasicBlock *block = BasicBlock::Create(JM()->mContext, "entry", pFunction);
782 IRB()->SetInsertPoint(block);
783 LLVMPositionBuilderAtEnd(gallivm->builder, wrap(block));
784
785 auto argitr = pFunction->arg_begin();
786 Value *hPrivateData = &*argitr++;
787 hPrivateData->setName("hPrivateData");
788 Value *pWorkerData = &*argitr++;
789 pWorkerData->setName("pWorkerData");
790 Value *pVsCtx = &*argitr++;
791 pVsCtx->setName("vsCtx");
792
793 Value *consts_ptr = GEP(hPrivateData, {C(0), C(swr_draw_context_constantVS)});
794
795 consts_ptr->setName("vs_constants");
796 Value *const_sizes_ptr =
797 GEP(hPrivateData, {0, swr_draw_context_num_constantsVS});
798 const_sizes_ptr->setName("num_vs_constants");
799
800 Value *vtxInput = LOAD(pVsCtx, {0, SWR_VS_CONTEXT_pVin});
801 #if USE_SIMD16_VS
802 vtxInput = BITCAST(vtxInput, PointerType::get(Gen_simd16vertex(JM()), 0));
803 #endif
804
805 for (uint32_t attrib = 0; attrib < PIPE_MAX_SHADER_INPUTS; attrib++) {
806 const unsigned mask = swr_vs->info.base.input_usage_mask[attrib];
807 for (uint32_t channel = 0; channel < TGSI_NUM_CHANNELS; channel++) {
808 if (mask & (1 << channel)) {
809 inputs[attrib][channel] =
810 wrap(LOAD(vtxInput, {0, 0, attrib, channel}));
811 }
812 }
813 }
814
815 struct lp_build_sampler_soa *sampler =
816 swr_sampler_soa_create(key.sampler, PIPE_SHADER_VERTEX);
817
818 struct lp_bld_tgsi_system_values system_values;
819 memset(&system_values, 0, sizeof(system_values));
820 system_values.instance_id = wrap(LOAD(pVsCtx, {0, SWR_VS_CONTEXT_InstanceID}));
821
822 #if USE_SIMD16_VS
823 system_values.vertex_id = wrap(LOAD(pVsCtx, {0, SWR_VS_CONTEXT_VertexID16}));
824 #else
825 system_values.vertex_id = wrap(LOAD(pVsCtx, {0, SWR_VS_CONTEXT_VertexID}));
826 #endif
827
828 #if USE_SIMD16_VS
829 uint32_t vectorWidth = mVWidth16;
830 #else
831 uint32_t vectorWidth = mVWidth;
832 #endif
833
834 lp_build_tgsi_soa(gallivm,
835 swr_vs->pipe.tokens,
836 lp_type_float_vec(32, 32 * vectorWidth),
837 NULL, // mask
838 wrap(consts_ptr),
839 wrap(const_sizes_ptr),
840 &system_values,
841 inputs,
842 outputs,
843 wrap(hPrivateData), // (sampler context)
844 NULL, // thread data
845 sampler, // sampler
846 &swr_vs->info.base,
847 NULL); // geometry shader face
848
849 sampler->destroy(sampler);
850
851 IRB()->SetInsertPoint(unwrap(LLVMGetInsertBlock(gallivm->builder)));
852
853 Value *vtxOutput = LOAD(pVsCtx, {0, SWR_VS_CONTEXT_pVout});
854 #if USE_SIMD16_VS
855 vtxOutput = BITCAST(vtxOutput, PointerType::get(Gen_simd16vertex(JM()), 0));
856 #endif
857
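// Copy the TGSI outputs into the SWR output vertex: PSIZE goes to the
// point-size component of the SGV slot, POSITION to the position slot, and
// everything else to the generic attribute slots (shifted down by one when
// output 0 is POSITION).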
858 for (uint32_t channel = 0; channel < TGSI_NUM_CHANNELS; channel++) {
859 for (uint32_t attrib = 0; attrib < PIPE_MAX_SHADER_OUTPUTS; attrib++) {
860 if (!outputs[attrib][channel])
861 continue;
862
863 Value *val;
864 uint32_t outSlot;
865
866 if (swr_vs->info.base.output_semantic_name[attrib] == TGSI_SEMANTIC_PSIZE) {
867 if (channel != VERTEX_SGV_POINT_SIZE_COMP)
868 continue;
869 val = LOAD(unwrap(outputs[attrib][0]));
870 outSlot = VERTEX_SGV_SLOT;
871 } else if (swr_vs->info.base.output_semantic_name[attrib] == TGSI_SEMANTIC_POSITION) {
872 val = LOAD(unwrap(outputs[attrib][channel]));
873 outSlot = VERTEX_POSITION_SLOT;
874 } else {
875 val = LOAD(unwrap(outputs[attrib][channel]));
876 outSlot = VERTEX_ATTRIB_START_SLOT + attrib;
877 if (swr_vs->info.base.output_semantic_name[0] == TGSI_SEMANTIC_POSITION)
878 outSlot--;
879 }
880
881 WriteVS(val, pVsCtx, vtxOutput, outSlot, channel);
882 }
883 }
884
885 if (ctx->rasterizer->clip_plane_enable ||
886 swr_vs->info.base.culldist_writemask) {
887 unsigned clip_mask = ctx->rasterizer->clip_plane_enable;
888
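// Use the shader's CLIPVERTEX output for the plane-distance computation if it
// writes one; otherwise fall back to its POSITION output.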
889 unsigned cv = 0;
890 if (swr_vs->info.base.writes_clipvertex) {
891 cv = locate_linkage(TGSI_SEMANTIC_CLIPVERTEX, 0,
892 &swr_vs->info.base);
893 } else {
894 for (int i = 0; i < PIPE_MAX_SHADER_OUTPUTS; i++) {
895 if (swr_vs->info.base.output_semantic_name[i] == TGSI_SEMANTIC_POSITION &&
896 swr_vs->info.base.output_semantic_index[i] == 0) {
897 cv = i;
898 break;
899 }
900 }
901 }
902 LLVMValueRef cx = LLVMBuildLoad(gallivm->builder, outputs[cv][0], "");
903 LLVMValueRef cy = LLVMBuildLoad(gallivm->builder, outputs[cv][1], "");
904 LLVMValueRef cz = LLVMBuildLoad(gallivm->builder, outputs[cv][2], "");
905 LLVMValueRef cw = LLVMBuildLoad(gallivm->builder, outputs[cv][3], "");
906
907 for (unsigned val = 0; val < PIPE_MAX_CLIP_PLANES; val++) {
908 // clip distance overrides user clip planes
909 if ((swr_vs->info.base.clipdist_writemask & clip_mask & (1 << val)) ||
910 ((swr_vs->info.base.culldist_writemask << swr_vs->info.base.num_written_clipdistance) & (1 << val))) {
911 unsigned cv = locate_linkage(TGSI_SEMANTIC_CLIPDIST, val < 4 ? 0 : 1,
912 &swr_vs->info.base);
913 if (val < 4) {
914 LLVMValueRef dist = LLVMBuildLoad(gallivm->builder, outputs[cv][val], "");
915 WriteVS(unwrap(dist), pVsCtx, vtxOutput, VERTEX_CLIPCULL_DIST_LO_SLOT, val);
916 } else {
917 LLVMValueRef dist = LLVMBuildLoad(gallivm->builder, outputs[cv][val - 4], "");
918 WriteVS(unwrap(dist), pVsCtx, vtxOutput, VERTEX_CLIPCULL_DIST_HI_SLOT, val - 4);
919 }
920 continue;
921 }
922
923 if (!(clip_mask & (1 << val)))
924 continue;
925
926 Value *px = LOAD(GEP(hPrivateData, {0, swr_draw_context_userClipPlanes, val, 0}));
927 Value *py = LOAD(GEP(hPrivateData, {0, swr_draw_context_userClipPlanes, val, 1}));
928 Value *pz = LOAD(GEP(hPrivateData, {0, swr_draw_context_userClipPlanes, val, 2}));
929 Value *pw = LOAD(GEP(hPrivateData, {0, swr_draw_context_userClipPlanes, val, 3}));
930 #if USE_SIMD16_VS
931 Value *bpx = VBROADCAST_16(px);
932 Value *bpy = VBROADCAST_16(py);
933 Value *bpz = VBROADCAST_16(pz);
934 Value *bpw = VBROADCAST_16(pw);
935 #else
936 Value *bpx = VBROADCAST(px);
937 Value *bpy = VBROADCAST(py);
938 Value *bpz = VBROADCAST(pz);
939 Value *bpw = VBROADCAST(pw);
940 #endif
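// Signed distance to the user clip plane:
// dist = dot(clip vertex, plane) = cx*px + cy*py + cz*pz + cw*pw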
941 Value *dist = FADD(FMUL(unwrap(cx), bpx),
942 FADD(FMUL(unwrap(cy), bpy),
943 FADD(FMUL(unwrap(cz), bpz),
944 FMUL(unwrap(cw), bpw))));
945
946 if (val < 4)
947 WriteVS(dist, pVsCtx, vtxOutput, VERTEX_CLIPCULL_DIST_LO_SLOT, val);
948 else
949 WriteVS(dist, pVsCtx, vtxOutput, VERTEX_CLIPCULL_DIST_HI_SLOT, val - 4);
950 }
951 }
952
953 RET_VOID();
954
955 gallivm_verify_function(gallivm, wrap(pFunction));
956 gallivm_compile_module(gallivm);
957
958 // lp_debug_dump_value(func);
959
960 PFN_VERTEX_FUNC pFunc =
961 (PFN_VERTEX_FUNC)gallivm_jit_function(gallivm, wrap(pFunction));
962
963 debug_printf("vert shader %p\n", pFunc);
964 assert(pFunc && "Error: VertShader = NULL");
965
966 JM()->mIsModuleFinalized = true;
967
968 return pFunc;
969 }
970
971 PFN_VERTEX_FUNC
972 swr_compile_vs(struct swr_context *ctx, swr_jit_vs_key &key)
973 {
974 if (!ctx->vs->pipe.tokens)
975 return NULL;
976
977 BuilderSWR builder(
978 reinterpret_cast<JitManager *>(swr_screen(ctx->pipe.screen)->hJitMgr),
979 "VS");
980 PFN_VERTEX_FUNC func = builder.CompileVS(ctx, key);
981
982 ctx->vs->map.insert(std::make_pair(key, make_unique<VariantVS>(builder.gallivm, func)));
983 return func;
984 }
985
986 unsigned
987 swr_so_adjust_attrib(unsigned in_attrib,
988 swr_vertex_shader *swr_vs)
989 {
990 ubyte semantic_name;
991 unsigned attrib;
992
993 attrib = in_attrib + VERTEX_ATTRIB_START_SLOT;
994
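// Stream-output attributes follow the same slot remapping as the VS outputs:
// POSITION and PSIZE/LAYER go to their dedicated slots, and generic attributes
// shift down by one when the shader also writes position.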
995 if (swr_vs) {
996 semantic_name = swr_vs->info.base.output_semantic_name[in_attrib];
997 if (semantic_name == TGSI_SEMANTIC_POSITION) {
998 attrib = VERTEX_POSITION_SLOT;
999 } else if (semantic_name == TGSI_SEMANTIC_PSIZE) {
1000 attrib = VERTEX_SGV_SLOT;
1001 } else if (semantic_name == TGSI_SEMANTIC_LAYER) {
1002 attrib = VERTEX_SGV_SLOT;
1003 } else {
1004 if (swr_vs->info.base.writes_position) {
1005 attrib--;
1006 }
1007 }
1008 }
1009
1010 return attrib;
1011 }
1012
1013 static unsigned
1014 locate_linkage(ubyte name, ubyte index, struct tgsi_shader_info *info)
1015 {
1016 for (int i = 0; i < PIPE_MAX_SHADER_OUTPUTS; i++) {
1017 if ((info->output_semantic_name[i] == name)
1018 && (info->output_semantic_index[i] == index)) {
1019 return i;
1020 }
1021 }
1022
1023 return 0xFFFFFFFF;
1024 }
1025
1026 PFN_PIXEL_KERNEL
1027 BuilderSWR::CompileFS(struct swr_context *ctx, swr_jit_fs_key &key)
1028 {
1029 struct swr_fragment_shader *swr_fs = ctx->fs;
1030
1031 struct tgsi_shader_info *pPrevShader;
1032 if (ctx->gs)
1033 pPrevShader = &ctx->gs->info.base;
1034 else
1035 pPrevShader = &ctx->vs->info.base;
1036
1037 LLVMValueRef inputs[PIPE_MAX_SHADER_INPUTS][TGSI_NUM_CHANNELS];
1038 LLVMValueRef outputs[PIPE_MAX_SHADER_OUTPUTS][TGSI_NUM_CHANNELS];
1039
1040 memset(inputs, 0, sizeof(inputs));
1041 memset(outputs, 0, sizeof(outputs));
1042
1043 struct lp_build_sampler_soa *sampler = NULL;
1044
1045 AttrBuilder attrBuilder;
1046 attrBuilder.addStackAlignmentAttr(JM()->mVWidth * sizeof(float));
1047
1048 std::vector<Type *> fsArgs{PointerType::get(Gen_swr_draw_context(JM()), 0),
1049 PointerType::get(mInt8Ty, 0),
1050 PointerType::get(Gen_SWR_PS_CONTEXT(JM()), 0)};
1051 FunctionType *funcType =
1052 FunctionType::get(Type::getVoidTy(JM()->mContext), fsArgs, false);
1053
1054 auto pFunction = Function::Create(funcType,
1055 GlobalValue::ExternalLinkage,
1056 "FS",
1057 JM()->mpCurrentModule);
1058 #if HAVE_LLVM < 0x0500
1059 AttributeSet attrSet = AttributeSet::get(
1060 JM()->mContext, AttributeSet::FunctionIndex, attrBuilder);
1061 pFunction->addAttributes(AttributeSet::FunctionIndex, attrSet);
1062 #else
1063 pFunction->addAttributes(AttributeList::FunctionIndex, attrBuilder);
1064 #endif
1065
1066 BasicBlock *block = BasicBlock::Create(JM()->mContext, "entry", pFunction);
1067 IRB()->SetInsertPoint(block);
1068 LLVMPositionBuilderAtEnd(gallivm->builder, wrap(block));
1069
1070 auto args = pFunction->arg_begin();
1071 Value *hPrivateData = &*args++;
1072 hPrivateData->setName("hPrivateData");
1073 Value *pWorkerData = &*args++;
1074 pWorkerData->setName("pWorkerData");
1075 Value *pPS = &*args++;
1076 pPS->setName("psCtx");
1077
1078 Value *consts_ptr = GEP(hPrivateData, {0, swr_draw_context_constantFS});
1079 consts_ptr->setName("fs_constants");
1080 Value *const_sizes_ptr =
1081 GEP(hPrivateData, {0, swr_draw_context_num_constantsFS});
1082 const_sizes_ptr->setName("num_fs_constants");
1083
1084 // load *pAttribs, *pPerspAttribs
1085 Value *pRawAttribs = LOAD(pPS, {0, SWR_PS_CONTEXT_pAttribs}, "pRawAttribs");
1086 Value *pPerspAttribs =
1087 LOAD(pPS, {0, SWR_PS_CONTEXT_pPerspAttribs}, "pPerspAttribs");
1088
1089 swr_fs->constantMask = 0;
1090 swr_fs->flatConstantMask = 0;
1091 swr_fs->pointSpriteMask = 0;
1092
1093 for (int attrib = 0; attrib < PIPE_MAX_SHADER_INPUTS; attrib++) {
1094 const unsigned mask = swr_fs->info.base.input_usage_mask[attrib];
1095 const unsigned interpMode = swr_fs->info.base.input_interpolate[attrib];
1096 const unsigned interpLoc = swr_fs->info.base.input_interpolate_loc[attrib];
1097
1098 if (!mask)
1099 continue;
1100
1101 // load i,j
1102 Value *vi = nullptr, *vj = nullptr;
1103 switch (interpLoc) {
1104 case TGSI_INTERPOLATE_LOC_CENTER:
1105 vi = LOAD(pPS, {0, SWR_PS_CONTEXT_vI, PixelPositions_center}, "i");
1106 vj = LOAD(pPS, {0, SWR_PS_CONTEXT_vJ, PixelPositions_center}, "j");
1107 break;
1108 case TGSI_INTERPOLATE_LOC_CENTROID:
1109 vi = LOAD(pPS, {0, SWR_PS_CONTEXT_vI, PixelPositions_centroid}, "i");
1110 vj = LOAD(pPS, {0, SWR_PS_CONTEXT_vJ, PixelPositions_centroid}, "j");
1111 break;
1112 case TGSI_INTERPOLATE_LOC_SAMPLE:
1113 vi = LOAD(pPS, {0, SWR_PS_CONTEXT_vI, PixelPositions_sample}, "i");
1114 vj = LOAD(pPS, {0, SWR_PS_CONTEXT_vJ, PixelPositions_sample}, "j");
1115 break;
1116 }
1117
1118 // load/compute w
1119 Value *vw = nullptr, *pAttribs;
1120 if (interpMode == TGSI_INTERPOLATE_PERSPECTIVE ||
1121 interpMode == TGSI_INTERPOLATE_COLOR) {
1122 pAttribs = pPerspAttribs;
1123 switch (interpLoc) {
1124 case TGSI_INTERPOLATE_LOC_CENTER:
1125 vw = VRCP(LOAD(pPS, {0, SWR_PS_CONTEXT_vOneOverW, PixelPositions_center}));
1126 break;
1127 case TGSI_INTERPOLATE_LOC_CENTROID:
1128 vw = VRCP(LOAD(pPS, {0, SWR_PS_CONTEXT_vOneOverW, PixelPositions_centroid}));
1129 break;
1130 case TGSI_INTERPOLATE_LOC_SAMPLE:
1131 vw = VRCP(LOAD(pPS, {0, SWR_PS_CONTEXT_vOneOverW, PixelPositions_sample}));
1132 break;
1133 }
1134 } else {
1135 pAttribs = pRawAttribs;
1136 vw = VIMMED1(1.f);
1137 }
1138
1139 vw->setName("w");
1140
1141 ubyte semantic_name = swr_fs->info.base.input_semantic_name[attrib];
1142 ubyte semantic_idx = swr_fs->info.base.input_semantic_index[attrib];
1143
1144 if (semantic_name == TGSI_SEMANTIC_FACE) {
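// frontFace is 0/1; remap it to -1/+1 (2*f - 1) and splat it across the SIMD
// lanes, which appears to be the form the TGSI translation expects for the
// FACE input.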
1145 Value *ff =
1146 UI_TO_FP(LOAD(pPS, {0, SWR_PS_CONTEXT_frontFace}), mFP32Ty);
1147 ff = FSUB(FMUL(ff, C(2.0f)), C(1.0f));
1148 ff = VECTOR_SPLAT(JM()->mVWidth, ff, "vFrontFace");
1149
1150 inputs[attrib][0] = wrap(ff);
1151 inputs[attrib][1] = wrap(VIMMED1(0.0f));
1152 inputs[attrib][2] = wrap(VIMMED1(0.0f));
1153 inputs[attrib][3] = wrap(VIMMED1(1.0f));
1154 continue;
1155 } else if (semantic_name == TGSI_SEMANTIC_POSITION) { // gl_FragCoord
1156 if (swr_fs->info.base.properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] ==
1157 TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER) {
1158 inputs[attrib][0] = wrap(LOAD(pPS, {0, SWR_PS_CONTEXT_vX, PixelPositions_center}, "vX"));
1159 inputs[attrib][1] = wrap(LOAD(pPS, {0, SWR_PS_CONTEXT_vY, PixelPositions_center}, "vY"));
1160 } else {
1161 inputs[attrib][0] = wrap(LOAD(pPS, {0, SWR_PS_CONTEXT_vX, PixelPositions_UL}, "vX"));
1162 inputs[attrib][1] = wrap(LOAD(pPS, {0, SWR_PS_CONTEXT_vY, PixelPositions_UL}, "vY"));
1163 }
1164 inputs[attrib][2] = wrap(LOAD(pPS, {0, SWR_PS_CONTEXT_vZ}, "vZ"));
1165 inputs[attrib][3] =
1166 wrap(LOAD(pPS, {0, SWR_PS_CONTEXT_vOneOverW, PixelPositions_center}, "vOneOverW"));
1167 continue;
1168 }
1169
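// Find the previous-stage output that feeds this input. The -1 presumably
// converts the TGSI output index into a backend attribute index, since the
// position output does not occupy a generic attribute slot.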
1170 unsigned linkedAttrib =
1171 locate_linkage(semantic_name, semantic_idx, pPrevShader) - 1;
1172
1173 uint32_t extraAttribs = 0;
1174 if (semantic_name == TGSI_SEMANTIC_PRIMID && !ctx->gs) {
1175 /* primID not generated by a GS - need to grab it from the swizzleMap override */
1176 linkedAttrib = pPrevShader->num_outputs - 1;
1177 swr_fs->constantMask |= 1 << linkedAttrib;
1178 extraAttribs++;
1179 } else if (semantic_name == TGSI_SEMANTIC_GENERIC &&
1180 key.sprite_coord_enable & (1 << semantic_idx)) {
1181 /* we add an extra attrib to the backendState in swr_update_derived. */
1182 linkedAttrib = pPrevShader->num_outputs + extraAttribs - 1;
1183 swr_fs->pointSpriteMask |= (1 << linkedAttrib);
1184 extraAttribs++;
1185 } else if (linkedAttrib == 0xFFFFFFFF) {
1186 inputs[attrib][0] = wrap(VIMMED1(0.0f));
1187 inputs[attrib][1] = wrap(VIMMED1(0.0f));
1188 inputs[attrib][2] = wrap(VIMMED1(0.0f));
1189 inputs[attrib][3] = wrap(VIMMED1(1.0f));
1190 /* If we're reading in color and 2-sided lighting is enabled, we have
1191 * to keep going.
1192 */
1193 if (semantic_name != TGSI_SEMANTIC_COLOR || !key.light_twoside)
1194 continue;
1195 } else {
1196 if (interpMode == TGSI_INTERPOLATE_CONSTANT) {
1197 swr_fs->constantMask |= 1 << linkedAttrib;
1198 } else if (interpMode == TGSI_INTERPOLATE_COLOR) {
1199 swr_fs->flatConstantMask |= 1 << linkedAttrib;
1200 }
1201 }
1202
1203 unsigned bcolorAttrib = 0xFFFFFFFF;
1204 Value *offset = NULL;
1205 if (semantic_name == TGSI_SEMANTIC_COLOR && key.light_twoside) {
1206 bcolorAttrib = locate_linkage(
1207 TGSI_SEMANTIC_BCOLOR, semantic_idx, pPrevShader) - 1;
1208 /* Neither front nor back colors were available. Nothing to load. */
1209 if (bcolorAttrib == 0xFFFFFFFF && linkedAttrib == 0xFFFFFFFF)
1210 continue;
1211 /* If there is no front color, just always use the back color. */
1212 if (linkedAttrib == 0xFFFFFFFF)
1213 linkedAttrib = bcolorAttrib;
1214
1215 if (bcolorAttrib != 0xFFFFFFFF) {
1216 if (interpMode == TGSI_INTERPOLATE_CONSTANT) {
1217 swr_fs->constantMask |= 1 << bcolorAttrib;
1218 } else if (interpMode == TGSI_INTERPOLATE_COLOR) {
1219 swr_fs->flatConstantMask |= 1 << bcolorAttrib;
1220 }
1221
1222 unsigned diff = 12 * (bcolorAttrib - linkedAttrib);
1223
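// Attributes are 12 floats apart in the attribute buffer, so 'diff' is the
// offset from the front color to the back color; for back-facing fragments this
// offset is added to the load indices below so the back color is fetched
// instead.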
1224 if (diff) {
1225 Value *back =
1226 XOR(C(1), LOAD(pPS, {0, SWR_PS_CONTEXT_frontFace}), "backFace");
1227
1228 offset = MUL(back, C(diff));
1229 offset->setName("offset");
1230 }
1231 }
1232 }
1233
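// Each attribute occupies 12 floats: three interpolation coefficients
// (a, b, c) per component. The interpolated value is a*i + b*j + c*k with
// k = 1 - i - j, multiplied by w afterwards for perspective-correct modes.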
1234 for (int channel = 0; channel < TGSI_NUM_CHANNELS; channel++) {
1235 if (mask & (1 << channel)) {
1236 Value *indexA = C(linkedAttrib * 12 + channel);
1237 Value *indexB = C(linkedAttrib * 12 + channel + 4);
1238 Value *indexC = C(linkedAttrib * 12 + channel + 8);
1239
1240 if (offset) {
1241 indexA = ADD(indexA, offset);
1242 indexB = ADD(indexB, offset);
1243 indexC = ADD(indexC, offset);
1244 }
1245
1246 Value *va = VBROADCAST(LOAD(GEP(pAttribs, indexA)));
1247 Value *vb = VBROADCAST(LOAD(GEP(pAttribs, indexB)));
1248 Value *vc = VBROADCAST(LOAD(GEP(pAttribs, indexC)));
1249
1250 if (interpMode == TGSI_INTERPOLATE_CONSTANT) {
1251 inputs[attrib][channel] = wrap(va);
1252 } else {
1253 Value *vk = FSUB(FSUB(VIMMED1(1.0f), vi), vj);
1254
1255 vc = FMUL(vk, vc);
1256
1257 Value *interp = FMUL(va, vi);
1258 Value *interp1 = FMUL(vb, vj);
1259 interp = FADD(interp, interp1);
1260 interp = FADD(interp, vc);
1261 if (interpMode == TGSI_INTERPOLATE_PERSPECTIVE ||
1262 interpMode == TGSI_INTERPOLATE_COLOR)
1263 interp = FMUL(interp, vw);
1264 inputs[attrib][channel] = wrap(interp);
1265 }
1266 }
1267 }
1268 }
1269
1270 sampler = swr_sampler_soa_create(key.sampler, PIPE_SHADER_FRAGMENT);
1271
1272 struct lp_bld_tgsi_system_values system_values;
1273 memset(&system_values, 0, sizeof(system_values));
1274
1275 struct lp_build_mask_context mask;
1276 bool uses_mask = false;
1277
1278 if (swr_fs->info.base.uses_kill ||
1279 key.poly_stipple_enable) {
1280 Value *vActiveMask = NULL;
1281 if (swr_fs->info.base.uses_kill) {
1282 vActiveMask = LOAD(pPS, {0, SWR_PS_CONTEXT_activeMask}, "activeMask");
1283 }
1284 if (key.poly_stipple_enable) {
1285 // first get fragment xy coords and clip to stipple bounds
1286 Value *vXf = LOAD(pPS, {0, SWR_PS_CONTEXT_vX, PixelPositions_UL});
1287 Value *vYf = LOAD(pPS, {0, SWR_PS_CONTEXT_vY, PixelPositions_UL});
1288 Value *vXu = FP_TO_UI(vXf, mSimdInt32Ty);
1289 Value *vYu = FP_TO_UI(vYf, mSimdInt32Ty);
1290
1291 // the stipple pattern is 32x32, which means that one row of stipple
1292 // is stored in one 32-bit word:
1293 // vXstipple is the bit offset inside the 32-bit stipple word
1294 // vYstipple is the word index in the stipple array
1295 Value *vXstipple = AND(vXu, VIMMED1(0x1f)); // & (32-1)
1296 Value *vYstipple = AND(vYu, VIMMED1(0x1f)); // & (32-1)
1297
1298 // grab stipple pattern base address
1299 Value *stipplePtr = GEP(hPrivateData, {0, swr_draw_context_polyStipple, 0});
1300 stipplePtr = BITCAST(stipplePtr, mInt8PtrTy);
1301
1302 // perform a gather to grab the stipple word for each lane
1303 Value *vStipple = GATHERDD(VUNDEF_I(), stipplePtr, vYstipple,
1304 VIMMED1(0xffffffff), 4);
1305
1306 // create a mask with the single bit corresponding to the x stipple position
1307 // and AND it with the pattern to see whether this pixel's bit is set
1308 Value *vBitMask = LSHR(VIMMED1(0x80000000), vXstipple);
1309 Value *vStippleMask = AND(vStipple, vBitMask);
1310 vStippleMask = ICMP_NE(vStippleMask, VIMMED1(0));
1311 vStippleMask = VMASK(vStippleMask);
1312
1313 if (swr_fs->info.base.uses_kill) {
1314 vActiveMask = AND(vActiveMask, vStippleMask);
1315 } else {
1316 vActiveMask = vStippleMask;
1317 }
1318 }
1319 lp_build_mask_begin(
1320 &mask, gallivm, lp_type_float_vec(32, 32 * 8), wrap(vActiveMask));
1321 uses_mask = true;
1322 }
1323
1324 lp_build_tgsi_soa(gallivm,
1325 swr_fs->pipe.tokens,
1326 lp_type_float_vec(32, 32 * 8),
1327 uses_mask ? &mask : NULL, // mask
1328 wrap(consts_ptr),
1329 wrap(const_sizes_ptr),
1330 &system_values,
1331 inputs,
1332 outputs,
1333 wrap(hPrivateData),
1334 NULL, // thread data
1335 sampler, // sampler
1336 &swr_fs->info.base,
1337 NULL); // geometry shader face
1338
1339 sampler->destroy(sampler);
1340
1341 IRB()->SetInsertPoint(unwrap(LLVMGetInsertBlock(gallivm->builder)));
1342
1343 for (uint32_t attrib = 0; attrib < swr_fs->info.base.num_outputs;
1344 attrib++) {
1345 switch (swr_fs->info.base.output_semantic_name[attrib]) {
1346 case TGSI_SEMANTIC_POSITION: {
1347 // write z
1348 LLVMValueRef outZ =
1349 LLVMBuildLoad(gallivm->builder, outputs[attrib][2], "");
1350 STORE(unwrap(outZ), pPS, {0, SWR_PS_CONTEXT_vZ});
1351 break;
1352 }
1353 case TGSI_SEMANTIC_COLOR: {
1354 for (uint32_t channel = 0; channel < TGSI_NUM_CHANNELS; channel++) {
1355 if (!outputs[attrib][channel])
1356 continue;
1357
1358 LLVMValueRef out =
1359 LLVMBuildLoad(gallivm->builder, outputs[attrib][channel], "");
1360 if (swr_fs->info.base.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS] &&
1361 swr_fs->info.base.output_semantic_index[attrib] == 0) {
1362 for (uint32_t rt = 0; rt < key.nr_cbufs; rt++) {
1363 STORE(unwrap(out),
1364 pPS,
1365 {0, SWR_PS_CONTEXT_shaded, rt, channel});
1366 }
1367 } else {
1368 STORE(unwrap(out),
1369 pPS,
1370 {0,
1371 SWR_PS_CONTEXT_shaded,
1372 swr_fs->info.base.output_semantic_index[attrib],
1373 channel});
1374 }
1375 }
1376 break;
1377 }
1378 default: {
1379 fprintf(stderr,
1380 "unknown output from FS %s[%d]\n",
1381 tgsi_semantic_names[swr_fs->info.base
1382 .output_semantic_name[attrib]],
1383 swr_fs->info.base.output_semantic_index[attrib]);
1384 break;
1385 }
1386 }
1387 }
1388
1389 LLVMValueRef mask_result = 0;
1390 if (uses_mask) {
1391 mask_result = lp_build_mask_end(&mask);
1392 }
1393
1394 IRB()->SetInsertPoint(unwrap(LLVMGetInsertBlock(gallivm->builder)));
1395
1396 if (uses_mask) {
1397 STORE(unwrap(mask_result), pPS, {0, SWR_PS_CONTEXT_activeMask});
1398 }
1399
1400 RET_VOID();
1401
1402 gallivm_verify_function(gallivm, wrap(pFunction));
1403
1404 gallivm_compile_module(gallivm);
1405
1406 // after the gallivm passes, we have to lower the core's intrinsics
1407 llvm::legacy::FunctionPassManager lowerPass(JM()->mpCurrentModule);
1408 lowerPass.add(createLowerX86Pass(this));
1409 lowerPass.run(*pFunction);
1410
1411 PFN_PIXEL_KERNEL kernel =
1412 (PFN_PIXEL_KERNEL)gallivm_jit_function(gallivm, wrap(pFunction));
1413 debug_printf("frag shader %p\n", kernel);
1414 assert(kernel && "Error: FragShader = NULL");
1415
1416 JM()->mIsModuleFinalized = true;
1417
1418 return kernel;
1419 }
1420
1421 PFN_PIXEL_KERNEL
1422 swr_compile_fs(struct swr_context *ctx, swr_jit_fs_key &key)
1423 {
1424 if (!ctx->fs->pipe.tokens)
1425 return NULL;
1426
1427 BuilderSWR builder(
1428 reinterpret_cast<JitManager *>(swr_screen(ctx->pipe.screen)->hJitMgr),
1429 "FS");
1430 PFN_PIXEL_KERNEL func = builder.CompileFS(ctx, key);
1431
1432 ctx->fs->map.insert(std::make_pair(key, make_unique<VariantFS>(builder.gallivm, func)));
1433 return func;
1434 }