bool is_ssa;
} nir_src;
-#define NIR_SRC_INIT (nir_src) { { NULL } }
+static inline nir_src
+nir_src_init(void)
+{
+ nir_src src = { { NULL } };
+ return src;
+}
+
+#define NIR_SRC_INIT nir_src_init()
-#define nir_foreach_use(reg_or_ssa_def, src) \
+#define nir_foreach_use(src, reg_or_ssa_def) \
list_for_each_entry(nir_src, src, &(reg_or_ssa_def)->uses, use_link)
-#define nir_foreach_use_safe(reg_or_ssa_def, src) \
+#define nir_foreach_use_safe(src, reg_or_ssa_def) \
list_for_each_entry_safe(nir_src, src, &(reg_or_ssa_def)->uses, use_link)
-#define nir_foreach_if_use(reg_or_ssa_def, src) \
+#define nir_foreach_if_use(src, reg_or_ssa_def) \
list_for_each_entry(nir_src, src, &(reg_or_ssa_def)->if_uses, use_link)
-#define nir_foreach_if_use_safe(reg_or_ssa_def, src) \
+#define nir_foreach_if_use_safe(src, reg_or_ssa_def) \
list_for_each_entry_safe(nir_src, src, &(reg_or_ssa_def)->if_uses, use_link)
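+
+/* A minimal usage sketch (hypothetical pass code, old_def/new_def made up),
+ * showing the new iterator-first argument order: rewrite every use of an
+ * SSA def while iterating with the _safe variant.
+ *
+ *    nir_foreach_use_safe(use_src, old_def) {
+ *       nir_instr_rewrite_src(use_src->parent_instr, use_src,
+ *                             nir_src_for_ssa(new_def));
+ *    }
+ */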
typedef struct {
bool is_ssa;
} nir_dest;
-#define NIR_DEST_INIT (nir_dest) { { { NULL } } }
+static inline nir_dest
+nir_dest_init(void)
+{
+ nir_dest dest = { { { NULL } } };
+ return dest;
+}
+#define NIR_DEST_INIT nir_dest_init()
+
-#define nir_foreach_def(reg, dest) \
+#define nir_foreach_def(dest, reg) \
list_for_each_entry(nir_dest, dest, &(reg)->defs, reg.def_link)
-#define nir_foreach_def_safe(reg, dest) \
+#define nir_foreach_def_safe(dest, reg) \
list_for_each_entry_safe(nir_dest, dest, &(reg)->defs, reg.def_link)
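+
+/* Sketch (hypothetical): count how many instructions write a register,
+ * again with the iterator named first.
+ *
+ *    unsigned num_defs = 0;
+ *    nir_foreach_def(dest, reg)
+ *       num_defs++;
+ */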
NIR_INTRINSIC_UCP_ID = 4,
/**
- * The ammount of data, starting from BASE, that this instruction may
+ * The amount of data, starting from BASE, that this instruction may
* access. This is used to provide bounds if the offset is not constant.
*/
NIR_INTRINSIC_RANGE = 5,
nir_src src;
} nir_phi_src;
-#define nir_foreach_phi_src(phi, entry) \
- foreach_list_typed(nir_phi_src, entry, node, &(phi)->srcs)
-#define nir_foreach_phi_src_safe(phi, entry) \
- foreach_list_typed_safe(nir_phi_src, entry, node, &(phi)->srcs)
+#define nir_foreach_phi_src(phi_src, phi) \
+ foreach_list_typed(nir_phi_src, phi_src, node, &(phi)->srcs)
+#define nir_foreach_phi_src_safe(phi_src, phi) \
+ foreach_list_typed_safe(nir_phi_src, phi_src, node, &(phi)->srcs)
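+
+/* Sketch (hypothetical): find the phi source coming from a given
+ * predecessor block.
+ *
+ *    nir_foreach_phi_src(phi_src, phi) {
+ *       if (phi_src->pred == pred_block)
+ *          return &phi_src->src;
+ *    }
+ */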
typedef struct {
nir_instr instr;
nir_dest dest;
} nir_parallel_copy_entry;
-#define nir_foreach_parallel_copy_entry(pcopy, entry) \
+#define nir_foreach_parallel_copy_entry(entry, pcopy) \
foreach_list_typed(nir_parallel_copy_entry, entry, node, &(pcopy)->entries)
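+
+/* Sketch (hypothetical; process_copy is made up): visit each copy in a
+ * parallel-copy instruction.
+ *
+ *    nir_foreach_parallel_copy_entry(entry, pcopy) {
+ *       process_copy(&entry->dest, &entry->src);
+ *    }
+ */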
typedef struct {
return exec_node_data(nir_instr, tail, node);
}
-#define nir_foreach_instr(block, instr) \
+#define nir_foreach_instr(instr, block) \
foreach_list_typed(nir_instr, instr, node, &(block)->instr_list)
-#define nir_foreach_instr_reverse(block, instr) \
+#define nir_foreach_instr_reverse(instr, block) \
foreach_list_typed_reverse(nir_instr, instr, node, &(block)->instr_list)
-#define nir_foreach_instr_safe(block, instr) \
+#define nir_foreach_instr_safe(instr, block) \
foreach_list_typed_safe(nir_instr, instr, node, &(block)->instr_list)
-#define nir_foreach_instr_reverse_safe(block, instr) \
+#define nir_foreach_instr_reverse_safe(instr, block) \
foreach_list_typed_reverse_safe(nir_instr, instr, node, &(block)->instr_list)
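+
+/* Sketch: the _safe variant permits removal while iterating, e.g. deleting
+ * all phis from a block (hypothetical pass code).
+ *
+ *    nir_foreach_instr_safe(instr, block) {
+ *       if (instr->type == nir_instr_type_phi)
+ *          nir_instr_remove(instr);
+ *    }
+ */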
typedef struct nir_if {
return (nir_block *) exec_list_get_head(&impl->body);
}
+static inline nir_block *
+nir_impl_last_block(nir_function_impl *impl)
+{
+ return (nir_block *) exec_list_get_tail(&impl->body);
+}
+
static inline nir_cf_node *
nir_cf_node_next(nir_cf_node *node)
{
typedef struct nir_shader_compiler_options {
bool lower_fdiv;
bool lower_ffma;
- bool lower_flrp;
+ bool lower_flrp32;
+ /** Lowers flrp for 64-bit operands, for hardware without native double support */
+ bool lower_flrp64;
bool lower_fpow;
bool lower_fsat;
bool lower_fsqrt;
- bool lower_fmod;
+ bool lower_fmod32;
+ bool lower_fmod64;
bool lower_bitfield_extract;
bool lower_bitfield_insert;
bool lower_uadd_carry;
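+
+ /* Example (hypothetical): hardware with native 32-bit flrp/fmod but no
+  * 64-bit versions would set lower_flrp64 = true and lower_fmod64 = true,
+  * leaving lower_flrp32/lower_fmod32 false.
+  */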
return func;
}
-#define nir_foreach_function(shader, func) \
+#define nir_foreach_function(func, shader) \
foreach_list_typed(nir_function, func, node, &(shader)->functions)
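+
+/* Sketch: a common pass skeleton (hypothetical; lower_impl is made up)
+ * that visits every function with an implementation:
+ *
+ *    nir_foreach_function(func, shader) {
+ *       if (func->impl)
+ *          lower_impl(func->impl);
+ *    }
+ */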
nir_shader *nir_shader_create(void *mem_ctx,
nir_block *block = nir_cf_node_as_block(nir_cf_node_next(node));
assert(block->cf_node.type == nir_cf_node_block);
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
if (instr->type != nir_instr_type_phi)
return nir_before_instr(instr);
}
void nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
nir_instr *after_me);
-/* visits basic blocks in source-code order */
-typedef bool (*nir_foreach_block_cb)(nir_block *block, void *state);
-bool nir_foreach_block(nir_function_impl *impl, nir_foreach_block_cb cb,
- void *state);
-bool nir_foreach_block_reverse(nir_function_impl *impl, nir_foreach_block_cb cb,
- void *state);
-bool nir_foreach_block_in_cf_node(nir_cf_node *node, nir_foreach_block_cb cb,
- void *state);
+uint8_t nir_ssa_def_components_read(nir_ssa_def *def);
+
+/*
+ * Finds the next basic block in source-code order; returns NULL if there
+ * is none.
+ */
+
+nir_block *nir_block_cf_tree_next(nir_block *block);
+
+/* Performs the opposite of nir_block_cf_tree_next() */
+
+nir_block *nir_block_cf_tree_prev(nir_block *block);
+
+/* Gets the first block in a CF node in source-code order */
+
+nir_block *nir_cf_node_cf_tree_first(nir_cf_node *node);
+
+/* Gets the last block in a CF node in source-code order */
+
+nir_block *nir_cf_node_cf_tree_last(nir_cf_node *node);
+
+/* Gets the next block after a CF node in source-code order */
+
+nir_block *nir_cf_node_cf_tree_next(nir_cf_node *node);
+
+/* Macros for loops that visit blocks in source-code order */
+
+#define nir_foreach_block(block, impl) \
+ for (nir_block *block = nir_start_block(impl); block != NULL; \
+ block = nir_block_cf_tree_next(block))
+
+#define nir_foreach_block_safe(block, impl) \
+ for (nir_block *block = nir_start_block(impl), \
+ *next = nir_block_cf_tree_next(block); \
+ block != NULL; \
+ block = next, next = nir_block_cf_tree_next(block))
+
+#define nir_foreach_block_reverse(block, impl) \
+ for (nir_block *block = nir_impl_last_block(impl); block != NULL; \
+ block = nir_block_cf_tree_prev(block))
+
+#define nir_foreach_block_reverse_safe(block, impl) \
+ for (nir_block *block = nir_impl_last_block(impl), \
+ *prev = nir_block_cf_tree_prev(block); \
+ block != NULL; \
+ block = prev, prev = nir_block_cf_tree_prev(block))
+
+#define nir_foreach_block_in_cf_node(block, node) \
+ for (nir_block *block = nir_cf_node_cf_tree_first(node); \
+ block != nir_cf_node_cf_tree_next(node); \
+ block = nir_block_cf_tree_next(block))
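+
+/* Sketch of porting a pass from the old callback interface to the new
+ * macros (hypothetical pass code):
+ *
+ *    Before:
+ *       static bool visit_block(nir_block *block, void *state) { ... }
+ *       nir_foreach_block(impl, visit_block, state);
+ *
+ *    After:
+ *       nir_foreach_block(block, impl) {
+ *          ...
+ *       }
+ */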
/* If the following CF node is an if, this function returns that if.
* Otherwise, it returns NULL.
nir_function *entrypoint);
void nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint);
-void nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint);
-
void nir_assign_var_locations(struct exec_list *var_list,
unsigned *size,
int (*type_size)(const struct glsl_type *));
* while 4 and 5 represent 0 and 1 respectively.
*/
uint8_t swizzles[32][4];
+
+ /**
+ * Bitmap of textures that need srgb to linear conversion. If
+ * (lower_srgb & (1 << texture_index)) then the rgb (xyz) components
+ * of the texture are lowered to linear.
+ */
+ unsigned lower_srgb;
} nir_lower_tex_options;
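+
+/* Example (hypothetical caller): request srgb-to-linear lowering for
+ * textures 0 and 2:
+ *
+ *    nir_lower_tex_options opts = { 0 };
+ *    opts.lower_srgb = (1 << 0) | (1 << 2);
+ *    nir_lower_tex(shader, &opts);
+ */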
bool nir_lower_tex(nir_shader *shader,
void nir_lower_two_sided_color(nir_shader *shader);
+void nir_lower_clamp_color_outputs(nir_shader *shader);
+
+typedef struct nir_lower_wpos_ytransform_options {
+ int state_tokens[5];
+ bool fs_coord_origin_upper_left :1;
+ bool fs_coord_origin_lower_left :1;
+ bool fs_coord_pixel_center_integer :1;
+ bool fs_coord_pixel_center_half_integer :1;
+} nir_lower_wpos_ytransform_options;
+
+bool nir_lower_wpos_ytransform(nir_shader *shader,
+ const nir_lower_wpos_ytransform_options *options);
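+
+/* Sketch (hypothetical state-tracker code): a GL-style context with an
+ * upper-left origin and integer pixel centers might use:
+ *
+ *    nir_lower_wpos_ytransform_options opts = {
+ *       .fs_coord_origin_upper_left = true,
+ *       .fs_coord_pixel_center_integer = true,
+ *    };
+ *    // state_tokens must also name the driver's WPOS-transform state slot
+ *    nir_lower_wpos_ytransform(shader, &opts);
+ */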
+
+typedef struct nir_lower_drawpixels_options {
+ int texcoord_state_tokens[5];
+ int scale_state_tokens[5];
+ int bias_state_tokens[5];
+ unsigned drawpix_sampler;
+ unsigned pixelmap_sampler;
+ bool pixel_maps :1;
+ bool scale_and_bias :1;
+} nir_lower_drawpixels_options;
+
+void nir_lower_drawpixels(nir_shader *shader,
+ const nir_lower_drawpixels_options *options);
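+
+/* Sketch (hypothetical): lower glDrawPixels with scale/bias, using a
+ * sampler unit the driver reserved for the pass:
+ *
+ *    nir_lower_drawpixels_options opts = {
+ *       .drawpix_sampler = 1,
+ *       .scale_and_bias = true,
+ *    };
+ *    nir_lower_drawpixels(shader, &opts);
+ */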
+
void nir_lower_atomics(nir_shader *shader,
const struct gl_shader_program *shader_program);
void nir_lower_to_source_mods(nir_shader *shader);
bool nir_lower_gs_intrinsics(nir_shader *shader);
+typedef enum {
+ nir_lower_drcp = (1 << 0),
+ nir_lower_dsqrt = (1 << 1),
+ nir_lower_drsq = (1 << 2),
+ nir_lower_dtrunc = (1 << 3),
+ nir_lower_dfloor = (1 << 4),
+ nir_lower_dceil = (1 << 5),
+ nir_lower_dfract = (1 << 6),
+ nir_lower_dround_even = (1 << 7),
+ nir_lower_dmod = (1 << 8)
+} nir_lower_doubles_options;
+
+void nir_lower_doubles(nir_shader *shader, nir_lower_doubles_options options);
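+
+/* Sketch (hypothetical): a driver without native 64-bit rcp/sqrt would
+ * request just those lowerings:
+ *
+ *    nir_lower_doubles(shader, nir_lower_drcp | nir_lower_dsqrt);
+ */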
void nir_lower_double_pack(nir_shader *shader);
bool nir_normalize_cubemap_coords(nir_shader *shader);