#include "nv50/nv50_stateobj.h"
#include "nv50/nv50_context.h"
+#include "nv50/nv50_query_hw.h"
#include "nv50/nv50_3d.xml.h"
#include "nv50/nv50_texture.xml.h"
* in advance to maintain elegant separate shader objects.)
*/
-static INLINE uint32_t
+static inline uint32_t
nv50_colormask(unsigned mask)
{
uint32_t ret = 0;
#define NV50_BLEND_FACTOR_CASE(a, b) \
case PIPE_BLENDFACTOR_##a: return NV50_BLEND_FACTOR_##b
-static INLINE uint32_t
+static inline uint32_t
nv50_blend_fac(unsigned factor)
{
switch (factor) {
struct nv50_blend_stateobj *so = CALLOC_STRUCT(nv50_blend_stateobj);
int i;
bool emit_common_func = cso->rt[0].blend_enable;
- uint32_t ms;
if (nv50_context(pipe)->screen->tesla->oclass >= NVA3_3D_CLASS) {
SB_BEGIN_3D(so, BLEND_INDEPENDENT, 1);
SB_DATA (so, nv50_colormask(cso->rt[0].colormask));
}
- ms = 0;
- if (cso->alpha_to_coverage)
- ms |= NV50_3D_MULTISAMPLE_CTRL_ALPHA_TO_COVERAGE;
- if (cso->alpha_to_one)
- ms |= NV50_3D_MULTISAMPLE_CTRL_ALPHA_TO_ONE;
-
- SB_BEGIN_3D(so, MULTISAMPLE_CTRL, 1);
- SB_DATA (so, ms);
-
assert(so->size <= (sizeof(so->state) / sizeof(so->state[0])));
return so;
}
SB_DATA (so, 0);
}
+ SB_BEGIN_3D(so, DEPTH_BOUNDS_EN, 1);
+ if (cso->depth.bounds_test) {
+ SB_DATA (so, 1);
+ SB_BEGIN_3D(so, DEPTH_BOUNDS(0), 2);
+ SB_DATA (so, fui(cso->depth.bounds_min));
+ SB_DATA (so, fui(cso->depth.bounds_max));
+ } else {
+ SB_DATA (so, 0);
+ }
+
if (cso->stencil[0].enabled) {
SB_BEGIN_3D(so, STENCIL_ENABLE, 5);
SB_DATA (so, 1);
#define NV50_TSC_WRAP_CASE(n) \
case PIPE_TEX_WRAP_##n: return NV50_TSC_WRAP_##n
-static INLINE unsigned
+static inline unsigned
nv50_tsc_wrap_mode(unsigned wrap)
{
switch (wrap) {
FREE(hwcso);
}
-static INLINE void
+static inline void
nv50_stage_sampler_states_bind(struct nv50_context *nv50, int s,
unsigned nr, void **hwcso)
{
FREE(nv50_tic_entry(view));
}
-static INLINE void
+static inline void
nv50_stage_set_sampler_views(struct nv50_context *nv50, int s,
unsigned nr,
struct pipe_sampler_view **views)
if (cso->stream_output.num_outputs)
prog->pipe.stream_output = cso->stream_output;
+ prog->translated = nv50_program_translate(
+ prog, nv50_context(pipe)->screen->base.device->chipset,
+ &nouveau_context(pipe)->debug);
+
return (void *)prog;
}
if (nouveau_context(pipe)->screen->class_3d >= NVA0_3D_CLASS) {
targ->pq = pipe->create_query(pipe,
- NVA0_QUERY_STREAM_OUTPUT_BUFFER_OFFSET, 0);
+ NVA0_HW_QUERY_STREAM_OUTPUT_BUFFER_OFFSET, 0);
if (!targ->pq) {
FREE(targ);
return NULL;
return &targ->pipe;
}
+/* Save the current buffer offset of a stream-output target by ending
+ * the HW query attached to it (targ->pq), tagging the query with the
+ * stream-output slot `index` first.  When `serialize` is set, a
+ * NV50_GRAPH_SERIALIZE method is pushed beforehand — presumably to
+ * order the query write against in-flight 3D work (NOTE(review):
+ * confirm against nvc0's equivalent path).
+ */
+static void
+nva0_so_target_save_offset(struct pipe_context *pipe,
+ struct pipe_stream_output_target *ptarg,
+ unsigned index, bool serialize)
+{
+ struct nv50_so_target *targ = nv50_so_target(ptarg);
+
+ if (serialize) {
+ struct nouveau_pushbuf *push = nv50_context(pipe)->base.pushbuf;
+ /* 2 dwords: one method header + one data word */
+ PUSH_SPACE(push, 2);
+ BEGIN_NV04(push, SUBC_3D(NV50_GRAPH_SERIALIZE), 1);
+ PUSH_DATA (push, 0);
+ }
+
+ /* Record which SO slot this offset belongs to, then end the query so
+ * the GPU writes the current buffer offset into the query result. */
+ nv50_query(targ->pq)->index = index;
+ pipe->end_query(pipe, targ->pq);
+}
+
static void
nv50_so_target_destroy(struct pipe_context *pipe,
struct pipe_stream_output_target *ptarg)