/* The callee will overwrite it if it supports a lower version.
*
- * The caller should check the value and access up-to the version supported
- * by the the callee.
+ * The caller should check the value and access up to the version supported
+ * by the callee.
*/
/* NOTE: Do not use the MESA_GLINTEROP_DEVICE_INFO_VERSION macro */
uint32_t version;
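A minimal caller-side sketch of the negotiation described above;
query_device_info() is a hypothetical stand-in for the real interop entry
points declared in GL/mesa_glinterop.h:

   struct mesa_glinterop_device_info info = {0};
   info.version = 2;            /* highest version this caller understands */
   query_device_info(&info);    /* hypothetical callee; may lower info.version */
   if (info.version >= 2) {
      /* only now read the fields that were added in version 2 */
   }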
/* The callee will overwrite it if it supports a lower version.
*
- * The caller should check the value and access up-to the version supported
- * by the the callee.
+ * The caller should check the value and access up to the version supported
+ * by the callee.
*/
/* NOTE: Do not use the MESA_GLINTEROP_EXPORT_IN_VERSION macro */
uint32_t version;
/* The callee will overwrite it if it supports a lower version.
*
- * The caller should check the value and access up-to the version supported
- * by the the callee.
+ * The caller should check the value and access up to the version supported
+ * by the callee.
*/
/* NOTE: Do not use the MESA_GLINTEROP_EXPORT_OUT_VERSION macro */
uint32_t version;
ast_expression::set_is_lhs(bool new_value)
{
/* is_lhs is tracked only to print "variable used uninitialized" warnings,
- * if we lack a identifier we can just skip it.
+ * if we lack an identifier we can just skip it.
*/
if (this->primary_expression.identifier == NULL)
return;
nir_visitor::visit(ir_constant *ir)
{
/*
- * We don't know if this variable is an an array or struct that gets
- * dereferenced, so do the safe thing an make it a variable with a
+ * We don't know if this variable is an array or struct that gets
+ * dereferenced, so do the safe thing and make it a variable with a
* constant initializer and return a dereference.
*/
} nir_deref_var;
/* This enum describes how the array is referenced. If the deref is
- * direct then the base_offset is used. If the deref is indirect then then
+ * direct then the base_offset is used. If the deref is indirect then the
* offset is given by base_offset + indirect. If the deref is a wildcard
* then the deref refers to all of the elements of the array at the same
* time. Wildcard dereferences are only ever allowed in copy_var
nir_instr *match = (nir_instr *) entry->key;
nir_ssa_def *new_def = nir_instr_get_dest_ssa_def(match);
- /* It's safe to replace a exact instruction with an inexact one as
+ /* It's safe to replace an exact instruction with an inexact one as
* long as we make it exact. If we got here, the two instructions are
* exactly identical in every other way so, once we've set the exact
* bit, they are the same.
/*
* Interpolation of input. The interp_var_at* intrinsics are similar to the
- * load_var intrinsic acting an a shader input except that they interpolate
+ * load_var intrinsic acting on a shader input except that they interpolate
* the input differently. The at_sample and at_offset intrinsics take an
- * aditional source that is a integer sample id or a vec2 position offset
+ * additional source that is an integer sample id or a vec2 position offset
* respectively.
*/
* fully-direct references we see and store them in the
* direct_deref_nodes hash table.
*
- * 2) Walk over the the list of fully-qualified direct derefs generated in
+ * 2) Walk over the list of fully-qualified direct derefs generated in
* the previous pass. For each deref, we determine if it can ever be
* aliased, i.e. if there is an indirect reference anywhere that may
* refer to it. If it cannot be aliased, we mark it for lowering to an
*
* The bias of the y-coordinate depends on whether y-inversion takes place
* (adjY[1]) or not (adjY[0]), which is in turn dependent on whether we are
- * drawing to an FBO (causes additional inversion), and whether the the pipe
+ * drawing to an FBO (causes additional inversion), and whether the pipe
* driver origin and the requested origin differ (the latter condition is
* stored in the 'invert' variable).
*
/*
* This file implements an optimization that deletes statically
- * unreachable/dead code. In NIR, one way this can happen if if an if
+ * unreachable/dead code. In NIR, one way this can happen is when an if
* statement has a constant condition:
*
* if (true) {
b->shader->info.cs.local_size[2] = mode->literals[2];
break;
case SpvExecutionModeLocalSizeHint:
- break; /* Nothing do do with this */
+ break; /* Nothing to do with this */
case SpvExecutionModeOutputVertices:
assert(b->shader->stage == MESA_SHADER_GEOMETRY);
``clear_depth_stencil`` clears a single depth, stencil or depth/stencil surface
with the specified depth and stencil values (for combined depth/stencil buffers,
-is is also possible to only clear one or the other part). While it is only
+it is also possible to only clear one or the other part). While it is only
possible to clear one surface at a time (which can include several layers),
this surface need not be bound to the framebuffer.
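A minimal usage sketch; the argument order here is an assumption, so check
the ``clear_depth_stencil`` prototype in ``p_context.h``::

   /* clear only the depth half of a combined depth/stencil surface */
   pipe->clear_depth_stencil(pipe, zs_surf, PIPE_CLEAR_DEPTH,
                             1.0, 0 /* stencil value, unused here */,
                             0, 0, zs_surf->width, zs_surf->height);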
* | fp16[3] /
* 0x08: | padding
* 0x10: | int16[0] \
- * | int16[1] |___ swizzled int16 channels for for "small integer"
+ * | int16[1] |___ swizzled int16 channels for "small integer"
* | int16[2] | formats (<= 16 bits per component, integer)
* | int16[3] /
* 0x18: | padding
}
}
- /* need a integer number of instruction "groups" (sets of 16
+ /* need an integer number of instruction "groups" (sets of 16
* instructions on a4xx or sets of 4 instructions on a3xx),
* so pad out w/ NOPs if needed: (NOTE each instruction is 64bits)
*/
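The pad-out is plain round-up-to-a-multiple arithmetic; a minimal sketch,
where emit_nop() is a hypothetical helper:

   unsigned group = is_a4xx ? 16 : 4;   /* instructions per group (power of two) */
   unsigned padded = (n_instrs + group - 1) & ~(group - 1);
   while (n_instrs < padded) {
      emit_nop();                       /* hypothetical helper */
      n_instrs++;
   }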
/*
* Note that we'd actually want to skip position (as we won't use
* the attribute in the fs) but can't. The reason is that we don't
- * actually have a input/output map for setup (even though it looks
+ * actually have an input/output map for setup (even though it looks
* like we do...). Could adjust for this though even without a map
* (in llvmpipe_create_fs_state()).
*/
virtual void erase();
virtual bool insert(void *data);
- // move item to a another list, no consistency with its iterators though
+ // move item to another list, no consistency with its iterators though
void moveToList(DLList&);
private:
return tx->map;
}
-/* Copies data from the resource into the the transfer's temporary GART
+/* Copies data from the resource into the transfer's temporary GART
* buffer. Also updates buf->data if present.
*
* Maybe just migrate to GART right away if we actually need to do this. */
* of the loop it reads the value written by instruction 0 and in all other
* iterations it reads the value written by instruction 3.
*
- * @param read_cb This function will be called for for every instruction that
+ * @param read_cb This function will be called for every instruction that
* has been determined to be a reader of writer.
* @param write_cb This function will be called for every instruction after
* writer.
} else {
fc_state->PredStack[fc_state->LoopDepth] =
fc_state->PredicateReg;
- /* Copy the the current predicate value to this loop's
+ /* Copy the current predicate value to this loop's
* predicate register */
/* Use the old predicate value for src0 */
/*
* Note that we'd actually want to skip position (as we won't use
* the attribute in the fs) but can't. The reason is that we don't
- * actually have a input/output map for setup (even though it looks
+ * actually have an input/output map for setup (even though it looks
* like we do...). Could adjust for this though even without a map.
*/
} else {
/*
* Note that we'd actually want to skip position (as we won't use
* the attribute in the fs) but can't. The reason is that we don't
- * actually have a input/output map for setup (even though it looks
+ * actually have an input/output map for setup (even though it looks
* like we do...). Could adjust for this though even without a map.
*/
draw_emit_vertex_attr(vinfo, EMIT_4F, vs_index);
*block_width = format_cap_table[format].block_width;
*block_height = format_cap_table[format].block_height;
*bytes_per_block = format_cap_table[format].block_bytes;
- /* Make sure the the table entry was valid */
+ /* Make sure the table entry was valid */
if (*block_width == 0)
debug_printf("Bad table entry for %s\n", svga_format_name(format));
assert(*block_width);
/**
- * Allocate space for a int[4] immediate.
+ * Allocate space for an int[4] immediate.
* \return the index/position of the immediate.
*/
static unsigned
mask[0] = _mm256_set_epi8(-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0xC, 0x8, 0x4, 0x0,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0xC, 0x8, 0x4, 0x0);
- // pull out the the 8bit 4x2 coverage for samples 0-7 into the lower 32 bits of each 128bit lane
+ // pull out the 8bit 4x2 coverage for samples 0-7 into the lower 32 bits of each 128bit lane
__m256i packedCoverage0 = _simd_shuffle_epi8(sampleCoverage[0], mask[0]);
__m256i packedCoverage1;
if(T::MultisampleT::numSamples > 8)
{
- // pull out the the 8bit 4x2 coverage for samples 8-15 into the lower 32 bits of each 128bit lane
+ // pull out the 8bit 4x2 coverage for samples 8-15 into the lower 32 bits of each 128bit lane
packedCoverage1 = _simd_shuffle_epi8(sampleCoverage[1], mask[0]);
}
// The stream masks specify which attributes are sent to which streams.
// These masks help the FE to setup the pPrimData buffer that is passed
- // the the Stream Output Shader (SOS) function.
+ // to the Stream Output Shader (SOS) function.
uint32_t streamMasks[MAX_SO_STREAMS];
// Number of attributes, including position, per vertex that are streamed out.
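Assuming each entry is a per-stream bitmask of attribute slots (an
illustrative reading of the comment above, not a verified encoding):

   streamMasks[0] = (1u << 0) | (1u << 2);   /* send attribs 0 and 2 to stream 0 */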
for (uint32_t tileID : macroTiles)
{
- // Only work on tiles for for this numa node
+ // Only work on tiles for this numa node
uint32_t x, y;
pDC->pTileMgr->getTileIndices(tileID, x, y);
if (((x ^ y) & numaMask) != numaNode)
return false;
}
- /* We assert that the the clamped address is the first
+ /* We assert that the clamped address is the first
* argument, and the UBO base address is the second argument.
* This is arbitrary, but simpler than supporting flipping the
* two either way.
}
/* If we would block on the previously chosen node, but would
- * block less on this one, then then prefer it.
+ * block less on this one, then prefer it.
*/
if (chosen->unblocked_time > state->time &&
n->unblocked_time < chosen->unblocked_time) {
void (*destroy)(struct pipe_video_buffer *buffer);
/**
- * get a individual sampler view for each plane
+ * get an individual sampler view for each plane
*/
struct pipe_sampler_view **(*get_sampler_view_planes)(struct pipe_video_buffer *buffer);
/**
- * get a individual sampler view for each component
+ * get an individual sampler view for each component
*/
struct pipe_sampler_view **(*get_sampler_view_components)(struct pipe_video_buffer *buffer);
/**
- * get a individual surfaces for each plane
+ * get individual surfaces for each plane
*/
struct pipe_surface **(*get_surfaces)(struct pipe_video_buffer *buffer);
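A hedged usage sketch for the per-plane views, using NV12 (a luma plane plus
an interleaved chroma plane) as the example layout:

   struct pipe_sampler_view **views = buf->get_sampler_view_planes(buf);
   /* for NV12: views[0] samples the Y plane, views[1] the UV plane */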
NineUnknown_ConvertRefToBind(NineUnknown(This->state.rt[i]));
}
- /* Initialize a dummy VBO to be used when a a vertex declaration does not
+ /* Initialize a dummy VBO to be used when a vertex declaration does not
* specify all the inputs needed by vertex shader, on win default behavior
* is to pass 0,0,0,0 to the shader */
{
* GBM_BO_IMPORT_EGL_IMAGE
* GBM_BO_IMPORT_FD
*
- * The the gbm bo shares the underlying pixels but its life-time is
+ * The gbm bo shares the underlying pixels but its life-time is
* independent of the foreign object.
*
* \param gbm The gbm device returned from gbm_create_device()
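A minimal sketch of the GBM_BO_IMPORT_FD case; the dmabuf fd and the
dimensions are placeholders, the struct fields are those in gbm.h:

   struct gbm_import_fd_data data = {
      .fd     = dmabuf_fd,          /* obtained elsewhere (e.g. from EGL/DRM) */
      .width  = 1920,
      .height = 1080,
      .stride = 1920 * 4,
      .format = GBM_FORMAT_ARGB8888,
   };
   struct gbm_bo *bo =
      gbm_bo_import(gbm, GBM_BO_IMPORT_FD, &data, GBM_BO_USE_RENDERING);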
if (gc == new)
return;
- /* We are either switching to no context, away from a indirect
+ /* We are either switching to no context, away from an indirect
* context to a direct context or from one dpy to another and have
* to send a request to the dpy to unbind the previous context.
*/
// string.
//
// Integer types:
-// TypeWithSize - maps an integer to a int type.
+// TypeWithSize - maps an integer to an int type.
// Int32, UInt32, Int64, UInt64, TimeInMillis
// - integers of known sizes.
// BiggestInt - the biggest signed integer type.
return str;
}
-// The following two functions only make sense if the the system
+// The following two functions only make sense if the system
// uses UTF-16 for wide string encoding. All supported systems
// with 16 bit wchar_t (Windows, Cygwin, Symbian OS) do use UTF-16.
* In general, you can find shader thread maximums by looking at the "Maximum
* Number of Threads" field in the Intel PRM description of the 3DSTATE_VS,
* 3DSTATE_GS, 3DSTATE_HS, 3DSTATE_DS, and 3DSTATE_PS commands. URB entry
- * limits come from the "Number of URB Entries" field in the the
+ * limits come from the "Number of URB Entries" field in the
* 3DSTATE_URB_VS command and friends.
*
* These fields are used to calculate the scratch space to allocate. The
/* Data starts at suboffet 3 in 32-bit units (12 bytes), so it is not
* 64-bit aligned and the current implementation fails to read the
- * data properly. Instead, when there is is a double input varying,
+ * data properly. Instead, when there is a double input varying,
* read it as vector of floats with twice the number of components.
*/
if (attr->type == BRW_REGISTER_TYPE_DF) {
break;
case nir_op_fmul:
- /* Only absorb a fmul into a ffma if the fmul is is only used in fadd
+ /* Only absorb a fmul into a ffma if the fmul is only used in fadd
* operations. This prevents us from being too aggressive with our
* fusing which can actually lead to more instructions.
*/
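In plain scalar C terms, the fusion this guards looks like:

   float t = a * b;   /* the fmul */
   float r = t + c;   /* an fadd; foldable to fmaf(a, b, c), but only
                       * when fadds are the sole consumers of t */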
}
/* This is fairly arbitrary; the trade off is memory usage vs. extra overhead
- * from wrapping. On Gen7, 32768 should be enough for for 128 snapshots before
+ * from wrapping. On Gen7, 32768 should be enough for 128 snapshots before
* wrapping (since each is 256 bytes).
*/
#define BOOKEND_BO_SIZE_BYTES 32768
assert(xrb->pixmap);
- /* Install error handler for XGetImage() in case the the window
+ /* Install error handler for XGetImage() in case the window
* isn't mapped. If we fail we'll create a temporary XImage.
*/
mesaXErrorFlag = 0;
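The standard Xlib pattern for guarding XGetImage() against BadMatch on an
unmapped window; a minimal sketch, independent of the flag used here:

   static int saw_error;

   static int
   error_handler(Display *dpy, XErrorEvent *e)
   {
      saw_error = e->error_code;   /* note the error and keep going */
      return 0;
   }

   XErrorHandler old = XSetErrorHandler(error_handler);
   XImage *img = XGetImage(dpy, win, 0, 0, w, h, AllPlanes, ZPixmap);
   XSetErrorHandler(old);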
/**
- * Record a GL_INVALID_VALUE error when a invalid vertex attribute
+ * Record a GL_INVALID_VALUE error when an invalid vertex attribute
* index is found.
*/
static void
*/
GLint buffer[1];
- /* At this point a internalformat is valid if it is valid as a texture or
+ /* At this point an internalformat is valid if it is valid as a texture or
* as a renderbuffer format. The checks are different because those methods
- * return different values when passing non supported internalformats */
+ * return different values when passing unsupported internalformats */
if (_mesa_base_tex_format(ctx, internalformat) < 0 &&
/**
* Sometimes we treat GLfloats as GLints. On x86 systems, moving a float
- * as a int (thereby using integer registers instead of FP registers) is
+ * as an int (thereby using integer registers instead of FP registers) is
* a performance win. Typically, this can be done with ordinary casts.
* But with gcc's -fstrict-aliasing flag (which defaults to on in gcc 3.0)
* these casts generate warnings.
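A minimal sketch of the usual workaround: move the bits through a union
(which gcc's strict-aliasing rules permit) instead of casting pointers:

   union fi { GLfloat f; GLint i; };

   static inline GLint
   float_as_int(GLfloat f)
   {
      union fi u;
      u.f = f;
      return u.i;   /* an integer move, and no strict-aliasing warning */
   }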
* glCreateTextures should throw errors if target = 0. This is not exposed to
* the rest of Mesa to encourage Mesa internals to use nameless textures,
* which do not require expensive hash lookups.
- * \param target either 0 or a a valid / error-checked texture target enum
+ * \param target either 0 or a valid / error-checked texture target enum
*/
static void
create_textures(struct gl_context *ctx, GLenum target,
ir->coordinate->accept(this);
/* Put our coords in a temp. We'll need to modify them for shadow,
- * projection, or LOD, so the only case we'd use it as is is if
+ * projection, or LOD, so the only case we'd use it as-is is if
* we're doing plain old texturing. Mesa IR optimization should
* handle cleaning up our mess in that case.
*/
- /* there's lot of ways how to do this. We just use first few bits,
-    since we have no knowledge of sample positions here. When
-    app-supplied mask though is used too might need to be smarter.
-    Also, there's a interface restriction here in theory it is
-    encouraged this mask not be the same at each pixel. */
+ /* There are lots of ways to do this. We just use the first few bits,
+    since we have no knowledge of sample positions here. When an
+    app-supplied mask is used too, we might need to be smarter.
+    Also, there's an interface restriction here: in theory it is
+    encouraged that this mask not be the same at each pixel. */
sample_mask = (1 << nr_bits) - 1;
if (st->ctx->Multisample.SampleCoverageInvert)
/**
- * Allocate a renderbuffer for a an on-screen window (not a user-created
+ * Allocate a renderbuffer for an on-screen window (not a user-created
* renderbuffer). The window system code determines the format.
*/
struct gl_renderbuffer *
ir->coordinate->accept(this);
/* Put our coords in a temp. We'll need to modify them for shadow,
- * projection, or LOD, so the only case we'd use it as is is if
+ * projection, or LOD, so the only case we'd use it as-is is if
* we're doing plain old texturing. The optimization passes on
* glsl_to_tgsi_visitor should handle cleaning up our mess in that case.
*/
*
* The bias of the y-coordinate depends on whether y-inversion takes place
* (adjY[1]) or not (adjY[0]), which is in turn dependent on whether we are
- * drawing to an FBO (causes additional inversion), and whether the the pipe
+ * drawing to an FBO (causes additional inversion), and whether the pipe
* driver origin and the requested origin differ (the latter condition is
* stored in the 'invert' variable).
*
*
* The bias of the y-coordinate depends on whether y-inversion takes place
* (adjY[1]) or not (adjY[0]), which is in turn dependent on whether we are
- * drawing to an FBO (causes additional inversion), and whether the the pipe
+ * drawing to an FBO (causes additional inversion), and whether the pipe
* driver origin and the requested origin differ (the latter condition is
* stored in the 'invert' variable).
*