* command stream by the start_cs_cmd atom. However, since the SET_CONTEXT_REG
* packet requires that the shader type bit be set, we must initialize all
* context registers needed for compute in this function. The registers
- * intialized by the start_cs_cmd atom can be found in evereen_state.c in the
+ * initialized by the start_cs_cmd atom can be found in evergreen_state.c in the
* functions evergreen_init_atom_start_cs or cayman_init_atom_start_cs depending
* on the GPU family.
*/
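To make the shader-type constraint concrete, here is a rough sketch of the packet header difference; the PKT3()/PKT3_SHADER_TYPE_S() macro names follow the usual r600d.h naming but are recalled from memory and should be treated as assumptions rather than a quote from the header:

/* Sketch only: bit 1 of a type-3 packet header selects the shader type.
 * Without it, SET_CONTEXT_REG programs the 3D context; with it set, the
 * same opcode programs the compute context, which is why this function
 * must initialize every context register compute needs. */
uint32_t header_3d      = PKT3(PKT3_SET_CONTEXT_REG, 1, 0);
uint32_t header_compute = PKT3(PKT3_SET_CONTEXT_REG, 1, 0) | PKT3_SHADER_TYPE_S(1);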
int num_threads;
int num_stack_entries;
- /* since all required registers are initialised in the
+ /* since all required registers are initialized in the
* start_compute_cs_cmd atom, we can EMIT_EARLY here.
*/
r600_init_command_buffer(cb, 256);
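For orientation, a sketch of how the buffer reserved above is then filled; PKT3_CONTEXT_CONTROL and the 0x80000000 payloads are the customary r600g start-cs preamble as recalled here, not a verbatim quote:

/* Illustrative only: each r600_store_value() appends one dword to cb,
 * so everything emitted below must fit in the 256 dwords reserved above. */
r600_store_value(cb, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
r600_store_value(cb, 0x80000000);
r600_store_value(cb, 0x80000000);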
* R_008E28_SQ_STATIC_THREAD_MGMT3
*/
- /* XXX: We may need to adjust the thread and stack resouce
+ /* XXX: We may need to adjust the thread and stack resource
* values for 3D/compute interop */
r600_store_config_reg_seq(cb, R_008C18_SQ_THREAD_RESOURCE_MGMT_1, 5);
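A sketch of how that sequence is expected to be completed: a _seq(reg, 5) header must be followed by exactly five value dwords, one per consecutive register. The field macros (S_008C1C_NUM_LS_THREADS, S_008C28_NUM_LS_STACK_ENTRIES) are recalled from evergreend.h and should be treated as assumptions:

/* One dword per register from SQ_THREAD_RESOURCE_MGMT_1 through
 * SQ_STACK_RESOURCE_MGMT_3; compute runs on the LS slots.  Assumed sketch. */
r600_store_value(cb, 0);                                    /* no PS/VS/GS/ES threads */
r600_store_value(cb, S_008C1C_NUM_LS_THREADS(num_threads)); /* all threads to CS (LS) */
r600_store_value(cb, 0);                                    /* PS/VS stack entries: 0 */
r600_store_value(cb, 0);                                    /* GS/ES stack entries: 0 */
r600_store_value(cb, S_008C28_NUM_LS_STACK_ENTRIES(num_stack_entries)); /* CS (LS) stack */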
unsigned id = 1;
unsigned i;
/* !!!
- * To avoid GPU lockup registers must be emited in a specific order
+ * To avoid GPU lockup registers must be emitted in a specific order
- * (no kidding ...). The order below is important and have been
- * partialy infered from analyzing fglrx command stream.
+ * (no kidding ...). The order below is important and has been
+ * partially inferred from analyzing the fglrx command stream.
*
- * Don't reorder atom without carefully checking the effect (GPU lockup
+ * Don't reorder atoms without carefully checking the effect (GPU lockup
* or piglit regression).
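To illustrate the pattern this warning guards, a sketch of the id-ordered atom registration that typically follows it in r600g; the r600_init_atom helper, the atom names, and the dword counts here are assumptions for illustration, not an exact quote:

/* Atoms are registered with increasing ids and dirty atoms are later
 * re-emitted in id order, so swapping two of these calls changes the
 * order in which registers hit the GPU.  Names/sizes are illustrative. */
r600_init_atom(rctx, &rctx->framebuffer.atom, id++, evergreen_emit_framebuffer_state, 0);
r600_init_atom(rctx, &rctx->db_misc_state.atom, id++, evergreen_emit_db_misc_state, 10);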
unsigned ar_chan;
unsigned ar_handling;
unsigned r6xx_nop_after_rel_dst;
- bool index_loaded[2];
- unsigned index_reg[2]; /* indexing register CF_INDEX_[01] */
+ bool index_loaded[2];
+ unsigned index_reg[2]; /* indexing register CF_INDEX_[01] */
unsigned debug_id;
struct r600_isa* isa;
};