+2007-10-14 Kazu Hirata <kazu@codesourcery.com>
+
+ * config/fixed-bit.c, config/i386/cpuid.h, config/i386/i386.c,
+ config/i386/i386.md, config/i386/sse.md, function.c, jump.c,
+ modulo-sched.c, ra-conflict.c, toplev.c, tree-eh.c, tree-sra.c,
+ tree-ssa-dse.c, tree-vect-analyze.c, tree-vect-patterns.c,
+ tree-vect-transform.c: Fix comment typos.
+ * doc/extend.texi: Fix a typo.
+
2007-10-13 David Edelsohn <edelsohn@gnu.org>
* config/rs6000/aix53.h: New file.
r = pos_a >> (FIXED_WIDTH - FBITS);
#endif
- /* Unsigned divide r by pos_b to quo_r. The remanider is in mod. */
+ /* Unsigned divide r by pos_b to quo_r. The remainder is in mod. */
quo_r = (UINT_C_TYPE)r / (UINT_C_TYPE)pos_b;
mod = (UINT_C_TYPE)r % (UINT_C_TYPE)pos_b;
quo_s = 0;
/* Return cpuid data for requested cpuid level, as found in returned
eax, ebx, ecx and edx registers. The function checks if cpuid is
supported and returns 1 for valid cpuid information or 0 for
- unsupported cpuid level. All pointers are requred to be non-null. */
+ unsupported cpuid level. All pointers are required to be non-null. */
static __inline int
__get_cpuid (unsigned int __level,
replacement is long decoded, so this split helps here as well. */
m_K6,
- /* X86_TUNE_USE_VECTOR_CONVERTS: Preffer vector packed SSE conversion
+ /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
from integer to FP. */
m_AMDFAM10,
};
#define PPERM_REV_INV 0x60 /* bit reverse & invert src */
#define PPERM_ZERO 0x80 /* all 0's */
#define PPERM_ONES 0xa0 /* all 1's */
-#define PPERM_SIGN 0xc0 /* propigate sign bit */
-#define PPERM_INV_SIGN 0xe0 /* invert & propigate sign */
+#define PPERM_SIGN 0xc0 /* propagate sign bit */
+#define PPERM_INV_SIGN 0xe0 /* invert & propagate sign */
#define PPERM_SRC1 0x00 /* use first source byte */
#define PPERM_SRC2 0x10 /* use second source byte */
/* Validate whether a SSE5 instruction is valid or not.
OPERANDS is the array of operands.
NUM is the number of operands.
- USES_OC0 is true if the instruction uses OC0 and provides 4 varients.
+ USES_OC0 is true if the instruction uses OC0 and provides 4 variants.
NUM_MEMORY is the maximum number of memory operands to accept. */
bool ix86_sse5_valid_op_p (rtx operands[], rtx insn, int num, bool uses_oc0, int num_memory)
{
else if (num == 4 && num_memory == 2)
{
/* If there are two memory operations, we can load one of the memory ops
- into the destination register. This is for optimizating the
+ into the destination register. This is for optimizing the
multiply/add ops, which the combiner has optimized both the multiply
and the add insns to have a memory operation. We have to be careful
that the destination doesn't overlap with the inputs. */
(UNSPECV_PROLOGUE_USE 14)
])
-;; Constants to represent pcomtrue/pcomfalse varients
+;; Constants to represent pcomtrue/pcomfalse variants
(define_constants
[(PCOM_FALSE 0)
(PCOM_TRUE 1)
}
/* Offload operand of cvtsi2ss and cvtsi2sd into memory for
!TARGET_INTER_UNIT_CONVERSIONS
- It is neccesary for the patterns to not accept nonemmory operands
+ It is necessary for the patterns to not accept nonmemory operands
as we would optimize out later. */
else if (!TARGET_INTER_UNIT_CONVERSIONS
&& TARGET_SSE_MATH && SSE_FLOAT_MODE_P (GET_MODE (operands[0]))
[(set_attr "type" "ssemuladd")
(set_attr "mode" "TI")])
-;; SSE5 parallel integer mutliply/add instructions for the intrinisics
+;; SSE5 parallel integer multiply/add instructions for the intrinsics
(define_insn "sse5_pmacsswd"
[(set (match_operand:V4SI 0 "register_operand" "=x,x,x")
(ss_plus:V4SI
v8hi __builtin_ia32_pshlw (v8hi, v8hi)
@end smallexample
-The following builtin-in functions are avaialble when @option{-msse5}
+The following built-in functions are available when @option{-msse5}
is used. The second argument must be an integer constant and generate
the machine instruction that is part of the name with the @samp{_imm}
suffix removed.
asm ("" : "=r" (output), "=m" (input) : "0" (input))
- Here 'input' is used in two occurences as input (once for the
+ Here 'input' is used in two occurrences as input (once for the
input operand, once for the address in the second output operand).
If we would replace only the occurence of the input operand (to
make the matching) we would be left with this:
value, but different pseudos) where we formerly had only one.
With more complicated asms this might lead to reload failures
which wouldn't have happen without this pass. So, iterate over
- all operands and replace all occurences of the register used. */
+ all operands and replace all occurrences of the register used. */
for (j = 0; j < noutputs; j++)
if (!rtx_equal_p (SET_DEST (p_sets[j]), input)
&& reg_overlap_mentioned_p (input, SET_DEST (p_sets[j])))
(insn != NULL && x == PATTERN (insn) && JUMP_P (insn)));
}
-/* Worker function for mark_jump_label. IN_MEM is TRUE when X occurrs
+/* Worker function for mark_jump_label. IN_MEM is TRUE when X occurs
within a (MEM ...). IS_TARGET is TRUE when X is to be treated as a
jump-target; when the JUMP_LABEL field of INSN should be set or a
REG_LABEL_TARGET note should be added, not a REG_LABEL_OPERAND
/* Given U_NODE which is the node that failed to be scheduled; LOW and
UP which are the boundaries of it's scheduling window; compute using
- SCHED_NODES and II a row in the partial schedule that can be splitted
+ SCHED_NODES and II a row in the partial schedule that can be split
which will separate a critical predecessor from a critical successor
thereby expanding the window, and return it. */
static int
}
/* Early clobbers, by definition, need to not only
- clobber the registers that are live accross the insn
+ clobber the registers that are live across the insn
but need to clobber the registers that die within the
insn. The clobbering for registers live across the
insn is handled above. */
void
target_reinit (void)
{
- /* Reinitialise RTL backend. */
+ /* Reinitialize RTL backend. */
backend_init_target ();
/* Reinitialize lang-dependent parts. */
}
/* Perform EH refactoring optimizations that are simpler to do when code
- flow has been lowered but EH structurs haven't. */
+ flow has been lowered but EH structures haven't. */
static void
refactor_eh_r (tree t)
};
/* Return true if a BIT_FIELD_REF<(FLD->parent), BLEN, BPOS>
- expression (refereced as BF below) accesses any of the bits in FLD,
+ expression (referenced as BF below) accesses any of the bits in FLD,
false if it doesn't. If DATA is non-null, its field_len and
field_pos are filled in such that BIT_FIELD_REF<(FLD->parent),
field_len, field_pos> (referenced as BFLD below) represents the
bitmap_ior_into (variables_loaded,
LOADED_SYMS (bsi_stmt (bsi)));
- /* Look for statements writting into the write only variables.
+ /* Look for statements writing into the write only variables.
And try to remove them. */
FOR_EACH_BB (bb)
/* Analyze the access pattern of the data-reference DR.
- In case of non-consecutive accesse call vect_analyze_group_access() to
+ In case of non-consecutive accesses call vect_analyze_group_access() to
analyze groups of strided accesses. */
static bool
stmts that constitute the pattern. In this case it will be:
WIDEN_SUM <x_t, sum_0>
- Note: The widneing-sum idiom is a widening reduction pattern that is
+ Note: The widening-sum idiom is a widening reduction pattern that is
vectorized without preserving all the intermediate results. It
produces only N/2 (widened) results (by summing up pairs of
intermediate results) rather than all N results. Therefore, we
}
-/* Get vectorized defintions from SLP_NODE that contains corresponding
+/* Get vectorized definitions from SLP_NODE that contains corresponding
vectorized def-stmts. */
static void