/* Disable a limit on the depth of recursion in mangled strings.
Note if this limit is disabled then stack exhaustion is possible when
demangling pathologically complicated strings. Bug reports about stack
- exhaustion when the option is enabled will be rejected. */
-#define DMGL_NO_RECURSE_LIMIT (1 << 18)
+ exhaustion when the option is enabled will be rejected. */
+#define DMGL_NO_RECURSE_LIMIT (1 << 18)
/* If DMGL_NO_RECURSE_LIMIT is not enabled, then this is the value used as
the maximum depth of recursion allowed. It should be enough for any
real-world mangled name. */
#define DEMANGLE_RECURSION_LIMIT 2048
-
+
/* Enumeration of possible demangling styles.
Lucid and ARM styles are still kept logically distinct, even though
Contributed by Mark Mitchell (mark@markmitchell.com).
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
Contributed by Daniel Berlin (dan@cgsoftware.com).
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
Amortized and real worst case time for operations:
ExtractMin: O(lg n) amortized. O(n) worst case.
- DecreaseKey: O(1) amortized. O(lg n) worst case.
- Insert: O(2) amortized. O(1) actual.
+ DecreaseKey: O(1) amortized. O(lg n) worst case.
+ Insert: O(2) amortized. O(1) actual.
Union: O(1) amortized. O(1) actual. */
#ifndef _FIBHEAP_H_
-/* An expandable hash tables datatype.
+/* An expandable hash tables datatype.
Copyright (C) 1999-2024 Free Software Foundation, Inc.
Contributed by Vladimir Makarov (vmakarov@cygnus.com).
/* Cleanup function called whenever a live element is removed from
the hash table. */
typedef void (*htab_del) (void *);
-
+
/* Function called by htab_traverse for each live element. The first
arg is the slot of the element (which can be passed to htab_clear_slot
if desired), the second arg is the auxiliary pointer handed to
* following its memory access model. The actual placement may vary or migrate
* due to the system's NUMA policy and state, which is beyond the scope of
* HSA APIs.
- */
+ */
typedef struct hsa_amd_memory_pool_s {
/**
* Opaque handle.
HSA_AMD_MEMORY_POOL_INFO_RUNTIME_ALLOC_ALIGNMENT = 7,
/**
* This memory_pool can be made directly accessible by all the agents in the
- * system (::hsa_amd_agent_memory_pool_get_info does not return
+ * system (::hsa_amd_agent_memory_pool_get_info does not return
* ::HSA_AMD_MEMORY_POOL_ACCESS_NEVER_ALLOWED for any agent). The type of this
* attribute is bool.
*/
#ifdef __cplusplus
extern "C" {
-#endif /*__cplusplus*/
+#endif /*__cplusplus*/
/** \defgroup ext-images Images and Samplers
* @{
* @brief A fixed-size type used to represent ::hsa_ext_image_channel_type_t constants.
*/
typedef uint32_t hsa_ext_image_channel_type32_t;
-
+
/**
*
* @brief Channel order associated with the elements of an image. See
* @brief A fixed-size type used to represent ::hsa_ext_image_channel_order_t constants.
*/
typedef uint32_t hsa_ext_image_channel_order32_t;
-
+
/**
* @brief Image format.
* @brief A fixed-size type used to represent ::hsa_ext_sampler_coordinate_mode_t constants.
*/
typedef uint32_t hsa_ext_sampler_coordinate_mode32_t;
-
+
/**
* @brief Sampler filter modes. See the <em>Filter Mode</em> section
} hsa_ext_images_1_pfn_t;
/** @} */
-
+
#ifdef __cplusplus
} // end extern "C" block
-#endif /*__cplusplus*/
+#endif /*__cplusplus*/
#endif
/* Define the macros that actually get inserted in the caller's code. */
#define collector_sample(x) (collector_sample ? collector_sample(x), 0 : 0)
#define collector_pause() (collector_pause ? collector_pause(), 0 : 0)
-#define collector_resume() (collector_resume ? collector_resume(),0 : 0
+#define collector_resume() (collector_resume ? collector_resume(),0 : 0
#define collector_thread_pause(tid) \
(collector_thread_pause ? collector_thread_pause(tid), 0 : 0)
#define collector_thread_resume(tid) \
/* Function declarations for libiberty.
Copyright (C) 1997-2024 Free Software Foundation, Inc.
-
+
Note - certain prototypes declared in this header file are for
functions whoes implementation copyright does not belong to the
FSF. Those prototypes are present in this file for reference
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street - Fifth Floor,
Boston, MA 02110-1301, USA.
-
+
Written by Cygnus Support, 1994.
The libiberty library provides a number of functions which are
#if defined (__GNU_LIBRARY__ ) || defined (__linux__) \
|| defined (__FreeBSD__) || defined (__OpenBSD__) || defined (__NetBSD__) \
|| defined (__CYGWIN__) || defined (__CYGWIN32__) || defined (__MINGW32__) \
- || defined (__DragonFly__) || defined (HAVE_DECL_BASENAME)
+ || defined (__DragonFly__) || defined (HAVE_DECL_BASENAME)
extern char *basename (const char *) ATTRIBUTE_RETURNS_NONNULL ATTRIBUTE_NONNULL(1);
#else
/* Do not allow basename to be used if there is no prototype seen. We
#ifdef __MINGW32__
/* Forward declaration to avoid #include <sys/time.h>. */
struct timeval;
-extern int gettimeofday (struct timeval *, void *);
+extern int gettimeofday (struct timeval *, void *);
#endif
/* Get the amount of time the process has run, in microseconds. */
/* Capture stderr to a pipe. The output can be read by
calling pex_read_err and reading from the returned
FILE object. This flag may be specified only for
- the last program in a pipeline.
+ the last program in a pipeline.
This flag is supported only on Unix and Windows. */
#define PEX_STDERR_TO_PIPE 0x40
unsigned class_count;
};
-typedef struct partition_def
+typedef struct partition_def
{
/* The number of elements in this partition. */
int num_elements;
ld_plugin_get_input_section_contents tv_get_input_section_contents;
ld_plugin_update_section_order tv_update_section_order;
ld_plugin_allow_section_ordering tv_allow_section_ordering;
- ld_plugin_allow_unique_segment_for_sections tv_allow_unique_segment_for_sections;
+ ld_plugin_allow_unique_segment_for_sections tv_allow_unique_segment_for_sections;
ld_plugin_unique_segment_for_sections tv_unique_segment_for_sections;
ld_plugin_get_input_section_alignment tv_get_input_section_alignment;
ld_plugin_get_input_section_size tv_get_input_section_size;
Contributed by Mark Mitchell <mark@codesourcery.com>.
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
#endif /* SORT_H */
-
-
+
+
-/* A splay-tree datatype.
+/* A splay-tree datatype.
Copyright (C) 1998-2024 Free Software Foundation, Inc.
Contributed by Mark Mitchell (mark@markmitchell.com).
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
/* For an easily readable description of splay-trees, see:
Lewis, Harry R. and Denenberg, Larry. Data Structures and Their
- Algorithms. Harper-Collins, Inc. 1991.
+ Algorithms. Harper-Collins, Inc. 1991.
The major feature of splay trees is that all basic tree operations
are amortized O(log n) time for a tree with n nodes. */
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
+
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
bar", "XSTRING(foo)", to yield "bar". Be aware that this only
works for __STDC__, not for traditional C which will still resolve
to "foo". */
-#define XSTRING(s) STRINGX(s)
+#define XSTRING(s) STRINGX(s)
#endif /* SYM_CAT_H */
/* Copyright (C) 2013-2024 Free Software Foundation, Inc.
-
+
This file is part of GCC.
-
+
modify it under the terms of the GNU Library General Public License
as published by the Free Software Foundation; either version 2, or
(at your option) any later version.
#ifdef BIG_PAGE_SIZE
/* TODO - Replace '4096' below with correct big page size. */
#define VTV_PAGE_SIZE 4096
-#else
+#else
#if defined(__sun__) && defined(__svr4__) && defined(__sparc__)
#define VTV_PAGE_SIZE 8192
#elif defined(__loongarch_lp64)
#ifndef _XREGEX_H
#define _XREGEX_H 1
-# define regfree xregfree
+# define regfree xregfree
# define regexec xregexec
# define regcomp xregcomp
# define regerror xregerror
(int) (str - base), base);
result = 1;
}
- else if (identifier_pos && result == 0x24
+ else if (identifier_pos && result == 0x24
&& CPP_OPTION (pfile, dollars_in_ident)
/* In C++26 when dollars are allowed in identifiers,
we should still reject \u0024 as $ is part of the basic
uchar * buf = (uchar *) alloca (len + 1);
uchar * bufp = buf;
size_t idp;
-
+
for (idp = 0; idp < len; idp++)
if (id[idp] != '\\')
*bufp++ = id[idp];
}
}
- return CPP_HASHNODE (ht_lookup (pfile->hash_table,
+ return CPP_HASHNODE (ht_lookup (pfile->hash_table,
buf, bufp - buf, HT_ALLOC));
}
\f
PFILE is only used to generate diagnostics; setting it to NULL suppresses
diagnostics, and causes a return of NULL if there was any error instead. */
-uchar *
+uchar *
_cpp_convert_input (cpp_reader *pfile, const char *input_charset,
uchar *input, size_t size, size_t len,
const unsigned char **buffer_start, off_t *st_size)
compiled with -save-temps, we recognize directives in
-fpreprocessed mode only if the # is in column 1. macro.cc
puts a space in front of any '#' at the start of a macro.
-
+
We exclude the -fdirectives-only case because macro expansion
has not been performed yet, and block comments can cause spaces
to precede the directive. */
/* Prevent #include recursion. */
if (pfile->line_table->depth >= CPP_OPTION (pfile, max_include_depth))
- cpp_error (pfile,
- CPP_DL_ERROR,
+ cpp_error (pfile,
+ CPP_DL_ERROR,
"%<#include%> nested depth %u exceeds maximum of %u"
" (use %<-fmax-include-depth=DEPTH%> to increase the maximum)",
pfile->line_table->depth,
entry->allow_expansion = allow_expansion;
entry->u.ident = ident;
}
-}
+}
/* Register the pragmas the preprocessor itself handles. */
void
pfile->directive = save_directive;
/* We always insert at least one token, the directive result. It'll
- either be a CPP_PADDING or a CPP_PRAGMA. In the later case, we
+ either be a CPP_PADDING or a CPP_PRAGMA. In the later case, we
need to insert *all* of the tokens, including the CPP_PRAGMA_EOL. */
/* If we're not handling the pragma internally, read all of the tokens from
const char *msgid, va_list *ap)
{
bool ret;
-
+
if (!pfile->cb.diagnostic)
abort ();
/* Don't override note locations, which will likely make the note
if ((result & CPP_N_WIDTH) == CPP_N_LARGE
&& CPP_OPTION (pfile, cpp_warn_long_long))
{
- const char *message = CPP_OPTION (pfile, cplusplus)
+ const char *message = CPP_OPTION (pfile, cplusplus)
? N_("use of C++11 long long integer constant")
: N_("use of C99 long long integer constant");
case CPP_OPEN_PAREN:
if (op != CPP_CLOSE_PAREN)
{
- cpp_error_with_line (pfile, CPP_DL_ERROR,
+ cpp_error_with_line (pfile, CPP_DL_ERROR,
top->token->src_loc,
0, "missing %<)%> in expression");
return 0;
/* The call to stat may have reset errno. */
errno = EACCES;
}
-#endif
+#endif
else if (errno == ENOTDIR)
errno = ENOENT;
{
return file2;
}
- else
+ else
{
free (file2);
return NULL;
}
else
{
- file->err_no = ENOENT;
+ file->err_no = ENOENT;
file->path = NULL;
}
{
/* An identifier. */
struct cpp_identifier GTY ((tag ("CPP_TOKEN_FLD_NODE"))) node;
-
+
/* Inherit padding from this token. */
cpp_token * GTY ((tag ("CPP_TOKEN_FLD_SOURCE"))) source;
normalized_none
};
-enum cpp_main_search
+enum cpp_main_search
{
CMS_none, /* A regular source file. */
CMS_header, /* Is a directly-specified header file (eg PCH or
/* Nonzero means we're looking at already preprocessed code, so don't
bother trying to do macro expansion and whatnot. */
unsigned char preprocessed;
-
+
/* Nonzero means we are going to emit debugging logs during
preprocessing. */
unsigned char debug;
/* Is this a user-supplied directory? */
bool user_supplied_p;
- /* The canonicalized NAME as determined by lrealpath. This field
+ /* The canonicalized NAME as determined by lrealpath. This field
is only used by hosts that lack reliable inode numbers. */
char *canonical_name;
/* Each macro definition is recorded in a cpp_macro structure.
Variadic macros cannot occur with traditional cpp. */
struct GTY(()) cpp_macro {
- union cpp_parm_u
+ union cpp_parm_u
{
/* Parameters, if any. If parameter names use extended identifiers,
the original spelling of those identifiers, not the canonical
return macro ? macro->line : 0;
}
/* Return an idempotent time stamp (possibly from SOURCE_DATE_EPOCH). */
-enum class CPP_time_kind
+enum class CPP_time_kind
{
FIXED = -1, /* Fixed time via source epoch. */
DYNAMIC = -2, /* Dynamic via time(2). */
The text is the same as the command line argument. */
extern void cpp_define (cpp_reader *, const char *);
extern void cpp_define_unused (cpp_reader *, const char *);
-extern void cpp_define_formatted (cpp_reader *pfile,
+extern void cpp_define_formatted (cpp_reader *pfile,
const char *fmt, ...) ATTRIBUTE_PRINTF_2;
extern void cpp_define_formatted_unused (cpp_reader *pfile,
const char *fmt,
/* An ordinary line map encodes physical source locations. Those
physical source locations are called "spelling locations".
-
+
Physical source file TO_FILE at line TO_LINE at column 0 is represented
by the logical START_LOCATION. TO_LINE+L at column C is represented by
START_LOCATION+(L*(1<<m_column_and_range_bits))+(C*1<<m_range_bits), as
/* A macro line map encodes location of tokens coming from a macro
expansion.
-
+
The offset from START_LOCATION is used to index into
MACRO_LOCATIONS; this holds the original location of the token. */
struct GTY((tag ("2"))) line_map_macro : public line_map {
hashnode *entries;
/* Call back, allocate a node. */
hashnode (*alloc_node) (cpp_hash_table *);
- /* Call back, allocate something that hangs off a node like a cpp_macro.
+ /* Call back, allocate something that hangs off a node like a cpp_macro.
NULL means use the usual allocator. */
void * (*alloc_subobject) (size_t);
/* Smash the string directly, it's dead at this point */
char *smashy = (char *)text;
smashy[len - 3] = 0;
-
+
pfile->cb.dir_change (pfile, smashy + 1);
}
cpp_hashnode *n__VA_OPT__; /* C++ vararg macros */
enum {M_EXPORT, M_MODULE, M_IMPORT, M__IMPORT, M_HWM};
-
+
/* C++20 modules, only set when module_directives is in effect.
incoming variants [0], outgoing ones [1] */
cpp_hashnode *n_modules[M_HWM][2];
It starts initialized to all zeros, and at the end
'level' is the normalization level of the sequence. */
-struct normalize_state
+struct normalize_state
{
/* The previous starter character. */
cppchar_t previous;
path below. Since this loop is very hot it's worth doing these kinds
of optimizations.
- One of the paths through the ifdefs should provide
+ One of the paths through the ifdefs should provide
const uchar *search_line_fast (const uchar *s, const uchar *end);
acc_char_cmp (word_type val, word_type c)
{
#if defined(__GNUC__) && defined(__alpha__)
- /* We can get exact results using a compare-bytes instruction.
+ /* We can get exact results using a compare-bytes instruction.
Get (val == c) via (0 >= (val ^ c)). */
return __builtin_alpha_cmpbge (0, val ^ c);
#else
}
/* A version of the fast scanner using bit fiddling techniques.
-
+
For 32-bit words, one would normally perform 16 comparisons and
16 branches. With this algorithm one performs 24 arithmetic
operations and one branch. Whether this is faster with a 32-bit
unsigned int misalign;
const word_type *p;
word_type val, t;
-
+
/* Align the buffer. Mask out any bytes from before the beginning. */
p = (word_type *)((uintptr_t)s & -sizeof(word_type));
val = *p;
typedef __attribute__((altivec(vector))) unsigned char vc;
const vc repl_nl = {
- '\n', '\n', '\n', '\n', '\n', '\n', '\n', '\n',
+ '\n', '\n', '\n', '\n', '\n', '\n', '\n', '\n',
'\n', '\n', '\n', '\n', '\n', '\n', '\n', '\n'
};
const vc repl_cr = {
- '\r', '\r', '\r', '\r', '\r', '\r', '\r', '\r',
+ '\r', '\r', '\r', '\r', '\r', '\r', '\r', '\r',
'\r', '\r', '\r', '\r', '\r', '\r', '\r', '\r'
};
const vc repl_bs = {
- '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\',
+ '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\',
'\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\'
};
const vc repl_qm = {
- '?', '?', '?', '?', '?', '?', '?', '?',
- '?', '?', '?', '?', '?', '?', '?', '?',
+ '?', '?', '?', '?', '?', '?', '?', '?',
+ '?', '?', '?', '?', '?', '?', '?', '?',
};
const vc zero = { 0 };
typedef __attribute__((altivec(vector))) unsigned char vc;
const vc repl_nl = {
- '\n', '\n', '\n', '\n', '\n', '\n', '\n', '\n',
+ '\n', '\n', '\n', '\n', '\n', '\n', '\n', '\n',
'\n', '\n', '\n', '\n', '\n', '\n', '\n', '\n'
};
const vc repl_cr = {
- '\r', '\r', '\r', '\r', '\r', '\r', '\r', '\r',
+ '\r', '\r', '\r', '\r', '\r', '\r', '\r', '\r',
'\r', '\r', '\r', '\r', '\r', '\r', '\r', '\r'
};
const vc repl_bs = {
- '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\',
+ '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\',
'\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\'
};
const vc repl_qm = {
- '?', '?', '?', '?', '?', '?', '?', '?',
- '?', '?', '?', '?', '?', '?', '?', '?',
+ '?', '?', '?', '?', '?', '?', '?', '?',
+ '?', '?', '?', '?', '?', '?', '?', '?',
};
const vc ones = {
-1, -1, -1, -1, -1, -1, -1, -1,
l = vpadd_u8 (vget_low_u8 (t), vget_high_u8 (t));
m = vpaddl_u8 (l);
n = vpaddl_u16 (m);
-
- found = vget_lane_u32 ((uint32x2_t) vorr_u64 ((uint64x1_t) n,
+
+ found = vget_lane_u32 ((uint32x2_t) vorr_u64 ((uint64x1_t) n,
vshr_n_u64 ((uint64x1_t) n, 24)), 0);
found &= mask;
}
/* After parsing an identifier or other sequence, produce a warning about
sequences not in NFC/NFKC. */
static void
-warn_about_normalization (cpp_reader *pfile,
+warn_about_normalization (cpp_reader *pfile,
const cpp_token *token,
const struct normalize_state *s,
bool identifier)
++note;
lit_accum accum;
-
+
uchar prefix[17];
unsigned prefix_len = 0;
enum Phase
}
/* Append a comment to the end of the comment table. */
-static void
-store_comment (cpp_reader *pfile, cpp_token *token)
+static void
+store_comment (cpp_reader *pfile, cpp_token *token)
{
int len;
if (pfile->comments.allocated == 0)
{
- pfile->comments.allocated = 256;
+ pfile->comments.allocated = 256;
pfile->comments.entries = (cpp_comment *) xmalloc
(pfile->comments.allocated * sizeof (cpp_comment));
}
len = token->val.str.len;
/* Copy comment. Note, token may not be NULL terminated. */
- pfile->comments.entries[pfile->comments.count].comment =
+ pfile->comments.entries[pfile->comments.count].comment =
(char *) xmalloc (sizeof (char) * (len + 1));
memcpy (pfile->comments.entries[pfile->comments.count].comment,
token->val.str.text, len);
if (_cpp_defined_macro_p (node)
&& _cpp_maybe_notify_macro_use (pfile, node, tok->src_loc)
&& !cpp_fun_like_macro_p (node))
- cpp_error_with_line (pfile, CPP_DL_ERROR, tok->src_loc, 0,
+ cpp_error_with_line (pfile, CPP_DL_ERROR, tok->src_loc, 0,
"module control-line %qs cannot be"
" an object-like macro",
NODE_NAME (node));
/* A potential block or line comment. */
comment_start = buffer->cur;
c = *buffer->cur;
-
+
if (c == '*')
{
if (_cpp_skip_block_comment (pfile))
int ucn_len_c;
unsigned t;
unsigned long utf32;
-
+
/* Compute the length of the UTF-8 sequence. */
for (t = *name; t & 0x80; t <<= 1)
ucn_len++;
-
+
utf32 = *name & (0x7F >> ucn_len);
for (ucn_len_c = 1; ucn_len_c < ucn_len; ucn_len_c++)
{
utf32 = (utf32 << 6) | (*++name & 0x3F);
-
+
/* Ill-formed UTF-8. */
if ((*name & ~0x3F) != 0x80)
abort ();
}
-
+
*buffer++ = '\\';
*buffer++ = 'U';
for (j = 7; j >= 0; j--)
{
size_t i;
const unsigned char *name = NODE_NAME (ident);
-
+
for (i = 0; i < NODE_LEN (ident); i++)
if (name[i] & ~0x7F)
{
freed when the reader is destroyed. Useful for diagnostics. */
unsigned char *
cpp_token_as_text (cpp_reader *pfile, const cpp_token *token)
-{
+{
unsigned int len = cpp_token_len (token) + 1;
unsigned char *start = _cpp_unaligned_alloc (pfile, len), *end;
preprocessing tokens, or module followed by identifier, ':' or
';' preprocessing tokens. */
unsigned char p = *peek++;
-
+
/* A character literal is ... single quotes, ... optionally preceded
by u8, u, U, or L */
/* A string-literal is a ... double quotes, optionally prefixed by
goto next_line;
}
goto dflt;
-
+
case '#':
if (bol)
{
bad_string:
cpp_error_with_line (pfile, CPP_DL_ERROR, sloc, 0,
"unterminated literal");
-
+
done_string:
raw = false;
lwm = pos - 1;
{
unsigned num_maps_allocated = LINEMAPS_ALLOCATED (set, macro_p);
unsigned num_maps_used = LINEMAPS_USED (set, macro_p);
-
+
if (num > num_maps_allocated - num_maps_used)
{
/* We need more space! */
/* We are going to execute some dance to try to reduce the
overhead of the memory allocator, in case we are using the
ggc-page.cc one.
-
+
The actual size of memory we are going to get back from the
allocator may well be larger than what we ask for. Use this
hook to find what that size is. */
if (column >= (1u << (map->m_column_and_range_bits - map->m_range_bits)))
return loc;
- location_t r =
+ location_t r =
linemap_position_for_line_and_column (set, map, line, column);
if (linemap_assert_fails (r <= set->highest_location)
|| linemap_assert_fails (map == linemap_lookup (set, r)))
linemap_assert (token_no < MACRO_MAP_NUM_MACRO_TOKENS (map));
location = MACRO_MAP_LOCATIONS (map)[2 * token_no];
-
+
return location;
}
macro. Otherwise, return LOCATION. SET is the set of maps
location come from. ORIGINAL_MAP is an output parm. If non NULL,
the function sets *ORIGINAL_MAP to the ordinary (non-macro) map the
- returned location comes from.
+ returned location comes from.
This is a subroutine of linemap_resolve_location. */
}
}
-/*
+/*
Suppose that LOC is the virtual location of a token T coming from
the expansion of a macro M. This function then steps up to get the
location L of the point where M got expanded. If L is a spelling
location inside a macro expansion M', then this function returns
the locus of the point where M' was expanded. Said otherwise, this
function returns the location of T in the context that triggered
- the expansion of M.
+ the expansion of M.
*LOC_MAP must be set to the map of LOC. This function then sets it
to the map of the returned location. */
struct _cpp_file *file = cpp_get_file (pbuffer);
if (file)
{
- /* Generate __TIMESTAMP__ string, that represents
- the date and time of the last modification
- of the current source file. The string constant
+ /* Generate __TIMESTAMP__ string, that represents
+ the date and time of the last modification
+ of the current source file. The string constant
looks like "Sun Sep 16 01:03:52 1973". */
struct tm *tb = NULL;
struct stat *st = _cpp_get_file_stat (file);
{
cpp_errno (pfile, CPP_DL_WARNING,
"could not determine date and time");
-
+
pfile->date = UC"\"??? ?? ????\"";
pfile->time = UC"\"??:??:??\"";
}
sprintf ((char *) result, "%u", number);
}
- return result;
+ return result;
}
/* Get an idempotent date. Either the cached value, the value from
pfile->state.parsing_args = 2;
return collect_args (pfile, node, pragma_buff, num_args);
}
-
+
/* Back up. A CPP_EOF is either an EOF from an argument we're
expanding, or a fake one from lex_direct. We want to backup the
former, but not the latter. We may have skipped padding, in
case MACRO_ARG_TOKEN_NORMAL:
tokens_ptr = arg->first;
break;
- case MACRO_ARG_TOKEN_STRINGIFIED:
+ case MACRO_ARG_TOKEN_STRINGIFIED:
tokens_ptr = (const cpp_token **) &arg->stringified;
break;
case MACRO_ARG_TOKEN_EXPANDED:
want each tokens resulting from function-like macro arguments
expansion to have a different location or not.
- E.g, consider this function-like macro:
+ E.g, consider this function-like macro:
#define M(x) x - 3
Then consider us "calling" it (and thus expanding it) like:
-
+
M(1+4)
It will be expanded into:
location that records many things like the locus of the expansion
point as well as the original locus inside the definition of the
macro. This location is called a virtual location.
-
+
So the buffer BUFF holds a set of cpp_token*, and the buffer
VIRT_LOCS holds the virtual locations of the tokens held by BUFF.
context, when the latter is pushed. The memory allocated to
store the tokens and their locations is going to be freed once
the context of macro expansion is popped.
-
+
As far as tokens are concerned, the memory overhead of
-ftrack-macro-expansion is proportional to the number of
macros that get expanded multiplied by sizeof (location_t).
" in ISO C++98",
NODE_NAME (node), src->val.macro_arg.arg_no);
else if (CPP_OPTION (pfile, cpp_warn_c90_c99_compat))
- cpp_pedwarning (pfile,
+ cpp_pedwarning (pfile,
CPP_OPTION (pfile, cpp_warn_c90_c99_compat) > 0
? CPP_W_C90_C99_COMPAT : CPP_W_PEDANTIC,
"invoking macro %s argument %d: "
{
const cpp_token **result;
location_t *virt_loc_dest = NULL;
- unsigned token_index =
+ unsigned token_index =
(BUFF_FRONT (buffer) - buffer->base) / sizeof (cpp_token *);
/* Abort if we pass the end the buffer. */
if (pfile == NULL)
return false;
- return (pfile->about_to_expand_macro_p
+ return (pfile->about_to_expand_macro_p
|| macro_of_context (pfile->context));
}
*location = (*token)->src_loc;
FIRST (c).token++;
}
- else if ((c)->tokens_kind == TOKENS_KIND_INDIRECT)
+ else if ((c)->tokens_kind == TOKENS_KIND_INDIRECT)
{
*token = *FIRST (c).ptoken;
*location = (*token)->src_loc;
_cpp_extend_buff (pfile, &pfile->u_buff, len + 1 + dotme * 2);
unsigned char *buf = BUFF_FRONT (pfile->u_buff);
size_t pos = 0;
-
+
if (dotme)
{
buf[pos++] = '.';
tmp->type = CPP_HEADER_NAME;
XDELETEVEC (fname);
-
+
result = tmp;
}
}
= XRESIZEVEC (unsigned char, pfile->macro_buffer, len);
pfile->macro_buffer_len = len;
}
-
+
macro_arg_saved_data *saved = (macro_arg_saved_data *)pfile->macro_buffer;
saved[n].canonical_node = node;
saved[n].value = node->value;
{
FILE *f = fopen (fname, "r");
unsigned fl = 0;
-
+
if (!f)
fail ("opening ucnid.tab");
for (;;)
read_table (char *fname)
{
FILE * f = fopen (fname, "r");
-
+
if (!f)
fail ("opening UnicodeData.txt");
for (;;)
combining_value[codepoint] = strtoul (l, &l, 10);
if (*l++ != ';')
fail ("parsing UnicodeData.txt, junk after combining class");
-
+
/* Skip over bidi value. */
do {
l++;
} while (*l != ';');
-
+
/* Decomposition mapping. */
decomp_useful = flags[codepoint];
if (*++l == '<') /* Compatibility mapping. */
read_derived (const char *fname)
{
FILE * f = fopen (fname, "r");
-
+
if (!f)
fail ("opening DerivedNormalizationProps.txt");
for (;;)
maybe_not_NFC_p = (strstr (line, "; NFC_QC; M") != NULL);
if (! not_NFC_p && ! not_NFKC_p && ! maybe_not_NFC_p)
continue;
-
+
start = strtoul (line, &l, 16);
if (l == line)
fail ("parsing DerivedNormalizationProps.txt, reading start");
end = start;
while (start <= end)
- flags[start++] |= ((not_NFC_p ? not_NFC : 0)
+ flags[start++] |= ((not_NFC_p ? not_NFC : 0)
| (not_NFKC_p ? not_NFKC : 0)
| (maybe_not_NFC_p ? maybe_not_NFC : 0)
);
read_derivedcore (char *fname)
{
FILE * f = fopen (fname, "r");
-
+
if (!f)
fail ("opening DerivedCoreProperties.txt");
for (;;)
fail ("parsing DerivedCoreProperties.txt, reading code point");
if (codepoint_start > MAX_CODE_POINT)
fail ("parsing DerivedCoreProperties.txt, code point too large");
-
+
if (*l == '.' && l[1] == '.')
{
char *l2 = l + 2;
unsigned char last_combine = combining_value[0];
printf ("static const struct ucnrange ucnranges[] = {\n");
-
+
for (i = 1; i <= NUM_CODE_POINTS; i++)
if (i == NUM_CODE_POINTS
|| (flags[i] != last_flag && ((flags[i] | last_flag) & all_languages))
shall not be used in advertising or otherwise to promote the sale,\n\
use or other dealings in these Data Files or Software without prior\n\
written authorization of the copyright holder. */\n";
-
+
puts (copyright);
}
const char *cmi, bool is_header_unit, bool is_exported)
{
gcc_assert (!d->module_name);
-
+
d->module_name = xstrdup (m);
d->is_header_unit = is_header_unit;
d->is_exported = is_exported;
/* Some of these are #define on some systems, e.g. on AIX to redirect
the names to 64bit capable functions for LARGE_FILES support. These
redefs are pointless here so we can override them. */
-
-#undef fopen
-#undef freopen
+
+#undef fopen
+#undef freopen
#define fopen(PATH,MODE) fopen_unlocked(PATH,MODE)
#define fdopen(FILDES,MODE) fdopen_unlocked(FILDES,MODE)
unterminated = skip_macro_block_comment (pfile);
else
unterminated = _cpp_skip_block_comment (pfile);
-
+
if (unterminated)
cpp_error_with_line (pfile, CPP_DL_ERROR, src_loc, 0,
"unterminated comment");
}
else
len = macro->count;
-
+
return len;
}
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
-
+
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
- *
+ *
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
+ *
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
the handler_data field from there. This field contains the offset
from FP at which the address of the currently installed handler is
to be found. */
-
+
PDSCDEF * pd = PV_FOR (fp);
/* Procedure descriptor pointer for the live subprogram with FP as the frame
pointer, and to which _gcc_shell_handler is attached as a condition
case PDSC$K_KIND_FP_STACK: /* [3.4.2 PD for stack frame procedures] */
handler_data_offset = 40;
break;
-
+
case PDSC$K_KIND_FP_REGISTER: /* [3.4.5 PD for reg frame procedures] */
handler_data_offset = 32;
break;
-
+
default:
handler_data_offset = 0;
break;
/* Otherwise, fetch the fp offset at which the real handler address is to be
found, then fetch and return the latter in turn. */
-
+
handler_slot_offset = REG_AT ((REG)pd + handler_data_offset);
return (ADDR) REG_AT (fp + handler_slot_offset);
return ret;
}
-
+
up if we're handed anything else. */
if (pkind != PDSC$K_KIND_FP_STACK && pkind != PDSC$K_KIND_FP_REGISTER)
return _URC_END_OF_STACK;
-
+
if (eh_debug)
printf ("FALLBACK: CTX FP = 0x%p, PV = 0x%p, EN = 0x%llx, RA = 0x%p\n",
ADDR_AT (context->reg[29]), pv, pv->pdsc$q_entry, context->ra);
status = LIB$GET_PREV_INVO_CONTEXT (&icb);
GIVEUP_ON_FAILURE (status);
- new_cfa = (ADDR) icb.libicb$q_ireg[30];
+ new_cfa = (ADDR) icb.libicb$q_ireg[30];
}
else
{
register value + frame size. Note that the frame base may differ
from CONTEXT->cfa, typically if the caller has performed dynamic
stack allocations. */
-
+
int base_reg = pv->pdsc$w_flags & PDSC$M_BASE_REG_IS_FP ? 29 : 30;
ADDR base_addr = ADDR_AT (context->reg[base_reg]);
-
+
new_cfa = base_addr + pv->pdsc$l_size;
}
for (i = 0, j = 0; i < 32; i++)
if ((1 << i) & pv->pdsc$l_ireg_mask)
UPDATE_FS_FOR_CFA_GR (fs, i, rsa_addr + 8 * ++j, new_cfa);
-
+
/* ??? floating point registers ? */
break;
fs->regs.how[RA_COLUMN] = REG_SAVED_REG;
fs->regs.reg[RA_COLUMN].loc.reg = pv->pdsc$b_save_ra;
-
+
fs->regs.how[29] = REG_SAVED_REG;
fs->regs.reg[29].loc.reg = pv->pdsc$b_save_fp;
-
+
break;
}
UPDATE_FS_FOR_CFA_GR (fs, 26, &mechargs->chf$q_mch_savr26, new_cfa);
UPDATE_FS_FOR_CFA_GR (fs, 27, &mechargs->chf$q_mch_savr27, new_cfa);
UPDATE_FS_FOR_CFA_GR (fs, 28, &mechargs->chf$q_mch_savr28, new_cfa);
-
+
/* Registers R2 to R7 are available from the rei frame pointer. */
-
+
for (i = 2; i <= 7; i ++)
UPDATE_FS_FOR_CFA_GR (fs, i, rei_frame_addr+(i - 2)*8, new_cfa);
-
+
/* ??? floating point registers ? */
}
If this result can't be rounded to the exact result with confidence, we
round to the value between the two closest representable values, and
test if the correctly rounded value is above or below this value.
-
+
   Because of the Newton-Raphson iteration step, an error in the seed at X
   is amplified by X. Therefore, we don't want a Chebyshev polynomial
   or a polynomial that is close to optimal according to the maximum norm
If this result can't be rounded to the exact result with confidence, we
round to the value between the two closest representable values, and
test if the correctly rounded value is above or below this value.
-
+
   Because of the Newton-Raphson iteration step, an error in the seed at X
   is amplified by X. Therefore, we don't want a Chebyshev polynomial
   or a polynomial that is close to optimal according to the maximum norm
= { (func_ptr) (-1) };
/* Run all the global destructors on exit from the program. */
-
+
/* Some systems place the number of pointers in the first word of the
table. On SVR4 however, that word is -1. In all cases, the table is
null-terminated. On SVR4, we start from the beginning of the list and
/* Configuration file for ARM BPABI targets, library renames.
Copyright (C) 2010-2024 Free Software Foundation, Inc.
- Contributed by CodeSourcery, LLC
+ Contributed by CodeSourcery, LLC
This file is part of GCC.
<http://www.gnu.org/licenses/>. */
extern long long __divdi3 (long long, long long);
-extern unsigned long long __udivdi3 (unsigned long long,
+extern unsigned long long __udivdi3 (unsigned long long,
unsigned long long);
extern long long __gnu_ldivmod_helper (long long, long long, long long *);
long long
-__gnu_ldivmod_helper (long long a,
- long long b,
+__gnu_ldivmod_helper (long long a,
+ long long b,
long long *remainder)
{
long long quotient;
__sync_val_compare_and_swap_4 (int *ptr, int oldval, int newval)
{
int actual_oldval, fail;
-
+
while (1)
{
actual_oldval = *ptr;
return actual_oldval;
fail = __kernel_cmpxchg (actual_oldval, newval, ptr);
-
+
if (__builtin_expect (!fail, 1))
return oldval;
}
/* ARM EABI compliant unwinding routines
Copyright (C) 2004-2024 Free Software Foundation, Inc.
Contributed by Paul Brook
-
+
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
_Unwind_VRS_Set (context, _UVRSC_CORE, R_SP, _UVRSD_UINT32, ®);
continue;
}
-
+
if ((op & 0xf0) == 0x80)
{
op = (op << 8) | next_unwind_byte (uws);
{
/* Pop r4-r[4+nnn], [lr]. */
_uw mask;
-
+
mask = (0xff0 >> (7 - (op & 7))) & 0xff0;
if (op & 8)
mask |= (1 << R_LR);
default:
return 0;
- }
+ }
}
/* ABI defined personality routine entry points. */
#define F7_(name) __f7_##name
#define F7P __f7_
-
+
/* Renames for libf7.c, libf7.h. */
-
+
#define f7_fabs __f7_fabs
#define f7_neg __f7_neg
#define f7_add __f7_add
#define f7_ne_impl __f7_ne_impl
#define f7_eq_impl __f7_eq_impl
#define f7_unord_impl __f7_unord_impl
-
+
/* Renames for libf7.c, libf7.h. */
-
+
#define f7_const_1 __f7_const_1
#define f7_const_1_P __f7_const_1_P
#define f7_const_2 __f7_const_2
#define f7_const_1_ln10_P __f7_const_1_ln10_P
#define f7_const_sqrt2 __f7_const_sqrt2
#define f7_const_sqrt2_P __f7_const_sqrt2_P
-
+
/* Renames for libf7-asm.sx, f7-wraps.h. */
-
+
#define f7_classify_asm __f7_classify_asm
#define f7_store_expo_asm __f7_store_expo_asm
#define f7_clr_asm __f7_clr_asm
/* C6X ABI compliant unwinding routines
Copyright (C) 2011-2024 Free Software Foundation, Inc.
-
+
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
}
static const int
-unwind_frame_regs[13] =
+unwind_frame_regs[13] =
{
R_A15, R_B15, R_B14, R_B13, R_B12, R_B11, R_B10, R_B3,
R_A14, R_A13, R_A12, R_A11, R_A10
_Unwind_VRS_Set (context, _UVRSC_CORE, R_B3, _UVRSD_UINT32, ®);
continue;
}
-
+
/* Reserved. */
return _URC_FAILURE;
}
_Unwind_GetRegionStart (_Unwind_Context *context)
{
_Unwind_Control_Block *ucbp;
-
+
ucbp = (_Unwind_Control_Block *) _Unwind_GetGR (context, UNWIND_POINTER_REG);
return (_Unwind_Ptr) ucbp->pr_cache.fnstart;
}
{
_Unwind_Control_Block *ucbp;
_uw *ptr;
-
+
ucbp = (_Unwind_Control_Block *) _Unwind_GetGR (context, UNWIND_POINTER_REG);
ptr = (_uw *) ucbp->pr_cache.ehtp;
/* Skip the personality routine address. */
static inline void *getTMCloneTable (const void *f, size_t *tmct_siz)
{
char *tmct_fixed, *tmct = NULL;
- unsigned int i, img_count;
+ unsigned int i, img_count;
struct mach_header *mh;
Dl_info info;
-
+
if (! dladdr (f, &info) || info.dli_fbase == NULL)
abort ();
-
+
mh = (struct mach_header *) info.dli_fbase;
tmct_fixed = GET_DATA_TMCT (mh, tmct_siz);
*tmct_siz /= (sizeof (size_t) * 2);
/* No tm_clone_table or no clones. */
if (tmct_fixed == NULL || *tmct_siz == 0)
- return NULL;
+ return NULL;
img_count = _dyld_image_count();
for (i = 0; i < img_count && tmct == NULL; i++)
{
if (mh == _dyld_get_image_header(i))
- tmct = tmct_fixed + (unsigned long)_dyld_get_image_vmaddr_slide(i);
+ tmct = tmct_fixed + (unsigned long)_dyld_get_image_vmaddr_slide(i);
}
return tmct;
/* This structure holds a routine to call. There may be extra fields
at the end of the structure that this code doesn't know about. */
-struct one_atexit_routine
+struct one_atexit_routine
{
union {
atexit_callback ac;
cxa_atexit_callback cac;
} callback;
- /* has_arg is 0/2/4 if 'ac' is live, 1/3/5 if 'cac' is live.
+ /* has_arg is 0/2/4 if 'ac' is live, 1/3/5 if 'cac' is live.
Higher numbers indicate a later version of the structure that this
code doesn't understand and will ignore. */
int has_arg;
fails to call routines registered while an atexit routine is
running. Return 1 if it works properly, and -1 if an error occurred. */
-struct atexit_data
+struct atexit_data
{
int result;
cxa_atexit_p cxa_atexit;
unsigned int (*dyld_image_count_fn)(void);
const char *(*dyld_get_image_name_fn)(unsigned int image_index);
const void *(*dyld_get_image_header_fn)(unsigned int image_index);
- const void *(*NSLookupSymbolInImage_fn)(const void *image,
+ const void *(*NSLookupSymbolInImage_fn)(const void *image,
const char *symbolName,
unsigned int options);
void *(*NSAddressOfSymbol_fn)(const void *symbol);
unsigned i, count;
-
+
/* Find some dyld functions. */
_dyld_func_lookup("__dyld_image_count", &dyld_image_count_fn);
_dyld_func_lookup("__dyld_get_image_name", &dyld_get_image_name_fn);
|| ! dyld_get_image_header_fn || ! NSLookupSymbolInImage_fn
|| ! NSAddressOfSymbol_fn)
return NULL;
-
+
count = dyld_image_count_fn ();
for (i = 0; i < count; i++)
{
const char * path = dyld_get_image_name_fn (i);
const void * image;
const void * symbol;
-
+
if (strcmp (path, "/usr/lib/libSystem.B.dylib") != 0)
continue;
image = dyld_get_image_header_fn (i);
}
#endif
-/* Create (if necessary), find, lock, fill in, and return our globals.
- Return NULL on error, in which case the globals will not be locked.
+/* Create (if necessary), find, lock, fill in, and return our globals.
+ Return NULL on error, in which case the globals will not be locked.
The caller should call keymgr_set_and_unlock. */
static struct keymgr_atexit_list *
get_globals (void)
{
struct keymgr_atexit_list * r;
-
+
#ifdef __ppc__
/* 10.3.9 doesn't have _keymgr_get_and_lock_processwide_ptr_2 so the
PPC side can't use it. On 10.4 this just means the error gets
return NULL;
r = rr;
#endif
-
+
if (r == NULL)
{
r = calloc (sizeof (struct keymgr_atexit_list), 1);
}
return r;
-
+
error:
_keymgr_set_and_unlock_processwide_ptr (KEYMGR_ATEXIT_LIST, r);
return NULL;
struct atexit_routine_list * s
= malloc (sizeof (struct atexit_routine_list));
int result;
-
+
if (!s)
{
_keymgr_set_and_unlock_processwide_ptr (KEYMGR_ATEXIT_LIST, g);
struct keymgr_atexit_list *g;
struct atexit_routine_list * base = NULL;
char prev_running = 0;
-
+
g = _keymgr_get_and_lock_processwide_ptr (KEYMGR_ATEXIT_LIST);
if (g)
{
g = _keymgr_get_and_lock_processwide_ptr (KEYMGR_ATEXIT_LIST);
if (! g || g->version != 0 || g->atexit_status != atexit_status_missing)
return;
-
+
prev_running = g->running_routines;
g->running_routines = 1;
g = run_routines (g, NULL);
if (! g)
return -1;
-
+
if (g->running_routines || g->atexit_status == atexit_status_missing)
return add_routine (g, r);
/* These are the actual replacement routines; they just funnel into
atexit_common. */
-int __cxa_atexit (cxa_atexit_callback func, void* arg,
+int __cxa_atexit (cxa_atexit_callback func, void* arg,
const void* dso) __attribute__((visibility("hidden")));
int
/* Move double-word library function.
Copyright (C) 2000-2024 Free Software Foundation, Inc.
Contributed by Red Hat, Inc.
-
+
This file is part of GCC.
-
+
GCC is free software ; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
-
+
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY ; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
+
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
/* Move half-word library function.
Copyright (C) 2000-2024 Free Software Foundation, Inc.
Contributed by Red Hat, Inc.
-
+
This file is part of GCC.
-
+
GCC is free software ; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
-
+
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY ; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
+
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
/* Move word library function.
Copyright (C) 2000-2024 Free Software Foundation, Inc.
Contributed by Red Hat, Inc.
-
+
This file is part of GCC.
-
+
GCC is free software ; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
-
+
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY ; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
+
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
/* Frv initialization file linked before all user modules
Copyright (C) 1999-2024 Free Software Foundation, Inc.
Contributed by Red Hat, Inc.
-
+
This file is part of GCC.
-
+
GCC is free software ; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
-
+
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY ; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
+
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
/* Frv initialization file linked after all user modules
Copyright (C) 1999-2024 Free Software Foundation, Inc.
Contributed by Red Hat, Inc.
-
+
This file is part of GCC.
-
+
GCC is free software ; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
-
+
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY ; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
+
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
Copyright (C) 2023-2024 Free Software Foundation, Inc.
Contributed by Siemens.
-
+
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
/* Busy-wait until we have exclusive access to the state. Check if
another thread managed to perform the init call in the interim. */
-
+
while (!__TAS(&__guard->busy))
{
if (__guard->done)
union _FP_UNION_Q
{
__float128 flt;
- struct
+ struct
{
unsigned long frac0 : 32;
unsigned long frac1 : 32;
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
- *
+ *
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
+ *
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
set_fast_math_sse (unsigned int edx)
{
unsigned int mxcsr;
-
+
if (edx & bit_FXSAVE)
{
/* Check if DAZ is available. */
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
- *
+ *
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
+ *
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
#define IN_LIBGCC2
/* auto-host.h is needed by cygming.h for HAVE_GAS_WEAK and here
- for HAVE_LD_RO_RW_SECTION_MIXING. */
+ for HAVE_LD_RO_RW_SECTION_MIXING. */
#include "auto-host.h"
#include "tconfig.h"
#include "tsystem.h"
= { 0 };
#endif
-extern void __gcc_register_frame (void);
+extern void __gcc_register_frame (void);
extern void __gcc_deregister_frame (void);
#pragma GCC diagnostic push
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
- *
+ *
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
+ *
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
some foreign code for which no unwind frame can be found. If this is
a call from the Windows signal handler, then:
- 2) We must get the signal context information.
+ 2) We must get the signal context information.
* With the standard exception filter:
#define MD_FALLBACK_FRAME_STATE_FOR i386_w32_fallback_frame_state
static _Unwind_Reason_Code
-i386_w32_fallback_frame_state (struct _Unwind_Context *context,
+i386_w32_fallback_frame_state (struct _Unwind_Context *context,
_Unwind_FrameState *fs)
{
PCONTEXT proc_ctx_;
long new_cfa_;
- if (SIG_SEH1)
+ if (SIG_SEH1)
proc_ctx_ = (PCONTEXT) (*(int*)(ctx_cfa_ + 56));
else if (SIG_SEH2)
proc_ctx_ = (PCONTEXT) (*(int*)(ctx_cfa_ + 8));
     one of its probes prior to the real SP adjustment. The only
operations of interest performed is "pushl %ecx", followed by
ecx clobbering. */
- else if (SIG_ALLOCA)
+ else if (SIG_ALLOCA)
{
- /* Only one push between entry in _alloca and the probe trap. */
+ /* Only one push between entry in _alloca and the probe trap. */
long new_cfa_ = (long) ctx_cfa_ + 4;
fs->regs.cfa_how = CFA_REG_OFFSET;
/* ENTRY is the unwind table entry found for a PC part of call chain we're
unwinding through. Return whether we should force the generic unwinder
to resort to "fallback" processing. */
-
+
static int
force_fallback_processing_for (void * pc, vms_unw_table_entry * entry)
{
return 0;
/* The OSSD block is found past the header, unwind descriptor area
- and condition handler pointer, if any. */
+ and condition handler pointer, if any. */
ossd = (ossddef *)
/* Beware: uqword pointer arithmetic below. */
(unw_info_block
/* "A General Information segment may be omitted if all of its fields
would have their default values. If a General Information segment
is present, it must be the first in the OSSD area." So ... */
-
+
if (eh_debug)
printf ("ossd @ 0x%p\n", ossd);
-
+
if (eh_debug && ossd->ossd$v_type == OSSD$K_GENERAL_INFO)
printf ("exc_frame = %d - bot_frame = %d - base_frame = %d\n",
- ossd->ossd$v_exception_frame,
+ ossd->ossd$v_exception_frame,
ossd->ossd$v_bottom_of_stack,
ossd->ossd$v_base_frame);
-
+
return
ossd->ossd$v_type == OSSD$K_GENERAL_INFO
&& (ossd->ossd$v_exception_frame
{
return (_U_Qfcmp (a, b, 4) == 0);
}
-
+
int
_U_Qfgt (long double a, long double b)
{
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
- *
+ *
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
+ *
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
union _FP_UNION_Q
{
__float128 flt;
- struct
+ struct
{
unsigned long frac0 : 64;
unsigned long frac1 : 48;
UNW_WHERE_BR, /* register is saved in a branch register */
UNW_WHERE_SPREL, /* register is saved on memstack (sp-relative) */
UNW_WHERE_PSPREL, /* register is saved on memstack (psp-relative) */
-
+
/* At the end of each prologue these locations get resolved to
UNW_WHERE_PSPREL and UNW_WHERE_GR, respectively. */
UNW_WHERE_SPILL_HOME, /* register is saved in its spill home */
struct unw_reg_state curr; /* current state */
_Unwind_Personality_Fn personality;
-
+
} _Unwind_FrameState;
enum unw_nat_type
{
off = sr->spill_offset;
alloc_spill_area (&off, 16, sr->curr.reg + UNW_REG_F2,
- sr->curr.reg + UNW_REG_F31);
+ sr->curr.reg + UNW_REG_F31);
alloc_spill_area (&off, 8, sr->curr.reg + UNW_REG_B1,
sr->curr.reg + UNW_REG_B5);
alloc_spill_area (&off, 8, sr->curr.reg + UNW_REG_R4,
sr->first_region = 0;
/* Check if we're done. */
- if (sr->when_target < sr->region_start + sr->region_len)
+ if (sr->when_target < sr->region_start + sr->region_len)
{
sr->done = 1;
return;
return 0;
if (qp > 0)
{
- if ((sr->pr_val & (1UL << qp)) == 0)
+ if ((sr->pr_val & (1UL << qp)) == 0)
return 0;
sr->pr_mask |= (1UL << qp);
}
* macros/constants before including this file:
*
* Types:
- * unw_word Unsigned integer type with at least 64 bits
+ * unw_word Unsigned integer type with at least 64 bits
*
* Register names:
* UNW_REG_BSP
designate "condition handlers" with very different arguments than what we
would be providing. Such cases are typically identified from OS specific
bits in the unwind information block header, and checked by the target
- MD_UNW_COMPATIBLE_PERSONALITY_P macro.
+ MD_UNW_COMPATIBLE_PERSONALITY_P macro.
We just pretend there is no personality from our standpoint in such
situations, and expect GCC not to set the identifying bits itself so that
For other frames the procedure is by definition non-leaf so the pfs
is saved and restored and thus effectively dead in the body; only
the cfm need therefore be restored.
-
+
Here we have 2 cases:
- either the pfs is saved and restored and thus effectively dead
like in regular frames; then we do nothing special and restore
/* Fill in CONTEXT for top-of-stack. The only valid registers at this
level will be the return address and the CFA. Note that CFA = SP+16. */
-
+
#define uw_init_context(CONTEXT) \
do { \
/* ??? There is a whole lot o code in uw_install_context that \
}
}
- /* The value in uc_bsp that we've computed is that for the
+ /* The value in uc_bsp that we've computed is that for the
target function. The value that we install below will be
adjusted by the BR.RET instruction based on the contents
of AR.PFS. So we must unadjust that here. */
";; \n\t"
"mov.m ar.rsc = r29 \n\t"
";; \n\t"
- /* This must be done before setting AR.BSPSTORE, otherwise
+ /* This must be done before setting AR.BSPSTORE, otherwise
AR.BSP will be initialized with a random displacement
below the value we want, based on the current number of
dirty stacked registers. */
/* Accessors to fields of an unwind info block header. In this common file to
be visible from all the units involved in a target implementation. */
-
+
#ifndef __USING_SJLJ_EXCEPTIONS__
#define UNW_VER(x) ((x) >> 48)
#define UNW_FLAG_MASK 0x0000ffff00000000
INVO_CONTEXT_BLK local_icb;
INVO_CONTEXT_BLK *icb = &local_icb;
-
+
CHFCTX * chfctx;
CHF$MECH_ARRAY * chfmech;
CHF64$SIGNAL_ARRAY *chfsig64;
const uint try_bs_copy_mask = (1 << 16);
eh_debug = EH_DEBUG ? atoi (EH_DEBUG) : 0;
-
+
/* Fetch and clear the try_bs_copy bit. */
try_bs_copy = (uint)eh_debug & try_bs_copy_mask;
eh_debug &= ~try_bs_copy_mask;
/* Beware: we might be unwinding through nested condition handlers, so the
dispatcher frame we seek might not be the first one on the way up. Loop
- thus. */
+ thus. */
do {
-
+
/* Seek the next dispatcher frame up the "current" point. Stop if we
either get past the target context or hit the bottom-of-stack along
the way. */
FAIL_IF (status == 0);
FAIL_IF ((uw_reg)icb->libicb$ih_sp > (uw_reg)context->psp
|| DENOTES_BOTTOM_OF_STACK (icb));
-
+
if (eh_debug)
printf ("frame%s sp @ 0x%llx, pc @ 0x%llx bsp=0x%llx\n",
DENOTES_VMS_DISPATCHER_FRAME (icb) ? " (dispatcher)" : "",
chfctx = icb->libicb$ph_chfctx_addr;
FAIL_IF (chfctx == 0);
-
+
chfmech = (CHF$MECH_ARRAY *)chfctx->chfctx$q_mcharglst;
FAIL_IF (chfmech == 0);
chfsig64 = (CHF64$SIGNAL_ARRAY *)chfmech->chf$ph_mch_sig64_addr;
FAIL_IF (chfsig64 == 0);
-
+
intstk = (INTSTK *)chfmech->chf$q_mch_esf_addr;
FAIL_IF (intstk == 0 || intstk->intstk$b_subtype == DYN$C_SSENTRY);
context->unat_loc = (uw_loc)&intstk->intstk$q_unat;
/* Branch register locations. */
-
+
{
uw_reg * ctxregs = (uw_reg *)&intstk->intstk$q_b0;
uw_reg q_bspstore = (uw_reg) intstk->intstk$q_bspstore;
uw_reg q_bspbase = (uw_reg) intstk->intstk$q_bspbase;
uw_reg ih_bspbase = (uw_reg) icb->libicb$ih_bspbase;
-
+
if (eh_debug)
printf ("q_bspstore = 0x%lx, q_bsp = 0x%lx, q_bspbase = 0x%lx\n"
"ih_bspbase = 0x%lx\n",
/* Not clear if these are the proper arguments here. This is what
looked the closest to what is performed in the Linux case. */
}
-
+
}
context->bsp = (uw_reg)intstk->intstk$q_bsp;
/* We're directly setting up the "context" for a VMS exception handler.
The "previous SP" for it is the SP upon the handler's entry, that is
- the SP at the condition/interruption/exception point. */
+ the SP at the condition/interruption/exception point. */
context->psp = (uw_reg)icb->libicb$ih_sp;
/* Previous Frame State location. What eventually ends up in pfs_loc is
return _URC_NO_REASON;
}
-
+
#include "bid_conf.h"
#include "bid_functions.h"
-#include "bid_gcc_intrinsics.h"
+#include "bid_gcc_intrinsics.h"
_Decimal128
__bid_addtd3 (_Decimal128 x, _Decimal128 y) {
#include "bid_conf.h"
#include "bid_functions.h"
#include "bid_gcc_intrinsics.h"
-
+
_Decimal32
__bid_truncddsd2 (_Decimal64 x) {
union decimal32 res;
#include "bid_conf.h"
#include "bid_functions.h"
#include "bid_gcc_intrinsics.h"
-
+
_Decimal128
__bid_extendddtd2 (_Decimal64 x) {
union decimal128 res;
__bid_eqdd2 (_Decimal64 x, _Decimal64 y) {
CMPtype res;
union decimal64 ux, uy;
-
+
ux.d = x;
uy.d = y;
res = __bid64_quiet_equal (ux.i, uy.i);
__bid_eqtd2 (_Decimal128 x, _Decimal128 y) {
CMPtype res;
union decimal128 ux, uy;
-
+
ux.d = x;
uy.d = y;
res = __bid128_quiet_equal (ux.i, uy.i);
__bid_gedd2 (_Decimal64 x, _Decimal64 y) {
CMPtype res;
union decimal64 ux, uy;
-
+
ux.d = x;
uy.d = y;
res = __bid64_quiet_greater_equal (ux.i, uy.i);
CMPtype res;
UINT64 x64, y64;
union decimal32 ux, uy;
-
+
ux.d = x;
uy.d = y;
x64 = __bid32_to_bid64 (ux.i);
__bid_getd2 (_Decimal128 x, _Decimal128 y) {
CMPtype res;
union decimal128 ux, uy;
-
+
ux.d = x;
uy.d = y;
res = __bid128_quiet_greater_equal (ux.i, uy.i);
__bid_gtdd2 (_Decimal64 x, _Decimal64 y) {
CMPtype res;
union decimal64 ux, uy;
-
+
ux.d = x;
uy.d = y;
res = __bid64_quiet_greater (ux.i, uy.i);
CMPtype res;
UINT64 x64, y64;
union decimal32 ux, uy;
-
+
ux.d = x;
uy.d = y;
x64 = __bid32_to_bid64 (ux.i);
__bid_gttd2 (_Decimal128 x, _Decimal128 y) {
CMPtype res;
union decimal128 ux, uy;
-
+
ux.d = x;
uy.d = y;
res = __bid128_quiet_greater (ux.i, uy.i);
__bid_ledd2 (_Decimal64 x, _Decimal64 y) {
CMPtype res;
union decimal64 ux, uy;
-
+
ux.d = x;
uy.d = y;
res = __bid64_quiet_less_equal (ux.i, uy.i);
CMPtype res;
UINT64 x64, y64;
union decimal32 ux, uy;
-
+
ux.d = x;
uy.d = y;
x64 = __bid32_to_bid64 (ux.i);
__bid_letd2 (_Decimal128 x, _Decimal128 y) {
CMPtype res;
union decimal128 ux, uy;
-
+
ux.d = x;
uy.d = y;
res = __bid128_quiet_less_equal (ux.i, uy.i);
__bid_ltdd2 (_Decimal64 x, _Decimal64 y) {
CMPtype res;
union decimal64 ux, uy;
-
+
ux.d = x;
uy.d = y;
res = -__bid64_quiet_less (ux.i, uy.i);
CMPtype res;
UINT64 x64, y64;
union decimal32 ux, uy;
-
+
ux.d = x;
uy.d = y;
x64 = __bid32_to_bid64 (ux.i);
__bid_lttd2 (_Decimal128 x, _Decimal128 y) {
CMPtype res;
union decimal128 ux, uy;
-
+
ux.d = x;
uy.d = y;
res = -__bid128_quiet_less (ux.i, uy.i);
_Decimal64
__bid_muldd3 (_Decimal64 x, _Decimal64 y) {
union decimal64 ux, uy, res;
-
+
ux.d = x;
uy.d = y;
res.i = __bid64_mul (ux.i, uy.i);
__bid_mulsd3 (_Decimal32 x, _Decimal32 y) {
UINT64 x64, y64, res64;
union decimal32 ux, uy, res;
-
+
ux.d = x;
uy.d = y;
x64 = __bid32_to_bid64 (ux.i);
__bid_nedd2 (_Decimal64 x, _Decimal64 y) {
CMPtype res;
union decimal64 ux, uy;
-
+
ux.d = x;
uy.d = y;
res = __bid64_quiet_not_equal (ux.i, uy.i);
CMPtype res;
UINT64 x64, y64;
union decimal32 ux, uy;
-
+
ux.d = x;
uy.d = y;
x64 = __bid32_to_bid64 (ux.i);
__bid_netd2 (_Decimal128 x, _Decimal128 y) {
CMPtype res;
union decimal128 ux, uy;
-
+
ux.d = x;
uy.d = y;
res = __bid128_quiet_not_equal (ux.i, uy.i);
#include "bid_conf.h"
#include "bid_functions.h"
#include "bid_gcc_intrinsics.h"
-
+
_Decimal64
__bid_extendsddd2 (_Decimal32 x) {
union decimal64 res;
#include "bid_conf.h"
#include "bid_functions.h"
#include "bid_gcc_intrinsics.h"
-
+
_Decimal128
__bid_extendsdtd2 (_Decimal32 x) {
union decimal128 res;
#include "bid_conf.h"
#include "bid_functions.h"
#include "bid_gcc_intrinsics.h"
-
+
_Decimal64
__bid_trunctddd2 (_Decimal128 x) {
union decimal128 ux;
#include "bid_conf.h"
#include "bid_functions.h"
#include "bid_gcc_intrinsics.h"
-
+
_Decimal32
__bid_trunctdsd2 (_Decimal128 x) {
union decimal128 ux;
__bid_unorddd2 (_Decimal64 x, _Decimal64 y) {
CMPtype res;
union decimal64 ux, uy;
-
+
ux.d = x;
uy.d = y;
res = __bid64_quiet_unordered (ux.i, uy.i);
CMPtype res;
UINT64 x64, y64;
union decimal32 ux, uy;
-
+
ux.d = x;
uy.d = y;
x64 = __bid32_to_bid64 (ux.i);
__bid_unordtd2 (_Decimal128 x, _Decimal128 y) {
CMPtype res;
union decimal128 ux, uy;
-
+
ux.d = x;
uy.d = y;
res = __bid128_quiet_unordered (ux.i, uy.i);
#include "bid_internal.h"
-// the first entry of nr_digits[i - 1] (where 1 <= i <= 113), indicates
+// the first entry of nr_digits[i - 1] (where 1 <= i <= 113), indicates
// the number of decimal digits needed to represent a binary number with i bits;
// however, if a binary number of i bits may require either k or k + 1 decimal
// digits, then the first entry of nr_digits[i - 1] is 0; in this case if the
};
-// maskhigh128[] contains the mask to apply to the top 128 bits of the
+// maskhigh128[] contains the mask to apply to the top 128 bits of the
// 128x128-bit product in order to obtain the high bits of f2*
// the 64-bit word order is L, H
UINT64 maskhigh128[] = {
};
-// onehalf128[] contains the high bits of 1/2 positioned correctly for
+// onehalf128[] contains the high bits of 1/2 positioned correctly for
// comparison with the high bits of f2*
// the 64-bit word order is L, H
UINT64 onehalf128[] = {
UINT64 ten2mk64[] = {
0x199999999999999aULL, // 10^(-1) * 2^ 64
- 0x028f5c28f5c28f5dULL, // 10^(-2) * 2^ 64
- 0x004189374bc6a7f0ULL, // 10^(-3) * 2^ 64
- 0x00346dc5d638865aULL, // 10^(-4) * 2^ 67
- 0x0029f16b11c6d1e2ULL, // 10^(-5) * 2^ 70
- 0x00218def416bdb1bULL, // 10^(-6) * 2^ 73
- 0x0035afe535795e91ULL, // 10^(-7) * 2^ 77
- 0x002af31dc4611874ULL, // 10^(-8) * 2^ 80
- 0x00225c17d04dad2aULL, // 10^(-9) * 2^ 83
- 0x0036f9bfb3af7b76ULL, // 10^(-10) * 2^ 87
- 0x002bfaffc2f2c92bULL, // 10^(-11) * 2^ 90
- 0x00232f33025bd423ULL, // 10^(-12) * 2^ 93
- 0x00384b84d092ed04ULL, // 10^(-13) * 2^ 97
- 0x002d09370d425737ULL, // 10^(-14) * 2^100
- 0x0024075f3dceac2cULL, // 10^(-15) * 2^103
- 0x0039a5652fb11379ULL, // 10^(-16) * 2^107
+ 0x028f5c28f5c28f5dULL, // 10^(-2) * 2^ 64
+ 0x004189374bc6a7f0ULL, // 10^(-3) * 2^ 64
+ 0x00346dc5d638865aULL, // 10^(-4) * 2^ 67
+ 0x0029f16b11c6d1e2ULL, // 10^(-5) * 2^ 70
+ 0x00218def416bdb1bULL, // 10^(-6) * 2^ 73
+ 0x0035afe535795e91ULL, // 10^(-7) * 2^ 77
+ 0x002af31dc4611874ULL, // 10^(-8) * 2^ 80
+ 0x00225c17d04dad2aULL, // 10^(-9) * 2^ 83
+ 0x0036f9bfb3af7b76ULL, // 10^(-10) * 2^ 87
+ 0x002bfaffc2f2c92bULL, // 10^(-11) * 2^ 90
+ 0x00232f33025bd423ULL, // 10^(-12) * 2^ 93
+ 0x00384b84d092ed04ULL, // 10^(-13) * 2^ 97
+ 0x002d09370d425737ULL, // 10^(-14) * 2^100
+ 0x0024075f3dceac2cULL, // 10^(-15) * 2^103
+ 0x0039a5652fb11379ULL, // 10^(-16) * 2^107
};
-// ten2mk128trunc[] contains T*, the top Ex >= 128 bits of 10^(-k),
-// for 1 <= k <= 34
+// ten2mk128trunc[] contains T*, the top Ex >= 128 bits of 10^(-k),
+// for 1 <= k <= 34
// the 64-bit word order is L, H
UINT128 ten2mk128trunc[] = {
{{0x9999999999999999ULL, 0x1999999999999999ULL}}, // 10^(-1) * 2^128
- {{0x28f5c28f5c28f5c2ULL, 0x028f5c28f5c28f5cULL}}, // 10^(-2) * 2^128
- {{0x9db22d0e56041893ULL, 0x004189374bc6a7efULL}}, // 10^(-3) * 2^128
- {{0x4af4f0d844d013a9ULL, 0x00346dc5d6388659ULL}}, // 10^(-4) * 2^131
- {{0x08c3f3e0370cdc87ULL, 0x0029f16b11c6d1e1ULL}}, // 10^(-5) * 2^134
- {{0x6d698fe69270b06cULL, 0x00218def416bdb1aULL}}, // 10^(-6) * 2^137
- {{0xaf0f4ca41d811a46ULL, 0x0035afe535795e90ULL}}, // 10^(-7) * 2^141
- {{0xbf3f70834acdae9fULL, 0x002af31dc4611873ULL}}, // 10^(-8) * 2^144
- {{0x65cc5a02a23e254cULL, 0x00225c17d04dad29ULL}}, // 10^(-9) * 2^147
- {{0x6fad5cd10396a213ULL, 0x0036f9bfb3af7b75ULL}}, // 10^(-10) * 2^151
- {{0xbfbde3da69454e75ULL, 0x002bfaffc2f2c92aULL}}, // 10^(-11) * 2^154
- {{0x32fe4fe1edd10b91ULL, 0x00232f33025bd422ULL}}, // 10^(-12) * 2^157
- {{0x84ca19697c81ac1bULL, 0x00384b84d092ed03ULL}}, // 10^(-13) * 2^161
- {{0x03d4e1213067bce3ULL, 0x002d09370d425736ULL}}, // 10^(-14) * 2^164
- {{0x3643e74dc052fd82ULL, 0x0024075f3dceac2bULL}}, // 10^(-15) * 2^167
- {{0x56d30baf9a1e626aULL, 0x0039a5652fb11378ULL}}, // 10^(-16) * 2^171
- {{0x12426fbfae7eb521ULL, 0x002e1dea8c8da92dULL}}, // 10^(-17) * 2^174
- {{0x41cebfcc8b9890e7ULL, 0x0024e4bba3a48757ULL}}, // 10^(-18) * 2^177
- {{0x694acc7a78f41b0cULL, 0x003b07929f6da558ULL}}, // 10^(-19) * 2^181
- {{0xbaa23d2ec729af3dULL, 0x002f394219248446ULL}}, // 10^(-20) * 2^184
- {{0xfbb4fdbf05baf297ULL, 0x0025c768141d369eULL}}, // 10^(-21) * 2^187
- {{0x2c54c931a2c4b758ULL, 0x003c7240202ebdcbULL}}, // 10^(-22) * 2^191
- {{0x89dd6dc14f03c5e0ULL, 0x00305b66802564a2ULL}}, // 10^(-23) * 2^194
- {{0xd4b1249aa59c9e4dULL, 0x0026af8533511d4eULL}}, // 10^(-24) * 2^197
- {{0x544ea0f76f60fd48ULL, 0x003de5a1ebb4fbb1ULL}}, // 10^(-25) * 2^201
- {{0x76a54d92bf80caa0ULL, 0x00318481895d9627ULL}}, // 10^(-26) * 2^204
- {{0x921dd7a89933d54dULL, 0x00279d346de4781fULL}}, // 10^(-27) * 2^207
- {{0x8362f2a75b862214ULL, 0x003f61ed7ca0c032ULL}}, // 10^(-28) * 2^211
- {{0xcf825bb91604e810ULL, 0x0032b4bdfd4d668eULL}}, // 10^(-29) * 2^214
- {{0x0c684960de6a5340ULL, 0x00289097fdd7853fULL}}, // 10^(-30) * 2^217
- {{0x3d203ab3e521dc33ULL, 0x002073accb12d0ffULL}}, // 10^(-31) * 2^220
- {{0x2e99f7863b696052ULL, 0x0033ec47ab514e65ULL}}, // 10^(-32) * 2^224
- {{0x587b2c6b62bab375ULL, 0x002989d2ef743eb7ULL}}, // 10^(-33) * 2^227
- {{0xad2f56bc4efbc2c4ULL, 0x00213b0f25f69892ULL}}, // 10^(-34) * 2^230
+ {{0x28f5c28f5c28f5c2ULL, 0x028f5c28f5c28f5cULL}}, // 10^(-2) * 2^128
+ {{0x9db22d0e56041893ULL, 0x004189374bc6a7efULL}}, // 10^(-3) * 2^128
+ {{0x4af4f0d844d013a9ULL, 0x00346dc5d6388659ULL}}, // 10^(-4) * 2^131
+ {{0x08c3f3e0370cdc87ULL, 0x0029f16b11c6d1e1ULL}}, // 10^(-5) * 2^134
+ {{0x6d698fe69270b06cULL, 0x00218def416bdb1aULL}}, // 10^(-6) * 2^137
+ {{0xaf0f4ca41d811a46ULL, 0x0035afe535795e90ULL}}, // 10^(-7) * 2^141
+ {{0xbf3f70834acdae9fULL, 0x002af31dc4611873ULL}}, // 10^(-8) * 2^144
+ {{0x65cc5a02a23e254cULL, 0x00225c17d04dad29ULL}}, // 10^(-9) * 2^147
+ {{0x6fad5cd10396a213ULL, 0x0036f9bfb3af7b75ULL}}, // 10^(-10) * 2^151
+ {{0xbfbde3da69454e75ULL, 0x002bfaffc2f2c92aULL}}, // 10^(-11) * 2^154
+ {{0x32fe4fe1edd10b91ULL, 0x00232f33025bd422ULL}}, // 10^(-12) * 2^157
+ {{0x84ca19697c81ac1bULL, 0x00384b84d092ed03ULL}}, // 10^(-13) * 2^161
+ {{0x03d4e1213067bce3ULL, 0x002d09370d425736ULL}}, // 10^(-14) * 2^164
+ {{0x3643e74dc052fd82ULL, 0x0024075f3dceac2bULL}}, // 10^(-15) * 2^167
+ {{0x56d30baf9a1e626aULL, 0x0039a5652fb11378ULL}}, // 10^(-16) * 2^171
+ {{0x12426fbfae7eb521ULL, 0x002e1dea8c8da92dULL}}, // 10^(-17) * 2^174
+ {{0x41cebfcc8b9890e7ULL, 0x0024e4bba3a48757ULL}}, // 10^(-18) * 2^177
+ {{0x694acc7a78f41b0cULL, 0x003b07929f6da558ULL}}, // 10^(-19) * 2^181
+ {{0xbaa23d2ec729af3dULL, 0x002f394219248446ULL}}, // 10^(-20) * 2^184
+ {{0xfbb4fdbf05baf297ULL, 0x0025c768141d369eULL}}, // 10^(-21) * 2^187
+ {{0x2c54c931a2c4b758ULL, 0x003c7240202ebdcbULL}}, // 10^(-22) * 2^191
+ {{0x89dd6dc14f03c5e0ULL, 0x00305b66802564a2ULL}}, // 10^(-23) * 2^194
+ {{0xd4b1249aa59c9e4dULL, 0x0026af8533511d4eULL}}, // 10^(-24) * 2^197
+ {{0x544ea0f76f60fd48ULL, 0x003de5a1ebb4fbb1ULL}}, // 10^(-25) * 2^201
+ {{0x76a54d92bf80caa0ULL, 0x00318481895d9627ULL}}, // 10^(-26) * 2^204
+ {{0x921dd7a89933d54dULL, 0x00279d346de4781fULL}}, // 10^(-27) * 2^207
+ {{0x8362f2a75b862214ULL, 0x003f61ed7ca0c032ULL}}, // 10^(-28) * 2^211
+ {{0xcf825bb91604e810ULL, 0x0032b4bdfd4d668eULL}}, // 10^(-29) * 2^214
+ {{0x0c684960de6a5340ULL, 0x00289097fdd7853fULL}}, // 10^(-30) * 2^217
+ {{0x3d203ab3e521dc33ULL, 0x002073accb12d0ffULL}}, // 10^(-31) * 2^220
+ {{0x2e99f7863b696052ULL, 0x0033ec47ab514e65ULL}}, // 10^(-32) * 2^224
+ {{0x587b2c6b62bab375ULL, 0x002989d2ef743eb7ULL}}, // 10^(-33) * 2^227
+ {{0xad2f56bc4efbc2c4ULL, 0x00213b0f25f69892ULL}}, // 10^(-34) * 2^230
};
// ten2mk128M[k - 1] = 10^(-k) * 2^exp (k), where 1 <= k <= 4 and
};
// shiftright192M[] contains the right shift count to obtain C2* from the top
-// 192 bits of the 192x192-bit product C2 * Kx if 0 <= ind <= 14 where ind is
+// 192 bits of the 192x192-bit product C2 * Kx if 0 <= ind <= 14 where ind is
// the index in the table, or from the top 128 bits if 15 <= ind <= 18
int shiftright192M[] = {
16, // 208 - 192
};
// shiftright256M[] contains the right shift count to obtain C2* from the top
-// 192 bits of the 256x256-bit product C2 * Kx
+// 192 bits of the 256x256-bit product C2 * Kx
int shiftright256M[] = {
15, // 335 - 320
19, // 339 - 320
0x0000ffffffffffffULL // 48 = 368 - 320 bits
};
-// onehalf256M[] contains 1/2 positioned correctly for comparison with the
+// onehalf256M[] contains 1/2 positioned correctly for comparison with the
// high bits of f*; the high 128 bits of the 512-bit mask are 0
UINT64 onehalf256M[] = {
0x0000000000004000ULL, // 15 = 335 - 320 bits
UINT256 Kx256[] = {
{{0xcccccccccccccccdULL, 0xccccccccccccccccULL,
0xccccccccccccccccULL, 0xccccccccccccccccULL}},
- // 10^-1 ~= cccccccccccccccc cccccccccccccccc
+ // 10^-1 ~= cccccccccccccccc cccccccccccccccc
// cccccccccccccccccccccccccccccccd * 2^-259
{{0x70a3d70a3d70a3d8ULL, 0xd70a3d70a3d70a3dULL,
0x3d70a3d70a3d70a3ULL, 0xa3d70a3d70a3d70aULL}},
- // 10^-2 ~= a3d70a3d70a3d70a 3d70a3d70a3d70a3
+ // 10^-2 ~= a3d70a3d70a3d70a 3d70a3d70a3d70a3
// d70a3d70a3d70a3d70a3d70a3d70a3d8 * 2^-262
{{0xc083126e978d4fe0ULL, 0x78d4fdf3b645a1caULL,
0x645a1cac083126e9ULL, 0x83126e978d4fdf3bULL}},
- // 10^-3 ~= 83126e978d4fdf3b 645a1cac083126e9
+ // 10^-3 ~= 83126e978d4fdf3b 645a1cac083126e9
// 78d4fdf3b645a1cac083126e978d4fe0 * 2^-265
{{0x67381d7dbf487fccULL, 0xc154c985f06f6944ULL,
0xd3c36113404ea4a8ULL, 0xd1b71758e219652bULL}},
- // 10^-4 ~= d1b71758e219652b d3c36113404ea4a8
+ // 10^-4 ~= d1b71758e219652b d3c36113404ea4a8
// c154c985f06f694467381d7dbf487fcc * 2^-269
{{0x85c67dfe32a0663dULL, 0xcddd6e04c0592103ULL,
0x0fcf80dc33721d53ULL, 0xa7c5ac471b478423ULL}},
- // 10^-5 ~= a7c5ac471b478423 fcf80dc33721d53
+ // 10^-5 ~= a7c5ac471b478423 fcf80dc33721d53
// cddd6e04c059210385c67dfe32a0663d * 2^-272
{{0x37d1fe64f54d1e97ULL, 0xd7e45803cd141a69ULL,
0xa63f9a49c2c1b10fULL, 0x8637bd05af6c69b5ULL}},
- // 10^-6 ~= 8637bd05af6c69b5 a63f9a49c2c1b10f
+ // 10^-6 ~= 8637bd05af6c69b5 a63f9a49c2c1b10f
// d7e45803cd141a6937d1fe64f54d1e97 * 2^-275
{{0x8c8330a1887b6425ULL, 0x8ca08cd2e1b9c3dbULL,
0x3d32907604691b4cULL, 0xd6bf94d5e57a42bcULL}},
- // 10^-7 ~= d6bf94d5e57a42bc 3d32907604691b4c
+ // 10^-7 ~= d6bf94d5e57a42bc 3d32907604691b4c
// 8ca08cd2e1b9c3db8c8330a1887b6425 * 2^-279
{{0x7068f3b46d2f8351ULL, 0x3d4d3d758161697cULL,
0xfdc20d2b36ba7c3dULL, 0xabcc77118461cefcULL}},
- // 10^-8 ~= abcc77118461cefc fdc20d2b36ba7c3d
+ // 10^-8 ~= abcc77118461cefc fdc20d2b36ba7c3d
// 3d4d3d758161697c7068f3b46d2f8351 * 2^-282
{{0xf387295d242602a7ULL, 0xfdd7645e011abac9ULL,
0x31680a88f8953030ULL, 0x89705f4136b4a597ULL}},
- // 10^-9 ~= 89705f4136b4a597 31680a88f8953030
+ // 10^-9 ~= 89705f4136b4a597 31680a88f8953030
// fdd7645e011abac9f387295d242602a7 * 2^-285
{{0xb8d8422ea03cd10bULL, 0x2fbf06fcce912adcULL,
0xb573440e5a884d1bULL, 0xdbe6fecebdedd5beULL}},
- // 10^-10 ~= dbe6fecebdedd5be b573440e5a884d1b
+ // 10^-10 ~= dbe6fecebdedd5be b573440e5a884d1b
// 2fbf06fcce912adcb8d8422ea03cd10b * 2^-289
{{0x93e034f219ca40d6ULL, 0xf2ff38ca3eda88b0ULL,
0xf78f69a51539d748ULL, 0xafebff0bcb24aafeULL}},
- // 10^-11 ~= afebff0bcb24aafe f78f69a51539d748
+ // 10^-11 ~= afebff0bcb24aafe f78f69a51539d748
// f2ff38ca3eda88b093e034f219ca40d6 * 2^-292
{{0x4319c3f4e16e9a45ULL, 0xf598fa3b657ba08dULL,
0xf93f87b7442e45d3ULL, 0x8cbccc096f5088cbULL}},
- // 10^-12 ~= 8cbccc096f5088cb f93f87b7442e45d3
+ // 10^-12 ~= 8cbccc096f5088cb f93f87b7442e45d3
// f598fa3b657ba08d4319c3f4e16e9a45 * 2^-295
{{0x04f606549be42a07ULL, 0x88f4c3923bf900e2ULL,
0x2865a5f206b06fb9ULL, 0xe12e13424bb40e13ULL}},
- // 10^-13 ~= e12e13424bb40e13 2865a5f206b06fb9
+ // 10^-13 ~= e12e13424bb40e13 2865a5f206b06fb9
// 88f4c3923bf900e204f606549be42a07 * 2^-299
{{0x03f805107cb68806ULL, 0x6d909c74fcc733e8ULL,
0x538484c19ef38c94ULL, 0xb424dc35095cd80fULL}},
- // 10^-14 ~= b424dc35095cd80f 538484c19ef38c94
+ // 10^-14 ~= b424dc35095cd80f 538484c19ef38c94
// 6d909c74fcc733e803f805107cb68806 * 2^-302
{{0x3660040d3092066bULL, 0x57a6e390ca38f653ULL,
0x0f9d37014bf60a10ULL, 0x901d7cf73ab0acd9ULL}},
- // 10^-15 ~= 901d7cf73ab0acd9 f9d37014bf60a10
+ // 10^-15 ~= 901d7cf73ab0acd9 f9d37014bf60a10
// 57a6e390ca38f6533660040d3092066b * 2^-305
{{0x23ccd3484db670abULL, 0xbf716c1add27f085ULL,
0x4c2ebe687989a9b3ULL, 0xe69594bec44de15bULL}},
- // 10^-16 ~= e69594bec44de15b 4c2ebe687989a9b3
+ // 10^-16 ~= e69594bec44de15b 4c2ebe687989a9b3
// bf716c1add27f08523ccd3484db670ab * 2^-309
{{0x4fd70f6d0af85a23ULL, 0xff8df0157db98d37ULL,
0x09befeb9fad487c2ULL, 0xb877aa3236a4b449ULL}},
- // 10^-17 ~= b877aa3236a4b449 9befeb9fad487c2
+ // 10^-17 ~= b877aa3236a4b449 9befeb9fad487c2
// ff8df0157db98d374fd70f6d0af85a23 * 2^-312
{{0x0cac0c573bf9e1b6ULL, 0x32d7f344649470f9ULL,
0x3aff322e62439fcfULL, 0x9392ee8e921d5d07ULL}},
- // 10^-18 ~= 9392ee8e921d5d07 3aff322e62439fcf
+ // 10^-18 ~= 9392ee8e921d5d07 3aff322e62439fcf
// 32d7f344649470f90cac0c573bf9e1b6 * 2^-315
{{0xe11346f1f98fcf89ULL, 0x1e2652070753e7f4ULL,
0x2b31e9e3d06c32e5ULL, 0xec1e4a7db69561a5ULL}},
- // 10^-19 ~= ec1e4a7db69561a5 2b31e9e3d06c32e5
+ // 10^-19 ~= ec1e4a7db69561a5 2b31e9e3d06c32e5
// 1e2652070753e7f4e11346f1f98fcf89 * 2^-319
{{0x4da9058e613fd93aULL, 0x181ea8059f76532aULL,
0x88f4bb1ca6bcf584ULL, 0xbce5086492111aeaULL}},
- // 10^-20 ~= bce5086492111aea 88f4bb1ca6bcf584
+ // 10^-20 ~= bce5086492111aea 88f4bb1ca6bcf584
// 181ea8059f76532a4da9058e613fd93a * 2^-322
{{0xa48737a51a997a95ULL, 0x467eecd14c5ea8eeULL,
0xd3f6fc16ebca5e03ULL, 0x971da05074da7beeULL}},
- // 10^-21 ~= 971da05074da7bee d3f6fc16ebca5e03
+ // 10^-21 ~= 971da05074da7bee d3f6fc16ebca5e03
// 467eecd14c5ea8eea48737a51a997a95 * 2^-325
{{0x3a71f2a1c428c421ULL, 0x70cb148213caa7e4ULL,
0x5324c68b12dd6338ULL, 0xf1c90080baf72cb1ULL}},
- // 10^-22 ~= f1c90080baf72cb1 5324c68b12dd6338
+ // 10^-22 ~= f1c90080baf72cb1 5324c68b12dd6338
// 70cb148213caa7e43a71f2a1c428c421 * 2^-329
{{0x2ec18ee7d0209ce8ULL, 0x8d6f439b43088650ULL,
0x75b7053c0f178293ULL, 0xc16d9a0095928a27ULL}},
- // 10^-23 ~= c16d9a0095928a27 75b7053c0f178293
+ // 10^-23 ~= c16d9a0095928a27 75b7053c0f178293
// 8d6f439b430886502ec18ee7d0209ce8 * 2^-332
{{0xf23472530ce6e3edULL, 0xd78c3615cf3a050cULL,
0xc4926a9672793542ULL, 0x9abe14cd44753b52ULL}},
- // 10^-24 ~= 9abe14cd44753b52 c4926a9672793542
+ // 10^-24 ~= 9abe14cd44753b52 c4926a9672793542
// d78c3615cf3a050cf23472530ce6e3ed * 2^-335
{{0xe9ed83b814a49fe1ULL, 0x8c1389bc7ec33b47ULL,
0x3a83ddbd83f52204ULL, 0xf79687aed3eec551ULL}},
- // 10^-25 ~= f79687aed3eec551 3a83ddbd83f52204
+ // 10^-25 ~= f79687aed3eec551 3a83ddbd83f52204
// 8c1389bc7ec33b47e9ed83b814a49fe1 * 2^-339
{{0x87f1362cdd507fe7ULL, 0x3cdc6e306568fc39ULL,
0x95364afe032a819dULL, 0xc612062576589ddaULL}},
- // 10^-26 ~= c612062576589dda 95364afe032a819d
+ // 10^-26 ~= c612062576589dda 95364afe032a819d
// 3cdc6e306568fc3987f1362cdd507fe7 * 2^-342
{{0x9ff42b5717739986ULL, 0xca49f1c05120c9c7ULL,
0x775ea264cf55347dULL, 0x9e74d1b791e07e48ULL}},
- // 10^-27 ~= 9e74d1b791e07e48 775ea264cf55347d
+ // 10^-27 ~= 9e74d1b791e07e48 775ea264cf55347d
// ca49f1c05120c9c79ff42b5717739986 * 2^-345
{{0xccb9def1bf1f5c09ULL, 0x76dcb60081ce0fa5ULL,
0x8bca9d6e188853fcULL, 0xfd87b5f28300ca0dULL}},
- // 10^-28 ~= fd87b5f28300ca0d 8bca9d6e188853fc
+ // 10^-28 ~= fd87b5f28300ca0d 8bca9d6e188853fc
// 76dcb60081ce0fa5ccb9def1bf1f5c09 * 2^-349
{{0xa3c7e58e327f7cd4ULL, 0x5f16f80067d80c84ULL,
0x096ee45813a04330ULL, 0xcad2f7f5359a3b3eULL}},
- // 10^-29 ~= cad2f7f5359a3b3e 96ee45813a04330
+ // 10^-29 ~= cad2f7f5359a3b3e 96ee45813a04330
// 5f16f80067d80c84a3c7e58e327f7cd4 * 2^-352
{{0xb6398471c1ff9710ULL, 0x18df2ccd1fe00a03ULL,
0xa1258379a94d028dULL, 0xa2425ff75e14fc31ULL}},
- // 10^-30 ~= a2425ff75e14fc31 a1258379a94d028d
+ // 10^-30 ~= a2425ff75e14fc31 a1258379a94d028d
// 18df2ccd1fe00a03b6398471c1ff9710 * 2^-355
{{0xf82e038e34cc78daULL, 0x4718f0a419800802ULL,
0x80eacf948770ced7ULL, 0x81ceb32c4b43fcf4ULL}},
- // 10^-31 ~= 81ceb32c4b43fcf4 80eacf948770ced7
+ // 10^-31 ~= 81ceb32c4b43fcf4 80eacf948770ced7
// 4718f0a419800802f82e038e34cc78da * 2^-358
{{0x59e338e387ad8e29ULL, 0x0b5b1aa028ccd99eULL,
0x67de18eda5814af2ULL, 0xcfb11ead453994baULL}},
- // 10^-32 ~= cfb11ead453994ba 67de18eda5814af2
+ // 10^-32 ~= cfb11ead453994ba 67de18eda5814af2
// b5b1aa028ccd99e59e338e387ad8e29 * 2^-362
{{0x47e8fa4f9fbe0b54ULL, 0x6f7c154ced70ae18ULL,
0xecb1ad8aeacdd58eULL, 0xa6274bbdd0fadd61ULL}},
- // 10^-33 ~= a6274bbdd0fadd61 ecb1ad8aeacdd58e
+ // 10^-33 ~= a6274bbdd0fadd61 ecb1ad8aeacdd58e
// 6f7c154ced70ae1847e8fa4f9fbe0b54 * 2^-365
{{0xd320c83fb2fe6f76ULL, 0xbf967770bdf3be79ULL,
0xbd5af13bef0b113eULL, 0x84ec3c97da624ab4ULL}},
- // 10^-34 ~= 84ec3c97da624ab4 bd5af13bef0b113e
+ // 10^-34 ~= 84ec3c97da624ab4 bd5af13bef0b113e
// bf967770bdf3be79d320c83fb2fe6f76 * 2^-368
{{0x85014065eb30b257ULL, 0x65bd8be79652ca5cULL,
0x955e4ec64b44e864ULL, 0xd4ad2dbfc3d07787ULL}},
- // 10^-35 ~= d4ad2dbfc3d07787 955e4ec64b44e864
+ // 10^-35 ~= d4ad2dbfc3d07787 955e4ec64b44e864
// 65bd8be79652ca5c85014065eb30b257 * 2^-372
{{0xd0cdcd1e55c08eacULL, 0xeafe098611dbd516ULL,
0xdde50bd1d5d0b9e9ULL, 0xaa242499697392d2ULL}},
- // 10^-36 ~= aa242499697392d2 dde50bd1d5d0b9e9
+ // 10^-36 ~= aa242499697392d2 dde50bd1d5d0b9e9
// eafe098611dbd516d0cdcd1e55c08eac * 2^-375
{{0x40a4a418449a0bbdULL, 0xbbfe6e04db164412ULL,
0x7e50d64177da2e54ULL, 0x881cea14545c7575ULL}},
- // 10^-37 ~= 881cea14545c7575 7e50d64177da2e54
+ // 10^-37 ~= 881cea14545c7575 7e50d64177da2e54
// bbfe6e04db16441240a4a418449a0bbd * 2^-378
{{0x9aa1068d3a9012c8ULL, 0x2cca49a15e8a0683ULL,
0x96e7bd358c904a21ULL, 0xd9c7dced53c72255ULL}},
- // 10^-38 ~= d9c7dced53c72255 96e7bd358c904a21
+ // 10^-38 ~= d9c7dced53c72255 96e7bd358c904a21
// 2cca49a15e8a06839aa1068d3a9012c8 * 2^-382
{{0x154d9ed7620cdbd3ULL, 0x8a3b6e1ab2080536ULL,
0xabec975e0a0d081aULL, 0xae397d8aa96c1b77ULL}},
- // 10^-39 ~= ae397d8aa96c1b77 abec975e0a0d081a
+ // 10^-39 ~= ae397d8aa96c1b77 abec975e0a0d081a
// 8a3b6e1ab2080536154d9ed7620cdbd3 * 2^-385
{{0x443e18ac4e70afdcULL, 0x3b62be7bc1a0042bULL,
0x2323ac4b3b3da015ULL, 0x8b61313bbabce2c6ULL}},
- // 10^-40 ~= 8b61313bbabce2c6 2323ac4b3b3da015
+ // 10^-40 ~= 8b61313bbabce2c6 2323ac4b3b3da015
// 3b62be7bc1a0042b443e18ac4e70afdc * 2^-388
{{0x6d30277a171ab2f9ULL, 0x5f0463f935ccd378ULL,
0x6b6c46dec52f6688ULL, 0xdf01e85f912e37a3ULL}},
- // 10^-41 ~= df01e85f912e37a3 6b6c46dec52f6688
+ // 10^-41 ~= df01e85f912e37a3 6b6c46dec52f6688
// 5f0463f935ccd3786d30277a171ab2f9 * 2^-392
{{0x8a8cec61ac155bfbULL, 0x7f36b660f7d70f93ULL,
0x55f038b237591ed3ULL, 0xb267ed1940f1c61cULL}},
- // 10^-42 ~= b267ed1940f1c61c 55f038b237591ed3
+ // 10^-42 ~= b267ed1940f1c61c 55f038b237591ed3
// 7f36b660f7d70f938a8cec61ac155bfb * 2^-395
{{0x3ba3f04e23444996ULL, 0xcc2bc51a5fdf3fa9ULL,
0x77f3608e92adb242ULL, 0x8eb98a7a9a5b04e3ULL}},
- // 10^-43 ~= 8eb98a7a9a5b04e3 77f3608e92adb242
+ // 10^-43 ~= 8eb98a7a9a5b04e3 77f3608e92adb242
// cc2bc51a5fdf3fa93ba3f04e23444996 * 2^-398
{{0xf9064d49d206dc22ULL, 0xe046082a32fecc41ULL,
0x8cb89a7db77c506aULL, 0xe45c10c42a2b3b05ULL}},
- // 10^-44 ~= e45c10c42a2b3b05 8cb89a7db77c506a
+ // 10^-44 ~= e45c10c42a2b3b05 8cb89a7db77c506a
// e046082a32fecc41f9064d49d206dc22 * 2^-402
{{0xfa6b7107db38b01bULL, 0x4d04d354f598a367ULL,
0x3d607b97c5fd0d22ULL, 0xb6b00d69bb55c8d1ULL}},
- // 10^-45 ~= b6b00d69bb55c8d1 3d607b97c5fd0d22
+ // 10^-45 ~= b6b00d69bb55c8d1 3d607b97c5fd0d22
// 4d04d354f598a367fa6b7107db38b01b * 2^-405
{{0xfb8927397c2d59b0ULL, 0x3d9d75dd9146e91fULL,
0xcab3961304ca70e8ULL, 0x9226712162ab070dULL}},
- // 10^-46 ~= 9226712162ab070d cab3961304ca70e8
+ // 10^-46 ~= 9226712162ab070d cab3961304ca70e8
// 3d9d75dd9146e91ffb8927397c2d59b0 * 2^-408
{{0xf8db71f5937bc2b2ULL, 0xc8fbefc8e87174ffULL,
0xaab8f01e6e10b4a6ULL, 0xe9d71b689dde71afULL}},
- // 10^-47 ~= e9d71b689dde71af aab8f01e6e10b4a6
+ // 10^-47 ~= e9d71b689dde71af aab8f01e6e10b4a6
// c8fbefc8e87174fff8db71f5937bc2b2 * 2^-412
{{0x2d7c5b2adc630228ULL, 0x3a63263a538df733ULL,
0x5560c018580d5d52ULL, 0xbb127c53b17ec159ULL}},
- // 10^-48 ~= bb127c53b17ec159 5560c018580d5d52
+ // 10^-48 ~= bb127c53b17ec159 5560c018580d5d52
// 3a63263a538df7332d7c5b2adc630228 * 2^-415
{{0x24637c2249e8ce87ULL, 0x2eb5b82ea93e5f5cULL,
0xdde7001379a44aa8ULL, 0x95a8637627989aadULL}},
- // 10^-49 ~= 95a8637627989aad dde7001379a44aa8
+ // 10^-49 ~= 95a8637627989aad dde7001379a44aa8
// 2eb5b82ea93e5f5c24637c2249e8ce87 * 2^-418
{{0x3a38c69d430e173eULL, 0x4abc59e441fd6560ULL,
0x963e66858f6d4440ULL, 0xef73d256a5c0f77cULL}},
- // 10^-50 ~= ef73d256a5c0f77c 963e66858f6d4440
+ // 10^-50 ~= ef73d256a5c0f77c 963e66858f6d4440
// 4abc59e441fd65603a38c69d430e173e * 2^-422
{{0x94fa387dcf3e78feULL, 0x6efd14b69b311de6ULL,
0xde98520472bdd033ULL, 0xbf8fdb78849a5f96ULL}},
- // 10^-51 ~= bf8fdb78849a5f96 de98520472bdd033
+ // 10^-51 ~= bf8fdb78849a5f96 de98520472bdd033
// 6efd14b69b311de694fa387dcf3e78fe * 2^-425
{{0xaa61c6cb0c31fa65ULL, 0x259743c548f417ebULL,
0xe546a8038efe4029ULL, 0x993fe2c6d07b7fabULL}},
- // 10^-52 ~= 993fe2c6d07b7fab e546a8038efe4029
+ // 10^-52 ~= 993fe2c6d07b7fab e546a8038efe4029
// 259743c548f417ebaa61c6cb0c31fa65 * 2^-428
{{0xaa360ade79e990a2ULL, 0x3c25393ba7ecf312ULL,
0xd53dd99f4b3066a8ULL, 0xf53304714d9265dfULL}},
- // 10^-53 ~= f53304714d9265df d53dd99f4b3066a8
+ // 10^-53 ~= f53304714d9265df d53dd99f4b3066a8
// 3c25393ba7ecf312aa360ade79e990a2 * 2^-432
{{0x882b3be52e5473b5ULL, 0x96842dc95323f5a8ULL,
0xaa97e14c3c26b886ULL, 0xc428d05aa4751e4cULL}},
- // 10^-54 ~= c428d05aa4751e4c aa97e14c3c26b886
+ // 10^-54 ~= c428d05aa4751e4c aa97e14c3c26b886
// 96842dc95323f5a8882b3be52e5473b5 * 2^-435
{{0xd355c98425105c91ULL, 0xab9cf16ddc1cc486ULL,
0x55464dd69685606bULL, 0x9ced737bb6c4183dULL}},
- // 10^-55 ~= 9ced737bb6c4183d 55464dd69685606b
+ // 10^-55 ~= 9ced737bb6c4183d 55464dd69685606b
// ab9cf16ddc1cc486d355c98425105c91 * 2^-438
{{0xebbc75a03b4d60e7ULL, 0xac2e4f162cfad40aULL,
0xeed6e2f0f0d56712ULL, 0xfb158592be068d2eULL}},
- // 10^-56 ~= fb158592be068d2e eed6e2f0f0d56712
+ // 10^-56 ~= fb158592be068d2e eed6e2f0f0d56712
// ac2e4f162cfad40aebbc75a03b4d60e7 * 2^-442
{{0x8963914cfc3de71fULL, 0x568b727823fbdcd5ULL,
0xf245825a5a445275ULL, 0xc8de047564d20a8bULL}},
- // 10^-57 ~= c8de047564d20a8b f245825a5a445275
+ // 10^-57 ~= c8de047564d20a8b f245825a5a445275
// 568b727823fbdcd58963914cfc3de71f * 2^-445
{{0xd44fa770c9cb1f4cULL, 0x453c5b934ffcb0aaULL,
0x5b6aceaeae9d0ec4ULL, 0xa0b19d2ab70e6ed6ULL}},
- // 10^-58 ~= a0b19d2ab70e6ed6 5b6aceaeae9d0ec4
+ // 10^-58 ~= a0b19d2ab70e6ed6 5b6aceaeae9d0ec4
// 453c5b934ffcb0aad44fa770c9cb1f4c * 2^-448
{{0xdd0c85f3d4a27f70ULL, 0x37637c75d996f3bbULL,
0xe2bbd88bbee40bd0ULL, 0x808e17555f3ebf11ULL}},
- // 10^-59 ~= 808e17555f3ebf11 e2bbd88bbee40bd0
+ // 10^-59 ~= 808e17555f3ebf11 e2bbd88bbee40bd0
// 37637c75d996f3bbdd0c85f3d4a27f70 * 2^-451
{{0x61ada31fba9d98b3ULL, 0x256bfa5628f185f9ULL,
0x3792f412cb06794dULL, 0xcdb02555653131b6ULL}},
- // 10^-60 ~= cdb02555653131b6 3792f412cb06794d
+ // 10^-60 ~= cdb02555653131b6 3792f412cb06794d
// 256bfa5628f185f961ada31fba9d98b3 * 2^-455
{{0xe7be1c196217ad5cULL, 0x51232eab53f46b2dULL,
0x5fa8c3423c052dd7ULL, 0xa48ceaaab75a8e2bULL}},
- // 10^-61 ~= a48ceaaab75a8e2b 5fa8c3423c052dd7
+ // 10^-61 ~= a48ceaaab75a8e2b 5fa8c3423c052dd7
// 51232eab53f46b2de7be1c196217ad5c * 2^-458
{{0x52fe7ce11b46244aULL, 0x40e8f222a99055beULL,
0x1953cf68300424acULL, 0x83a3eeeef9153e89ULL}},
- // 10^-62 ~= 83a3eeeef9153e89 1953cf68300424ac
+ // 10^-62 ~= 83a3eeeef9153e89 1953cf68300424ac
// 40e8f222a99055be52fe7ce11b46244a * 2^-461
{{0x51972e34f8703a10ULL, 0x34a7e9d10f4d55fdULL,
0x8eec7f0d19a03aadULL, 0xd29fe4b18e88640eULL}},
- // 10^-63 ~= d29fe4b18e88640e 8eec7f0d19a03aad
+ // 10^-63 ~= d29fe4b18e88640e 8eec7f0d19a03aad
// 34a7e9d10f4d55fd51972e34f8703a10 * 2^-465
{{0x0e128b5d938cfb40ULL, 0x2a1fee40d90aab31ULL,
0x3f2398d747b36224ULL, 0xa87fea27a539e9a5ULL}},
- // 10^-64 ~= a87fea27a539e9a5 3f2398d747b36224
+ // 10^-64 ~= a87fea27a539e9a5 3f2398d747b36224
// 2a1fee40d90aab310e128b5d938cfb40 * 2^-468
{{0x3e753c4adc70c900ULL, 0xbb4cbe9a473bbc27ULL,
0x98e947129fc2b4e9ULL, 0x86ccbb52ea94baeaULL}},
- // 10^-65 ~= 86ccbb52ea94baea 98e947129fc2b4e9
+ // 10^-65 ~= 86ccbb52ea94baea 98e947129fc2b4e9
// bb4cbe9a473bbc273e753c4adc70c900 * 2^-471
{{0x30bb93aafa4e0e66ULL, 0x9214642a0b92c6a5ULL,
0x5b0ed81dcc6abb0fULL, 0xd7adf884aa879177ULL}},
- // 10^-66 ~= d7adf884aa879177 5b0ed81dcc6abb0f
+ // 10^-66 ~= d7adf884aa879177 5b0ed81dcc6abb0f
// 9214642a0b92c6a530bb93aafa4e0e66 * 2^-475
{{0xc0960fbbfb71a51fULL, 0xa8105021a2dbd21dULL,
0xe272467e3d222f3fULL, 0xac8b2d36eed2dac5ULL}},
- // 10^-67 ~= ac8b2d36eed2dac5 e272467e3d222f3f
+ // 10^-67 ~= ac8b2d36eed2dac5 e272467e3d222f3f
// a8105021a2dbd21dc0960fbbfb71a51f * 2^-478
{{0x66de72fcc927b74cULL, 0xb9a6a6814f1641b1ULL,
0x1b8e9ecb641b58ffULL, 0x8a08f0f8bf0f156bULL}},
- // 10^-68 ~= 8a08f0f8bf0f156b 1b8e9ecb641b58ff
+ // 10^-68 ~= 8a08f0f8bf0f156b 1b8e9ecb641b58ff
// b9a6a6814f1641b166de72fcc927b74c * 2^-481
{{0xd7ca5194750c5879ULL, 0xf5d770cee4f0691bULL,
0xf8e431456cf88e65ULL, 0xdcdb1b2798182244ULL}},
- // 10^-69 ~= dcdb1b2798182244 f8e431456cf88e65
+ // 10^-69 ~= dcdb1b2798182244 f8e431456cf88e65
// f5d770cee4f0691bd7ca5194750c5879 * 2^-485
{{0xdfd50e105da379faULL, 0x9179270bea59edafULL,
0x2d835a9df0c6d851ULL, 0xb0af48ec79ace837ULL}},
- // 10^-70 ~= b0af48ec79ace837 2d835a9df0c6d851
+ // 10^-70 ~= b0af48ec79ace837 2d835a9df0c6d851
// 9179270bea59edafdfd50e105da379fa * 2^-488
{{0x19773e737e1c6195ULL, 0x0dfa85a321e18af3ULL,
0x579c487e5a38ad0eULL, 0x8d590723948a535fULL}},
- // 10^-71 ~= 8d590723948a535f 579c487e5a38ad0e
+ // 10^-71 ~= 8d590723948a535f 579c487e5a38ad0e
// dfa85a321e18af319773e737e1c6195 * 2^-491
{{0xf58b971f302d68efULL, 0x165da29e9c9c1184ULL,
0x25c6da63c38de1b0ULL, 0xe2280b6c20dd5232ULL}},
- // 10^-72 ~= e2280b6c20dd5232 25c6da63c38de1b0
+ // 10^-72 ~= e2280b6c20dd5232 25c6da63c38de1b0
// 165da29e9c9c1184f58b971f302d68ef * 2^-495
{{0xc46fac18f3578725ULL, 0x4517b54bb07cdad0ULL,
0x1e38aeb6360b1af3ULL, 0xb4ecd5f01a4aa828ULL}},
- // 10^-73 ~= b4ecd5f01a4aa828 1e38aeb6360b1af3
+ // 10^-73 ~= b4ecd5f01a4aa828 1e38aeb6360b1af3
// 4517b54bb07cdad0c46fac18f3578725 * 2^-498
{{0x36bfbce0c2ac6c1eULL, 0x9dac910959fd7bdaULL,
0xb1c6f22b5e6f48c2ULL, 0x90bd77f3483bb9b9ULL}},
- // 10^-74 ~= 90bd77f3483bb9b9 b1c6f22b5e6f48c2
+ // 10^-74 ~= 90bd77f3483bb9b9 b1c6f22b5e6f48c2
// 9dac910959fd7bda36bfbce0c2ac6c1e * 2^-501
{{0x2465fb01377a4696ULL, 0x2f7a81a88ffbf95dULL,
0xb60b1d1230b20e04ULL, 0xe7958cb87392c2c2ULL}}
- // 10^-75 ~= e7958cb87392c2c2 b60b1d1230b20e04
+ // 10^-75 ~= e7958cb87392c2c2 b60b1d1230b20e04
// 2f7a81a88ffbf95d2465fb01377a4696 * 2^-505
};
UINT256 ten2mxtrunc256[] = {
{{0xccccccccccccccccULL, 0xccccccccccccccccULL,
0xccccccccccccccccULL, 0xccccccccccccccccULL}},
- // (ten2mx >> 256) = cccccccccccccccc cccccccccccccccc
+ // (ten2mx >> 256) = cccccccccccccccc cccccccccccccccc
// cccccccccccccccccccccccccccccccc
{{0x70a3d70a3d70a3d7ULL, 0xd70a3d70a3d70a3dULL,
0x3d70a3d70a3d70a3ULL, 0xa3d70a3d70a3d70aULL}},
- // (ten2mx >> 256) = a3d70a3d70a3d70a 3d70a3d70a3d70a3
+ // (ten2mx >> 256) = a3d70a3d70a3d70a 3d70a3d70a3d70a3
// d70a3d70a3d70a3d70a3d70a3d70a3d7
{{0xc083126e978d4fdfULL, 0x78d4fdf3b645a1caULL,
0x645a1cac083126e9ULL, 0x83126e978d4fdf3bULL}},
- // (ten2mx >> 256) = 83126e978d4fdf3b 645a1cac083126e9
+ // (ten2mx >> 256) = 83126e978d4fdf3b 645a1cac083126e9
// 78d4fdf3b645a1cac083126e978d4fdf
{{0x67381d7dbf487fcbULL, 0xc154c985f06f6944ULL,
0xd3c36113404ea4a8ULL, 0xd1b71758e219652bULL}},
- // (ten2mx >> 256) = d1b71758e219652b d3c36113404ea4a8
+ // (ten2mx >> 256) = d1b71758e219652b d3c36113404ea4a8
// c154c985f06f694467381d7dbf487fcb
{{0x85c67dfe32a0663cULL, 0xcddd6e04c0592103ULL,
0x0fcf80dc33721d53ULL, 0xa7c5ac471b478423ULL}},
- // (ten2mx >> 256) = a7c5ac471b478423 fcf80dc33721d53
+ // (ten2mx >> 256) = a7c5ac471b478423 fcf80dc33721d53
// cddd6e04c059210385c67dfe32a0663c
{{0x37d1fe64f54d1e96ULL, 0xd7e45803cd141a69ULL,
0xa63f9a49c2c1b10fULL, 0x8637bd05af6c69b5ULL}},
- // (ten2mx >> 256) = 8637bd05af6c69b5 a63f9a49c2c1b10f
+ // (ten2mx >> 256) = 8637bd05af6c69b5 a63f9a49c2c1b10f
// d7e45803cd141a6937d1fe64f54d1e96
{{0x8c8330a1887b6424ULL, 0x8ca08cd2e1b9c3dbULL,
0x3d32907604691b4cULL, 0xd6bf94d5e57a42bcULL}},
- // (ten2mx >> 256) = d6bf94d5e57a42bc 3d32907604691b4c
+ // (ten2mx >> 256) = d6bf94d5e57a42bc 3d32907604691b4c
// 8ca08cd2e1b9c3db8c8330a1887b6424
{{0x7068f3b46d2f8350ULL, 0x3d4d3d758161697cULL,
0xfdc20d2b36ba7c3dULL, 0xabcc77118461cefcULL}},
- // (ten2mx >> 256) = abcc77118461cefc fdc20d2b36ba7c3d
+ // (ten2mx >> 256) = abcc77118461cefc fdc20d2b36ba7c3d
// 3d4d3d758161697c7068f3b46d2f8350
{{0xf387295d242602a6ULL, 0xfdd7645e011abac9ULL,
0x31680a88f8953030ULL, 0x89705f4136b4a597ULL}},
- // (ten2mx >> 256) = 89705f4136b4a597 31680a88f8953030
+ // (ten2mx >> 256) = 89705f4136b4a597 31680a88f8953030
// fdd7645e011abac9f387295d242602a6
{{0xb8d8422ea03cd10aULL, 0x2fbf06fcce912adcULL,
0xb573440e5a884d1bULL, 0xdbe6fecebdedd5beULL}},
- // (ten2mx >> 256) = dbe6fecebdedd5be b573440e5a884d1b
+ // (ten2mx >> 256) = dbe6fecebdedd5be b573440e5a884d1b
// 2fbf06fcce912adcb8d8422ea03cd10a
{{0x93e034f219ca40d5ULL, 0xf2ff38ca3eda88b0ULL,
0xf78f69a51539d748ULL, 0xafebff0bcb24aafeULL}},
- // (ten2mx >> 256) = afebff0bcb24aafe f78f69a51539d748
+ // (ten2mx >> 256) = afebff0bcb24aafe f78f69a51539d748
// f2ff38ca3eda88b093e034f219ca40d5
{{0x4319c3f4e16e9a44ULL, 0xf598fa3b657ba08dULL,
0xf93f87b7442e45d3ULL, 0x8cbccc096f5088cbULL}},
- // (ten2mx >> 256) = 8cbccc096f5088cb f93f87b7442e45d3
+ // (ten2mx >> 256) = 8cbccc096f5088cb f93f87b7442e45d3
// f598fa3b657ba08d4319c3f4e16e9a44
{{0x04f606549be42a06ULL, 0x88f4c3923bf900e2ULL,
0x2865a5f206b06fb9ULL, 0xe12e13424bb40e13ULL}},
- // (ten2mx >> 256) = e12e13424bb40e13 2865a5f206b06fb9
+ // (ten2mx >> 256) = e12e13424bb40e13 2865a5f206b06fb9
// 88f4c3923bf900e204f606549be42a06
{{0x03f805107cb68805ULL, 0x6d909c74fcc733e8ULL,
0x538484c19ef38c94ULL, 0xb424dc35095cd80fULL}},
- // (ten2mx >> 256) = b424dc35095cd80f 538484c19ef38c94
+ // (ten2mx >> 256) = b424dc35095cd80f 538484c19ef38c94
// 6d909c74fcc733e803f805107cb68805
{{0x3660040d3092066aULL, 0x57a6e390ca38f653ULL,
0x0f9d37014bf60a10ULL, 0x901d7cf73ab0acd9ULL}},
- // (ten2mx >> 256) = 901d7cf73ab0acd9 f9d37014bf60a10
+ // (ten2mx >> 256) = 901d7cf73ab0acd9 f9d37014bf60a10
// 57a6e390ca38f6533660040d3092066a
{{0x23ccd3484db670aaULL, 0xbf716c1add27f085ULL,
0x4c2ebe687989a9b3ULL, 0xe69594bec44de15bULL}},
- // (ten2mx >> 256) = e69594bec44de15b 4c2ebe687989a9b3
+ // (ten2mx >> 256) = e69594bec44de15b 4c2ebe687989a9b3
// bf716c1add27f08523ccd3484db670aa
{{0x4fd70f6d0af85a22ULL, 0xff8df0157db98d37ULL,
0x09befeb9fad487c2ULL, 0xb877aa3236a4b449ULL}},
- // (ten2mx >> 256) = b877aa3236a4b449 9befeb9fad487c2
+ // (ten2mx >> 256) = b877aa3236a4b449 9befeb9fad487c2
// ff8df0157db98d374fd70f6d0af85a22
{{0x0cac0c573bf9e1b5ULL, 0x32d7f344649470f9ULL,
0x3aff322e62439fcfULL, 0x9392ee8e921d5d07ULL}},
- // (ten2mx >> 256) = 9392ee8e921d5d07 3aff322e62439fcf
+ // (ten2mx >> 256) = 9392ee8e921d5d07 3aff322e62439fcf
// 32d7f344649470f90cac0c573bf9e1b5
{{0xe11346f1f98fcf88ULL, 0x1e2652070753e7f4ULL,
0x2b31e9e3d06c32e5ULL, 0xec1e4a7db69561a5ULL}},
- // (ten2mx >> 256) = ec1e4a7db69561a5 2b31e9e3d06c32e5
+ // (ten2mx >> 256) = ec1e4a7db69561a5 2b31e9e3d06c32e5
// 1e2652070753e7f4e11346f1f98fcf88
{{0x4da9058e613fd939ULL, 0x181ea8059f76532aULL,
0x88f4bb1ca6bcf584ULL, 0xbce5086492111aeaULL}},
- // (ten2mx >> 256) = bce5086492111aea 88f4bb1ca6bcf584
+ // (ten2mx >> 256) = bce5086492111aea 88f4bb1ca6bcf584
// 181ea8059f76532a4da9058e613fd939
{{0xa48737a51a997a94ULL, 0x467eecd14c5ea8eeULL,
0xd3f6fc16ebca5e03ULL, 0x971da05074da7beeULL}},
- // (ten2mx >> 256) = 971da05074da7bee d3f6fc16ebca5e03
+ // (ten2mx >> 256) = 971da05074da7bee d3f6fc16ebca5e03
// 467eecd14c5ea8eea48737a51a997a94
{{0x3a71f2a1c428c420ULL, 0x70cb148213caa7e4ULL,
0x5324c68b12dd6338ULL, 0xf1c90080baf72cb1ULL}},
- // (ten2mx >> 256) = f1c90080baf72cb1 5324c68b12dd6338
+ // (ten2mx >> 256) = f1c90080baf72cb1 5324c68b12dd6338
// 70cb148213caa7e43a71f2a1c428c420
{{0x2ec18ee7d0209ce7ULL, 0x8d6f439b43088650ULL,
0x75b7053c0f178293ULL, 0xc16d9a0095928a27ULL}},
- // (ten2mx >> 256) = c16d9a0095928a27 75b7053c0f178293
+ // (ten2mx >> 256) = c16d9a0095928a27 75b7053c0f178293
// 8d6f439b430886502ec18ee7d0209ce7
{{0xf23472530ce6e3ecULL, 0xd78c3615cf3a050cULL,
0xc4926a9672793542ULL, 0x9abe14cd44753b52ULL}},
- // (ten2mx >> 256) = 9abe14cd44753b52 c4926a9672793542
+ // (ten2mx >> 256) = 9abe14cd44753b52 c4926a9672793542
// d78c3615cf3a050cf23472530ce6e3ec
{{0xe9ed83b814a49fe0ULL, 0x8c1389bc7ec33b47ULL,
0x3a83ddbd83f52204ULL, 0xf79687aed3eec551ULL}},
- // (ten2mx >> 256) = f79687aed3eec551 3a83ddbd83f52204
+ // (ten2mx >> 256) = f79687aed3eec551 3a83ddbd83f52204
// 8c1389bc7ec33b47e9ed83b814a49fe0
{{0x87f1362cdd507fe6ULL, 0x3cdc6e306568fc39ULL,
0x95364afe032a819dULL, 0xc612062576589ddaULL}},
- // (ten2mx >> 256) = c612062576589dda 95364afe032a819d
+ // (ten2mx >> 256) = c612062576589dda 95364afe032a819d
// 3cdc6e306568fc3987f1362cdd507fe6
{{0x9ff42b5717739985ULL, 0xca49f1c05120c9c7ULL,
0x775ea264cf55347dULL, 0x9e74d1b791e07e48ULL}},
- // (ten2mx >> 256) = 9e74d1b791e07e48 775ea264cf55347d
+ // (ten2mx >> 256) = 9e74d1b791e07e48 775ea264cf55347d
// ca49f1c05120c9c79ff42b5717739985
{{0xccb9def1bf1f5c08ULL, 0x76dcb60081ce0fa5ULL,
0x8bca9d6e188853fcULL, 0xfd87b5f28300ca0dULL}},
- // (ten2mx >> 256) = fd87b5f28300ca0d 8bca9d6e188853fc
+ // (ten2mx >> 256) = fd87b5f28300ca0d 8bca9d6e188853fc
// 76dcb60081ce0fa5ccb9def1bf1f5c08
{{0xa3c7e58e327f7cd3ULL, 0x5f16f80067d80c84ULL,
0x096ee45813a04330ULL, 0xcad2f7f5359a3b3eULL}},
- // (ten2mx >> 256) = cad2f7f5359a3b3e 96ee45813a04330
+ // (ten2mx >> 256) = cad2f7f5359a3b3e 96ee45813a04330
// 5f16f80067d80c84a3c7e58e327f7cd3
{{0xb6398471c1ff970fULL, 0x18df2ccd1fe00a03ULL,
0xa1258379a94d028dULL, 0xa2425ff75e14fc31ULL}},
- // (ten2mx >> 256) = a2425ff75e14fc31 a1258379a94d028d
+ // (ten2mx >> 256) = a2425ff75e14fc31 a1258379a94d028d
// 18df2ccd1fe00a03b6398471c1ff970f
{{0xf82e038e34cc78d9ULL, 0x4718f0a419800802ULL,
0x80eacf948770ced7ULL, 0x81ceb32c4b43fcf4ULL}},
- // (ten2mx >> 256) = 81ceb32c4b43fcf4 80eacf948770ced7
+ // (ten2mx >> 256) = 81ceb32c4b43fcf4 80eacf948770ced7
// 4718f0a419800802f82e038e34cc78d9
{{0x59e338e387ad8e28ULL, 0x0b5b1aa028ccd99eULL,
0x67de18eda5814af2ULL, 0xcfb11ead453994baULL}},
- // (ten2mx >> 256) = cfb11ead453994ba 67de18eda5814af2
+ // (ten2mx >> 256) = cfb11ead453994ba 67de18eda5814af2
// b5b1aa028ccd99e59e338e387ad8e28
{{0x47e8fa4f9fbe0b53ULL, 0x6f7c154ced70ae18ULL,
0xecb1ad8aeacdd58eULL, 0xa6274bbdd0fadd61ULL}},
- // (ten2mx >> 256) = a6274bbdd0fadd61 ecb1ad8aeacdd58e
+ // (ten2mx >> 256) = a6274bbdd0fadd61 ecb1ad8aeacdd58e
// 6f7c154ced70ae1847e8fa4f9fbe0b53
{{0xd320c83fb2fe6f75ULL, 0xbf967770bdf3be79ULL,
0xbd5af13bef0b113eULL, 0x84ec3c97da624ab4ULL}},
- // (ten2mx >> 256) = 84ec3c97da624ab4 bd5af13bef0b113e
+ // (ten2mx >> 256) = 84ec3c97da624ab4 bd5af13bef0b113e
// bf967770bdf3be79d320c83fb2fe6f75
{{0x85014065eb30b256ULL, 0x65bd8be79652ca5cULL,
0x955e4ec64b44e864ULL, 0xd4ad2dbfc3d07787ULL}},
- // (ten2mx >> 256) = d4ad2dbfc3d07787 955e4ec64b44e864
+ // (ten2mx >> 256) = d4ad2dbfc3d07787 955e4ec64b44e864
// 65bd8be79652ca5c85014065eb30b256
{{0xd0cdcd1e55c08eabULL, 0xeafe098611dbd516ULL,
0xdde50bd1d5d0b9e9ULL, 0xaa242499697392d2ULL}},
- // (ten2mx >> 256) = aa242499697392d2 dde50bd1d5d0b9e9
+ // (ten2mx >> 256) = aa242499697392d2 dde50bd1d5d0b9e9
// eafe098611dbd516d0cdcd1e55c08eab
{{0x40a4a418449a0bbcULL, 0xbbfe6e04db164412ULL,
0x7e50d64177da2e54ULL, 0x881cea14545c7575ULL}},
- // (ten2mx >> 256) = 881cea14545c7575 7e50d64177da2e54
+ // (ten2mx >> 256) = 881cea14545c7575 7e50d64177da2e54
// bbfe6e04db16441240a4a418449a0bbc
{{0x9aa1068d3a9012c7ULL, 0x2cca49a15e8a0683ULL,
0x96e7bd358c904a21ULL, 0xd9c7dced53c72255ULL}},
- // (ten2mx >> 256) = d9c7dced53c72255 96e7bd358c904a21
+ // (ten2mx >> 256) = d9c7dced53c72255 96e7bd358c904a21
// 2cca49a15e8a06839aa1068d3a9012c7
{{0x154d9ed7620cdbd2ULL, 0x8a3b6e1ab2080536ULL,
0xabec975e0a0d081aULL, 0xae397d8aa96c1b77ULL}},
- // (ten2mx >> 256) = ae397d8aa96c1b77 abec975e0a0d081a
+ // (ten2mx >> 256) = ae397d8aa96c1b77 abec975e0a0d081a
// 8a3b6e1ab2080536154d9ed7620cdbd2
{{0x443e18ac4e70afdbULL, 0x3b62be7bc1a0042bULL,
0x2323ac4b3b3da015ULL, 0x8b61313bbabce2c6ULL}},
- // (ten2mx >> 256) = 8b61313bbabce2c6 2323ac4b3b3da015
+ // (ten2mx >> 256) = 8b61313bbabce2c6 2323ac4b3b3da015
// 3b62be7bc1a0042b443e18ac4e70afdb
{{0x6d30277a171ab2f8ULL, 0x5f0463f935ccd378ULL,
0x6b6c46dec52f6688ULL, 0xdf01e85f912e37a3ULL}},
- // (ten2mx >> 256) = df01e85f912e37a3 6b6c46dec52f6688
+ // (ten2mx >> 256) = df01e85f912e37a3 6b6c46dec52f6688
// 5f0463f935ccd3786d30277a171ab2f8
{{0x8a8cec61ac155bfaULL, 0x7f36b660f7d70f93ULL,
0x55f038b237591ed3ULL, 0xb267ed1940f1c61cULL}},
- // (ten2mx >> 256) = b267ed1940f1c61c 55f038b237591ed3
+ // (ten2mx >> 256) = b267ed1940f1c61c 55f038b237591ed3
// 7f36b660f7d70f938a8cec61ac155bfa
{{0x3ba3f04e23444995ULL, 0xcc2bc51a5fdf3fa9ULL,
0x77f3608e92adb242ULL, 0x8eb98a7a9a5b04e3ULL}},
- // (ten2mx >> 256) = 8eb98a7a9a5b04e3 77f3608e92adb242
+ // (ten2mx >> 256) = 8eb98a7a9a5b04e3 77f3608e92adb242
// cc2bc51a5fdf3fa93ba3f04e23444995
{{0xf9064d49d206dc21ULL, 0xe046082a32fecc41ULL,
0x8cb89a7db77c506aULL, 0xe45c10c42a2b3b05ULL}},
- // (ten2mx >> 256) = e45c10c42a2b3b05 8cb89a7db77c506a
+ // (ten2mx >> 256) = e45c10c42a2b3b05 8cb89a7db77c506a
// e046082a32fecc41f9064d49d206dc21
{{0xfa6b7107db38b01aULL, 0x4d04d354f598a367ULL,
0x3d607b97c5fd0d22ULL, 0xb6b00d69bb55c8d1ULL}},
- // (ten2mx >> 256) = b6b00d69bb55c8d1 3d607b97c5fd0d22
+ // (ten2mx >> 256) = b6b00d69bb55c8d1 3d607b97c5fd0d22
// 4d04d354f598a367fa6b7107db38b01a
{{0xfb8927397c2d59afULL, 0x3d9d75dd9146e91fULL,
0xcab3961304ca70e8ULL, 0x9226712162ab070dULL}},
- // (ten2mx >> 256) = 9226712162ab070d cab3961304ca70e8
+ // (ten2mx >> 256) = 9226712162ab070d cab3961304ca70e8
// 3d9d75dd9146e91ffb8927397c2d59af
{{0xf8db71f5937bc2b1ULL, 0xc8fbefc8e87174ffULL,
0xaab8f01e6e10b4a6ULL, 0xe9d71b689dde71afULL}},
- // (ten2mx >> 256) = e9d71b689dde71af aab8f01e6e10b4a6
+ // (ten2mx >> 256) = e9d71b689dde71af aab8f01e6e10b4a6
// c8fbefc8e87174fff8db71f5937bc2b1
{{0x2d7c5b2adc630227ULL, 0x3a63263a538df733ULL,
0x5560c018580d5d52ULL, 0xbb127c53b17ec159ULL}},
- // (ten2mx >> 256) = bb127c53b17ec159 5560c018580d5d52
+ // (ten2mx >> 256) = bb127c53b17ec159 5560c018580d5d52
// 3a63263a538df7332d7c5b2adc630227
{{0x24637c2249e8ce86ULL, 0x2eb5b82ea93e5f5cULL,
0xdde7001379a44aa8ULL, 0x95a8637627989aadULL}},
- // (ten2mx >> 256) = 95a8637627989aad dde7001379a44aa8
+ // (ten2mx >> 256) = 95a8637627989aad dde7001379a44aa8
// 2eb5b82ea93e5f5c24637c2249e8ce86
{{0x3a38c69d430e173dULL, 0x4abc59e441fd6560ULL,
0x963e66858f6d4440ULL, 0xef73d256a5c0f77cULL}},
- // (ten2mx >> 256) = ef73d256a5c0f77c 963e66858f6d4440
+ // (ten2mx >> 256) = ef73d256a5c0f77c 963e66858f6d4440
// 4abc59e441fd65603a38c69d430e173d
{{0x94fa387dcf3e78fdULL, 0x6efd14b69b311de6ULL,
0xde98520472bdd033ULL, 0xbf8fdb78849a5f96ULL}},
- // (ten2mx >> 256) = bf8fdb78849a5f96 de98520472bdd033
+ // (ten2mx >> 256) = bf8fdb78849a5f96 de98520472bdd033
// 6efd14b69b311de694fa387dcf3e78fd
{{0xaa61c6cb0c31fa64ULL, 0x259743c548f417ebULL,
0xe546a8038efe4029ULL, 0x993fe2c6d07b7fabULL}},
- // (ten2mx >> 256) = 993fe2c6d07b7fab e546a8038efe4029
+ // (ten2mx >> 256) = 993fe2c6d07b7fab e546a8038efe4029
// 259743c548f417ebaa61c6cb0c31fa64
{{0xaa360ade79e990a1ULL, 0x3c25393ba7ecf312ULL,
0xd53dd99f4b3066a8ULL, 0xf53304714d9265dfULL}},
- // (ten2mx >> 256) = f53304714d9265df d53dd99f4b3066a8
+ // (ten2mx >> 256) = f53304714d9265df d53dd99f4b3066a8
// 3c25393ba7ecf312aa360ade79e990a1
{{0x882b3be52e5473b4ULL, 0x96842dc95323f5a8ULL,
0xaa97e14c3c26b886ULL, 0xc428d05aa4751e4cULL}},
- // (ten2mx >> 256) = c428d05aa4751e4c aa97e14c3c26b886
+ // (ten2mx >> 256) = c428d05aa4751e4c aa97e14c3c26b886
// 96842dc95323f5a8882b3be52e5473b4
{{0xd355c98425105c90ULL, 0xab9cf16ddc1cc486ULL,
0x55464dd69685606bULL, 0x9ced737bb6c4183dULL}},
- // (ten2mx >> 256) = 9ced737bb6c4183d 55464dd69685606b
+ // (ten2mx >> 256) = 9ced737bb6c4183d 55464dd69685606b
// ab9cf16ddc1cc486d355c98425105c90
{{0xebbc75a03b4d60e6ULL, 0xac2e4f162cfad40aULL,
0xeed6e2f0f0d56712ULL, 0xfb158592be068d2eULL}},
- // (ten2mx >> 256) = fb158592be068d2e eed6e2f0f0d56712
+ // (ten2mx >> 256) = fb158592be068d2e eed6e2f0f0d56712
// ac2e4f162cfad40aebbc75a03b4d60e6
{{0x8963914cfc3de71eULL, 0x568b727823fbdcd5ULL,
0xf245825a5a445275ULL, 0xc8de047564d20a8bULL}},
- // (ten2mx >> 256) = c8de047564d20a8b f245825a5a445275
+ // (ten2mx >> 256) = c8de047564d20a8b f245825a5a445275
// 568b727823fbdcd58963914cfc3de71e
{{0xd44fa770c9cb1f4bULL, 0x453c5b934ffcb0aaULL,
0x5b6aceaeae9d0ec4ULL, 0xa0b19d2ab70e6ed6ULL}},
- // (ten2mx >> 256) = a0b19d2ab70e6ed6 5b6aceaeae9d0ec4
+ // (ten2mx >> 256) = a0b19d2ab70e6ed6 5b6aceaeae9d0ec4
// 453c5b934ffcb0aad44fa770c9cb1f4b
{{0xdd0c85f3d4a27f6fULL, 0x37637c75d996f3bbULL,
0xe2bbd88bbee40bd0ULL, 0x808e17555f3ebf11ULL}},
- // (ten2mx >> 256) = 808e17555f3ebf11 e2bbd88bbee40bd0
+ // (ten2mx >> 256) = 808e17555f3ebf11 e2bbd88bbee40bd0
// 37637c75d996f3bbdd0c85f3d4a27f6f
{{0x61ada31fba9d98b2ULL, 0x256bfa5628f185f9ULL,
0x3792f412cb06794dULL, 0xcdb02555653131b6ULL}},
- // (ten2mx >> 256) = cdb02555653131b6 3792f412cb06794d
+ // (ten2mx >> 256) = cdb02555653131b6 3792f412cb06794d
// 256bfa5628f185f961ada31fba9d98b2
{{0xe7be1c196217ad5bULL, 0x51232eab53f46b2dULL,
0x5fa8c3423c052dd7ULL, 0xa48ceaaab75a8e2bULL}},
- // (ten2mx >> 256) = a48ceaaab75a8e2b 5fa8c3423c052dd7
+ // (ten2mx >> 256) = a48ceaaab75a8e2b 5fa8c3423c052dd7
// 51232eab53f46b2de7be1c196217ad5b
{{0x52fe7ce11b462449ULL, 0x40e8f222a99055beULL,
0x1953cf68300424acULL, 0x83a3eeeef9153e89ULL}},
- // (ten2mx >> 256) = 83a3eeeef9153e89 1953cf68300424ac
+ // (ten2mx >> 256) = 83a3eeeef9153e89 1953cf68300424ac
// 40e8f222a99055be52fe7ce11b462449
{{0x51972e34f8703a0fULL, 0x34a7e9d10f4d55fdULL,
0x8eec7f0d19a03aadULL, 0xd29fe4b18e88640eULL}},
- // (ten2mx >> 256) = d29fe4b18e88640e 8eec7f0d19a03aad
+ // (ten2mx >> 256) = d29fe4b18e88640e 8eec7f0d19a03aad
// 34a7e9d10f4d55fd51972e34f8703a0f
{{0x0e128b5d938cfb3fULL, 0x2a1fee40d90aab31ULL,
0x3f2398d747b36224ULL, 0xa87fea27a539e9a5ULL}},
- // (ten2mx >> 256) = a87fea27a539e9a5 3f2398d747b36224
+ // (ten2mx >> 256) = a87fea27a539e9a5 3f2398d747b36224
// 2a1fee40d90aab310e128b5d938cfb3f
{{0x3e753c4adc70c8ffULL, 0xbb4cbe9a473bbc27ULL,
0x98e947129fc2b4e9ULL, 0x86ccbb52ea94baeaULL}},
- // (ten2mx >> 256) = 86ccbb52ea94baea 98e947129fc2b4e9
+ // (ten2mx >> 256) = 86ccbb52ea94baea 98e947129fc2b4e9
// bb4cbe9a473bbc273e753c4adc70c8ff
{{0x30bb93aafa4e0e65ULL, 0x9214642a0b92c6a5ULL,
0x5b0ed81dcc6abb0fULL, 0xd7adf884aa879177ULL}},
- // (ten2mx >> 256) = d7adf884aa879177 5b0ed81dcc6abb0f
+ // (ten2mx >> 256) = d7adf884aa879177 5b0ed81dcc6abb0f
// 9214642a0b92c6a530bb93aafa4e0e65
{{0xc0960fbbfb71a51eULL, 0xa8105021a2dbd21dULL,
0xe272467e3d222f3fULL, 0xac8b2d36eed2dac5ULL}},
- // (ten2mx >> 256) = ac8b2d36eed2dac5 e272467e3d222f3f
+ // (ten2mx >> 256) = ac8b2d36eed2dac5 e272467e3d222f3f
// a8105021a2dbd21dc0960fbbfb71a51e
{{0x66de72fcc927b74bULL, 0xb9a6a6814f1641b1ULL,
0x1b8e9ecb641b58ffULL, 0x8a08f0f8bf0f156bULL}},
- // (ten2mx >> 256) = 8a08f0f8bf0f156b 1b8e9ecb641b58ff
+ // (ten2mx >> 256) = 8a08f0f8bf0f156b 1b8e9ecb641b58ff
// b9a6a6814f1641b166de72fcc927b74b
{{0xd7ca5194750c5878ULL, 0xf5d770cee4f0691bULL,
0xf8e431456cf88e65ULL, 0xdcdb1b2798182244ULL}},
- // (ten2mx >> 256) = dcdb1b2798182244 f8e431456cf88e65
+ // (ten2mx >> 256) = dcdb1b2798182244 f8e431456cf88e65
// f5d770cee4f0691bd7ca5194750c5878
{{0xdfd50e105da379f9ULL, 0x9179270bea59edafULL,
0x2d835a9df0c6d851ULL, 0xb0af48ec79ace837ULL}},
- // (ten2mx >> 256) = b0af48ec79ace837 2d835a9df0c6d851
+ // (ten2mx >> 256) = b0af48ec79ace837 2d835a9df0c6d851
// 9179270bea59edafdfd50e105da379f9
{{0x19773e737e1c6194ULL, 0x0dfa85a321e18af3ULL,
0x579c487e5a38ad0eULL, 0x8d590723948a535fULL}},
- // (ten2mx >> 256) = 8d590723948a535f 579c487e5a38ad0e
+ // (ten2mx >> 256) = 8d590723948a535f 579c487e5a38ad0e
// dfa85a321e18af319773e737e1c6194
{{0xf58b971f302d68eeULL, 0x165da29e9c9c1184ULL,
0x25c6da63c38de1b0ULL, 0xe2280b6c20dd5232ULL}},
- // (ten2mx >> 256) = e2280b6c20dd5232 25c6da63c38de1b0
+ // (ten2mx >> 256) = e2280b6c20dd5232 25c6da63c38de1b0
// 165da29e9c9c1184f58b971f302d68ee
{{0xc46fac18f3578724ULL, 0x4517b54bb07cdad0ULL,
0x1e38aeb6360b1af3ULL, 0xb4ecd5f01a4aa828ULL}},
- // (ten2mx >> 256) = b4ecd5f01a4aa828 1e38aeb6360b1af3
+ // (ten2mx >> 256) = b4ecd5f01a4aa828 1e38aeb6360b1af3
// 4517b54bb07cdad0c46fac18f3578724
{{0x36bfbce0c2ac6c1dULL, 0x9dac910959fd7bdaULL,
0xb1c6f22b5e6f48c2ULL, 0x90bd77f3483bb9b9ULL}},
- // (ten2mx >> 256) = 90bd77f3483bb9b9 b1c6f22b5e6f48c2
+ // (ten2mx >> 256) = 90bd77f3483bb9b9 b1c6f22b5e6f48c2
// 9dac910959fd7bda36bfbce0c2ac6c1d
{{0x2465fb01377a4695ULL, 0x2f7a81a88ffbf95dULL,
0xb60b1d1230b20e04ULL, 0xe7958cb87392c2c2ULL}}
- // (ten2mx >> 256) = e7958cb87392c2c2 b60b1d1230b20e04
+ // (ten2mx >> 256) = e7958cb87392c2c2 b60b1d1230b20e04
// 2f7a81a88ffbf95d2465fb01377a4695
};
};
const UINT64 mod10_18_tbl[9][128] = {
- // 2^59 = 576460752303423488, A and B breakdown, where data = A*10^18 + B
+ // 2^59 = 576460752303423488, A and B breakdown, where data = A*10^18 + B
{
0LL, 0LL, 0LL, 576460752303423488LL,
}
// unpack the arguments
- // unpack x
+ // unpack x
C1_hi = x.w[1] & MASK_COEFF;
C1_lo = x.w[0];
// test for non-canonical values:
- // - values whose encoding begins with x00, x01, or x10 and whose
+ // - values whose encoding begins with x00, x01, or x10 and whose
// coefficient is larger than 10^34 -1, or
- // - values whose encoding begins with x1100, x1101, x1110 (if NaNs
- // and infinitis were eliminated already this test is reduced to
- // checking for x10x)
+ // - values whose encoding begins with x1100, x1101, x1110 (if NaNs
+ // and infinities were eliminated already this test is reduced to
+ // checking for x10x)
// x is not infinity; check for non-canonical values - treated as zero
if ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull) {
}
}
- // unpack y
+ // unpack y
C2_hi = y.w[1] & MASK_COEFF;
C2_lo = y.w[0];
- // y is not infinity; check for non-canonical values - treated as zero
+ // y is not infinity; check for non-canonical values - treated as zero
if ((y.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull) {
- // G0_G1=11; non-canonical
+ // G0_G1=11; non-canonical
y_exp = (y.w[1] << 2) & MASK_EXP; // biased and shifted left 49 bits
C2_hi = 0; // significand high
- C2_lo = 0; // significand low
- } else { // G0_G1 != 11
+ C2_lo = 0; // significand low
+ } else { // G0_G1 != 11
y_exp = y.w[1] & MASK_EXP; // biased and shifted left 49 bits
if (C2_hi > 0x0001ed09bead87c0ull ||
(C2_hi == 0x0001ed09bead87c0ull
&& C2_lo > 0x378d8e63ffffffffull)) {
- // y is non-canonical if coefficient is larger than 10^34 -1
+ // y is non-canonical if coefficient is larger than 10^34 -1
C2_hi = 0;
C2_lo = 0;
} else { // canonical
if (C2_hi == 0) { // y_bits is the nr. of bits in C2_lo
if (C2_lo >= 0x0020000000000000ull) { // y >= 2^53
- // split the 64-bit value in two 32-bit halves to avoid
+ // split the 64-bit value in two 32-bit halves to avoid
// rounding errors
if (C2_lo >= 0x0000000100000000ull) { // y >= 2^32
tmp2.d = (double) (C2_lo >> 32); // exact conversion
if (scale == 0) {
res.w[1] = y.w[1];
res.w[0] = y.w[0];
- } else if (q2 <= 19) { // y fits in 64 bits
+ } else if (q2 <= 19) { // y fits in 64 bits
if (scale <= 19) { // 10^scale fits in 64 bits
// 64 x 64 C2_lo * ten2k64[scale]
__mul_64x64_to_128MACH (res, C2_lo, ten2k64[scale]);
// 64 x 128 C2_lo * ten2k128[scale - 20]
__mul_128x64_to_128 (res, C2_lo, ten2k128[scale - 20]);
}
- } else { // y fits in 128 bits, but 10^scale must fit in 64 bits
+ } else { // y fits in 128 bits, but 10^scale must fit in 64 bits
// 64 x 128 ten2k64[scale] * C2
C2.w[1] = C2_hi;
C2.w[0] = C2_lo;
// determine first the nr. of bits in x
if (C1_hi == 0) { // x_bits is the nr. of bits in C1_lo
if (C1_lo >= 0x0020000000000000ull) { // x >= 2^53
- // split the 64-bit value in two 32-bit halves to avoid
+ // split the 64-bit value in two 32-bit halves to avoid
// rounding errors
if (C1_lo >= 0x0000000100000000ull) { // x >= 2^32
tmp1.d = (double) (C1_lo >> 32); // exact conversion
q1++;
}
// return (C1 * 10^scale) * 10^(x_exp - scale)
- // where scale = min (P34-q1, x_exp-y_exp)
+ // where scale = min (P34-q1, x_exp-y_exp)
scale = P34 - q1;
ind = (x_exp - y_exp) >> 49;
if (ind < scale)
if (scale == 0) {
res.w[1] = x.w[1];
res.w[0] = x.w[0];
- } else if (q1 <= 19) { // x fits in 64 bits
+ } else if (q1 <= 19) { // x fits in 64 bits
if (scale <= 19) { // 10^scale fits in 64 bits
- // 64 x 64 C1_lo * ten2k64[scale]
+ // 64 x 64 C1_lo * ten2k64[scale]
__mul_64x64_to_128MACH (res, C1_lo, ten2k64[scale]);
} else { // 10^scale fits in 128 bits
// 64 x 128 C1_lo * ten2k128[scale - 20]
} else { // x and y are not canonical, not special, and are not zero
// note that the result may still be zero, and then it has to have the
// preferred exponent
- if (x_exp < y_exp) { // if exp_x < exp_y then swap x and y
+ if (x_exp < y_exp) { // if exp_x < exp_y then swap x and y
tmp_sign = x_sign;
tmp_exp = x_exp;
tmp_signif_hi = C1_hi;
// possibly scaled up by 10^(P34-q1)
// an overflow cannot occur in this case (rounding to nearest)
if (q1 < P34) { // scale C1 up by 10^(P34-q1)
- // Note: because delta >= P34+1 it is certain that
+ // Note: because delta >= P34+1 it is certain that
// x_exp - ((UINT64)scale << 49) will stay above e_min
scale = P34 - q1;
if (q1 <= 19) { // C1 fits in 64 bits
C1_hi = C1.w[1];
C1_lo = C1.w[0];
}
- // some special cases arise: if delta = P34 + 1 and C1 = 10^(P34-1)
- // (after scaling) and x_sign != y_sign and C2 > 5*10^(q2-1) =>
+ // some special cases arise: if delta = P34 + 1 and C1 = 10^(P34-1)
+ // (after scaling) and x_sign != y_sign and C2 > 5*10^(q2-1) =>
// subtract 1 ulp
- // Note: do this only for rounding to nearest; for other rounding
+ // Note: do this only for rounding to nearest; for other rounding
// modes the correction will be applied next
if ((rnd_mode == ROUNDING_TO_NEAREST
|| rnd_mode == ROUNDING_TIES_AWAY) && delta == (P34 + 1)
C1_lo = C1_lo - 1;
if (C1_lo == 0xffffffffffffffffull)
C1_hi = C1_hi - 1;
- // if the coefficient is 10^33 - 1 then make it 10^34 - 1 and
+ // if the coefficient is 10^33 - 1 then make it 10^34 - 1 and
// decrease the exponent by 1 (because delta >= P34 + 1 the
// exponent will not become less than e_min)
// 10^33 - 1 = 0x0000314dc6448d9338c15b09ffffffff
// assemble the result
res.w[1] = x_sign | x_exp | C1_hi;
res.w[0] = C1_lo;
- } else { // delta = P34
+ } else { // delta = P34
// in most cases, the smaller operand may be < or = or > 1/2 ulp of the
// larger operand
// however, the case C1 = 10^(q1-1) and x_sign != y_sign is special due
if (q2 <= 19) { // C2 and 5*10^(q2-1) both fit in 64 bits
halfulp64 = midpoint64[q2 - 1]; // 5 * 10^(q2-1)
if (C2_lo < halfulp64) { // n2 < 1/2 ulp (n1)
- // for RN the result is the operand with the larger magnitude,
+ // for RN the result is the operand with the larger magnitude,
// possibly scaled up by 10^(P34-q1)
// an overflow cannot occur in this case (rounding to nearest)
if (q1 < P34) { // scale C1 up by 10^(P34-q1)
C1_lo = C1_lo - 1;
if (C1_lo == 0xffffffffffffffffull)
C1_hi = C1_hi - 1;
- // if the coefficient is 10^33-1 then make it 10^34-1 and
+ // if the coefficient is 10^33-1 then make it 10^34-1 and
// decrease the exponent by 1 (because delta >= P34 + 1 the
// exponent will not become less than e_min)
// 10^33 - 1 = 0x0000314dc6448d9338c15b09ffffffff
// 1 <= q1 <= 19 => 15 <= scale <= 33
if (scale <= 19) { // 10^scale fits in 64 bits
__mul_64x64_to_128MACH (C1, ten2k64[scale], C1_lo);
- } else { // if 20 <= scale <= 33
+ } else { // if 20 <= scale <= 33
// C1 * 10^scale = (C1 * 10^(scale-19)) * 10^19 where
- // (C1 * 10^(scale-19)) fits in 64 bits
+ // (C1 * 10^(scale-19)) fits in 64 bits
C1_lo = C1_lo * ten2k64[scale - 19];
__mul_64x64_to_128MACH (C1, ten2k64[19], C1_lo);
}
} else { //if 20 <= q1 <= 33=P34-1 then C1 fits only in 128 bits
- // => 1 <= P34 - q1 <= 14 so 10^(P34-q1) fits in 64 bits
+ // => 1 <= P34 - q1 <= 14 so 10^(P34-q1) fits in 64 bits
C1.w[1] = C1_hi;
C1.w[0] = C1_lo;
- // C1 = ten2k64[P34 - q1] * C1
+ // C1 = ten2k64[P34 - q1] * C1
__mul_128x64_to_128 (C1, ten2k64[P34 - q1], C1);
}
x_exp = x_exp - ((UINT64) scale << 49);
}
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
- // assemble the result
+ // assemble the result
res.w[1] = x_sign | x_exp | C1_hi;
res.w[0] = C1_lo;
- } else { // if C2_lo > halfulp64 ||
+ } else { // if C2_lo > halfulp64 ||
// (C2_lo == halfulp64 && q1 == P34 && ((C1_lo & 0x1) == 1)), i.e.
// 1/2 ulp(n1) < n2 < 1 ulp(n1) or n2 = 1/2 ulp(n1) and C1 odd
// res = x+1 ulp if n1*n2 > 0 and res = x-1 ulp if n1*n2 < 0
if (q1 < P34) { // then 1 ulp = 10^(e1+q1-P34) < 10^e1
// Note: if (q1 == P34) then 1 ulp = 10^(e1+q1-P34) = 10^e1
- // because q1 < P34 we must first replace C1 by
- // C1 * 10^(P34-q1), and must decrease the exponent by
+ // because q1 < P34 we must first replace C1 by
+ // C1 * 10^(P34-q1), and must decrease the exponent by
// (P34-q1) (it will still be at least e_min)
scale = P34 - q1;
if (q1 <= 19) { // C1 fits in 64 bits
// check for rounding overflow
if (C1_hi == 0x0001ed09bead87c0ull
&& C1_lo == 0x378d8e6400000000ull) {
- // C1 = 10^34 => rounding overflow
+ // C1 = 10^34 => rounding overflow
C1_hi = 0x0000314dc6448d93ull;
C1_lo = 0x38c15b0a00000000ull; // 10^33
x_exp = x_exp + EXP_P1;
res.w[1] = x_sign | x_exp | C1_hi;
res.w[0] = C1_lo;
}
- } else { // if q2 >= 20 then 5*10^(q2-1) and C2 (the latter in
+ } else { // if q2 >= 20 then 5*10^(q2-1) and C2 (the latter in
// most cases) fit only in more than 64 bits
halfulp128 = midpoint128[q2 - 20]; // 5 * 10^(q2-1)
if ((C2_hi < halfulp128.w[1])
// 1 <= q1 <= 19 => 15 <= scale <= 33
if (scale <= 19) { // 10^scale fits in 64 bits
__mul_64x64_to_128MACH (C1, ten2k64[scale], C1_lo);
- } else { // if 20 <= scale <= 33
+ } else { // if 20 <= scale <= 33
// C1 * 10^scale = (C1 * 10^(scale-19)) * 10^19 where
- // (C1 * 10^(scale-19)) fits in 64 bits
+ // (C1 * 10^(scale-19)) fits in 64 bits
C1_lo = C1_lo * ten2k64[scale - 19];
__mul_64x64_to_128MACH (C1, ten2k64[19], C1_lo);
}
} else { //if 20 <= q1 <= 33=P34-1 then C1 fits only in 128 bits
- // => 1 <= P34 - q1 <= 14 so 10^(P34-q1) fits in 64 bits
+ // => 1 <= P34 - q1 <= 14 so 10^(P34-q1) fits in 64 bits
C1.w[1] = C1_hi;
C1.w[0] = C1_lo;
- // C1 = ten2k64[P34 - q1] * C1
+ // C1 = ten2k64[P34 - q1] * C1
__mul_128x64_to_128 (C1, ten2k64[P34 - q1], C1);
}
C1_hi = C1.w[1];
; // the result is already correct
}
}
- // set the inexact flag
+ // set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
- // assemble the result
+ // assemble the result
res.w[1] = x_sign | x_exp | C1_hi;
res.w[0] = C1_lo;
} else if ((C2_hi == halfulp128.w[1]
// end case where C1 != 10^(q1-1)
} else { // C1 = 10^(q1-1) and x_sign != y_sign
// instead of C' = (C1 * 10^(e1-e2) + C2)rnd,P34
- // calculate C' = C1 * 10^(e1-e2-x1) + (C2 * 10^(-x1))rnd,P34
+ // calculate C' = C1 * 10^(e1-e2-x1) + (C2 * 10^(-x1))rnd,P34
// where x1 = q2 - 1, 0 <= x1 <= P34 - 1
- // Because C1 = 10^(q1-1) and x_sign != y_sign, C' will have P34
+ // Because C1 = 10^(q1-1) and x_sign != y_sign, C' will have P34
// digits and n = C' * 10^(e2+x1)
// If the result has P34+1 digits, redo the steps above with x1+1
- // If the result has P34-1 digits or less, redo the steps above with
+ // If the result has P34-1 digits or less, redo the steps above with
// x1-1 but only if initially x1 >= 1
// NOTE: these two steps can be improved, e.g we could guess if
- // P34+1 or P34-1 digits will be obtained by adding/subtracting
+ // P34+1 or P34-1 digits will be obtained by adding/subtracting
// just the top 64 bits of the two operands
// The result cannot be zero, and it cannot overflow
x1 = q2 - 1; // 0 <= x1 <= P34-1
// The coefficient of the result is C1 * 10^(e1-e2) + C2 and the
// exponent is e2; either C1 or 10^(e1-e2) may not fit is 64 bits,
// but their product fits with certainty in 128 bits (actually in 113)
- scale = delta - q1 + q2; // scale = (int)(e1 >> 49) - (int)(e2 >> 49)
+ scale = delta - q1 + q2; // scale = (int)(e1 >> 49) - (int)(e2 >> 49)
if (scale >= 20) { // 10^(e1-e2) does not fit in 64 bits, but C1 does
__mul_128x64_to_128 (C1, C1_lo, ten2k128[scale - 20]);
C1_hi = C1.w[1];
C1_lo = C1.w[0];
} else if (scale >= 1) {
- // if 1 <= scale <= 19 then 10^(e1-e2) fits in 64 bits
+ // if 1 <= scale <= 19 then 10^(e1-e2) fits in 64 bits
if (q1 <= 19) { // C1 fits in 64 bits
__mul_64x64_to_128MACH (C1, C1_lo, ten2k64[scale]);
} else { // q1 >= 20
C1_hi = C1.w[1];
C1_lo = C1.w[0];
} else { // if (scale == 0) C1 is unchanged
- C1.w[0] = C1_lo; // C1.w[1] = C1_hi;
+ C1.w[0] = C1_lo; // C1.w[1] = C1_hi;
}
// now add C2
if (x_sign == y_sign) {
res.w[1] = x_sign | y_exp | C1_hi;
res.w[0] = C1_lo;
} else if (delta == P34 - q2) {
- // calculate C' directly; the result may be inexact if it requires
+ // calculate C' directly; the result may be inexact if it requires
// P34+1 decimal digits; in this case the 'cutoff' point for addition
// is at the position of the lsb of C2, so 0 <= e1-e2 <= P34-1
// The coefficient of the result is C1 * 10^(e1-e2) + C2 and the
C1_hi++;
// test for overflow, possible only when C1 >= 10^34
if (C1_hi > 0x0001ed09bead87c0ull || (C1_hi == 0x0001ed09bead87c0ull && C1_lo >= 0x378d8e6400000000ull)) { // C1 >= 10^34
- // in this case q = P34 + 1 and x = q - P34 = 1, so multiply
- // C'' = C'+ 5 = C1 + 5 by k1 ~ 10^(-1) calculated for P34 + 1
+ // in this case q = P34 + 1 and x = q - P34 = 1, so multiply
+ // C'' = C'+ 5 = C1 + 5 by k1 ~ 10^(-1) calculated for P34 + 1
// decimal digits
// Calculate C'' = C' + 1/2 * 10^x
if (C1_lo >= 0xfffffffffffffffbull) { // low half add has carry
ten2m1.w[0] = 0x9999999999999a00ull;
__mul_128x128_to_256 (P256, C1, ten2m1); // P256 = C*, f*
// C* is actually floor(C*) in this case
- // the top Ex = 128 bits of 10^(-1) are
+ // the top Ex = 128 bits of 10^(-1) are
// T* = 0x00199999999999999999999999999999
// if (0 < f* < 10^(-x)) then
- // if floor(C*) is even then C = floor(C*) - logical right
+ // if floor(C*) is even then C = floor(C*) - logical right
// shift; C has p decimal digits, correct by Prop. 1)
// else if floor(C*) is odd C = floor(C*) - 1 (logical right
// shift; C has p decimal digits, correct by Pr. 1)
BID_SWAP128 (res);
BID_RETURN (res);
}
- // if (0 < f* - 1/2 < 10^(-x)) then
- // the result of the addition is exact
- // else
+ // if (0 < f* - 1/2 < 10^(-x)) then
+ // the result of the addition is exact
+ // else
// the result of the addition is inexact
if (P256.w[1] > 0x8000000000000000ull || (P256.w[1] == 0x8000000000000000ull && P256.w[0] > 0x0ull)) { // the result may be exact
tmp64 = P256.w[1] - 0x8000000000000000ull; // f* - 1/2
is_inexact_gt_midpoint = is_inexact
&& !(P256.w[1] & 0x8000000000000000ull);
}
- // general correction from RN to RA, RM, RP, RZ;
+ // general correction from RN to RA, RM, RP, RZ;
// result uses y_exp
if (rnd_mode != ROUNDING_TO_NEAREST) {
if ((!x_sign
res.w[0] = C1_lo;
} else { // if (delta >= P34 + 1 - q2)
// instead of C' = (C1 * 10^(e1-e2) + C2)rnd,P34
- // calculate C' = C1 * 10^(e1-e2-x1) + (C2 * 10^(-x1))rnd,P34
+ // calculate C' = C1 * 10^(e1-e2-x1) + (C2 * 10^(-x1))rnd,P34
// where x1 = q1 + e1 - e2 - P34, 1 <= x1 <= P34 - 1
// In most cases C' will have P34 digits, and n = C' * 10^(e2+x1)
// If the result has P34+1 digits, redo the steps above with x1+1
- // If the result has P34-1 digits or less, redo the steps above with
+ // If the result has P34-1 digits or less, redo the steps above with
// x1-1 but only if initially x1 >= 1
// NOTE: these two steps can be improved, e.g we could guess if
// P34+1 or P34-1 digits will be obtained by adding/subtracting just
tmp64 = C1.w[0]; // C1.w[1], C1.w[0] contains C1 * 10^(e1-e2-x1)
// now round C2 to q2-x1 decimal digits, where 1<=x1<=q2-1<=P34-1
- // (but if we got here a second time after x1 = x1 - 1, then
+ // (but if we got here a second time after x1 = x1 - 1, then
// x1 >= 0; note that for x1 = 0 C2 is unchanged)
// C2' = C2 + 1/2 * 10^x1 = C2 + 5 * 10^(x1-1)
ind = x1 - 1; // 0 <= ind <= q2-2<=P34-2=32; but note that if x1 = 0
is_midpoint_lt_even = 0;
is_midpoint_gt_even = 0;
}
- // determine inexactness of the rounding of C2* (this may be
- // followed by a second rounding only if we get P34+1
+ // determine inexactness of the rounding of C2* (this may be
+ // followed by a second rounding only if we get P34+1
// decimal digits)
// if (0 < f2* - 1/2 < 10^(-x1)) then
// the result is exact
if (C1.w[0] < tmp64)
C1.w[1]++; // carry
// if the sum has P34+1 digits, i.e. C1>=10^34 redo the calculation
- // with x1=x1+1
+ // with x1=x1+1
if (C1.w[1] > 0x0001ed09bead87c0ull || (C1.w[1] == 0x0001ed09bead87c0ull && C1.w[0] >= 0x378d8e6400000000ull)) { // C1 >= 10^34
// chop off one more digit from the sum, but make sure there is
// no double-rounding error (see table - double rounding logic)
}
}
tmp_inexact = 1; // in all cases
- } else { // the result is not a midpoint
+ } else { // the result is not a midpoint
// determine inexactness of the rounding of C1 (the sum C1+C2*)
// if (0 < f1* - 1/2 < 10^(-1)) then
// the result is exact
}
// if the difference has P34-1 digits or less, i.e. C1 < 10^33 then
// redo the calculation with x1=x1-1;
- // redo the calculation also if C1 = 10^33 and
+ // redo the calculation also if C1 = 10^33 and
// (is_inexact_gt_midpoint or is_midpoint_lt_even);
- // (the last part should have really been
+ // (the last part should have really been
// (is_inexact_lt_midpoint or is_midpoint_gt_even) from
// the rounding of C2, but the position flags have been reversed)
// 10^33 = 0x0000314dc6448d93 0x38c15b0a00000000
}
}
// if the coefficient of the result is 10^34 it means that this
- // must be the second pass, and we are done
+ // must be the second pass, and we are done
if (C1.w[1] == 0x0001ed09bead87c0ull && C1.w[0] == 0x378d8e6400000000ull) { // if C1 = 10^34
C1.w[1] = 0x0000314dc6448d93ull; // C1 = 10^33
C1.w[0] = 0x38c15b0a00000000ull;
x_sign = tmp_sign;
if (x1 >= 1)
y_exp = y_exp + ((UINT64) x1 << 49);
- // x1 = -1 is possible at the end of a second pass when the
- // first pass started with x1 = 1
+ // x1 = -1 is possible at the end of a second pass when the
+ // first pass started with x1 = 1
}
C1_hi = C1.w[1];
C1_lo = C1.w[0];
*pfpsf |= INEXACT_EXCEPTION;
}
} else { // if (-P34 + 1 <= delta <= -1) <=> 1 <= -delta <= P34 - 1
- // NOTE: the following, up to "} else { // if x_sign != y_sign
+ // NOTE: the following, up to "} else { // if x_sign != y_sign
// the result is exact" is identical to "else if (delta == P34 - q2) {"
// from above; also, the code is not symmetric: a+b and b+a may take
- // different paths (need to unify eventually!)
- // calculate C' = C2 + C1 * 10^(e1-e2) directly; the result may be
- // inexact if it requires P34 + 1 decimal digits; in either case the
+ // different paths (need to unify eventually!)
+ // calculate C' = C2 + C1 * 10^(e1-e2) directly; the result may be
+ // inexact if it requires P34 + 1 decimal digits; in either case the
// 'cutoff' point for addition is at the position of the lsb of C2
// The coefficient of the result is C1 * 10^(e1-e2) + C2 and the
// exponent is e2; either C1 or 10^(e1-e2) may not fit is 64 bits,
C1_hi++;
// test for overflow, possible only when C1 >= 10^34
if (C1_hi > 0x0001ed09bead87c0ull || (C1_hi == 0x0001ed09bead87c0ull && C1_lo >= 0x378d8e6400000000ull)) { // C1 >= 10^34
- // in this case q = P34 + 1 and x = q - P34 = 1, so multiply
- // C'' = C'+ 5 = C1 + 5 by k1 ~ 10^(-1) calculated for P34 + 1
+ // in this case q = P34 + 1 and x = q - P34 = 1, so multiply
+ // C'' = C'+ 5 = C1 + 5 by k1 ~ 10^(-1) calculated for P34 + 1
// decimal digits
// Calculate C'' = C' + 1/2 * 10^x
if (C1_lo >= 0xfffffffffffffffbull) { // low half add has carry
ten2m1.w[0] = 0x9999999999999a00ull;
__mul_128x128_to_256 (P256, C1, ten2m1); // P256 = C*, f*
// C* is actually floor(C*) in this case
- // the top Ex = 128 bits of 10^(-1) are
+ // the top Ex = 128 bits of 10^(-1) are
// T* = 0x00199999999999999999999999999999
// if (0 < f* < 10^(-x)) then
- // if floor(C*) is even then C = floor(C*) - logical right
+ // if floor(C*) is even then C = floor(C*) - logical right
// shift; C has p decimal digits, correct by Prop. 1)
// else if floor(C*) is odd C = floor(C*) - 1 (logical right
// shift; C has p decimal digits, correct by Pr. 1)
BID_SWAP128 (res);
BID_RETURN (res);
}
- // if (0 < f* - 1/2 < 10^(-x)) then
- // the result of the addition is exact
- // else
+ // if (0 < f* - 1/2 < 10^(-x)) then
+ // the result of the addition is exact
+ // else
// the result of the addition is inexact
if (P256.w[1] > 0x8000000000000000ull || (P256.w[1] == 0x8000000000000000ull && P256.w[0] > 0x0ull)) { // the result may be exact
tmp64 = P256.w[1] - 0x8000000000000000ull; // f* - 1/2
char x_is_zero = 0, y_is_zero = 0, non_canon_x, non_canon_y;
// NaN (CASE1)
- // if either number is NAN, the comparison is unordered,
+ // if either number is NAN, the comparison is unordered,
// rather than equal : return 0
if (((x.w[1] & MASK_NAN) == MASK_NAN)
|| ((y.w[1] & MASK_NAN) == MASK_NAN)) {
exp_x = (x.w[1] >> 49) & 0x000000000003fffull;
// CHECK IF X IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_x.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_x.w[1] == 0x0001ed09bead87c0ull)
sig_y.w[0] = y.w[0];
// CHECK IF Y IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_y.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_y.w[1] == 0x0001ed09bead87c0ull)
// some properties:
// (+ZERO == -ZERO) => therefore ignore the sign
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
// ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || ((sig_x.w[1] == 0) && (sig_x.w[0] == 0))) {
char x_is_zero = 0, y_is_zero = 0, non_canon_x, non_canon_y;
// NaN (CASE1)
- // if either number is NAN, the comparison is unordered, rather than
+ // if either number is NAN, the comparison is unordered, rather than
// equal : return 0
if (((x.w[1] & MASK_NAN) == MASK_NAN)
|| ((y.w[1] & MASK_NAN) == MASK_NAN)) {
res = 0;
BID_RETURN (res);
}
- // x is pos infinity, it is greater, unless y is positive infinity =>
+ // x is pos infinity, it is greater, unless y is positive infinity =>
// return y!=pos_infinity
else {
res = (((y.w[1] & MASK_INF) != MASK_INF)
exp_x = (x.w[1] >> 49) & 0x000000000003fffull;
// CHECK IF X IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_x.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_x.w[1] == 0x0001ed09bead87c0ull)
sig_y.w[0] = y.w[0];
// CHECK IF Y IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_y.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_y.w[1] == 0x0001ed09bead87c0ull)
// ZERO (CASE4)
// some properties:
// (+ZERO == -ZERO) => therefore ignore the sign
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
// ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || ((sig_x.w[1] == 0) && (sig_x.w[0] == 0))) {
BID_RETURN (res);
}
// REDUNDANT REPRESENTATIONS (CASE6)
- // if exponents are the same, then we have a simple comparison
+ // if exponents are the same, then we have a simple comparison
// of the significands
if (exp_y == exp_x) {
res = (((sig_x.w[1] > sig_y.w[1])
MASK_SIGN));
BID_RETURN (res);
}
- // if both components are either bigger or smaller,
+ // if both components are either bigger or smaller,
// it is clear what needs to be done
if ((sig_x.w[1] > sig_y.w[1]
|| (sig_x.w[1] == sig_y.w[1] && sig_x.w[0] > sig_y.w[0]))
if (diff > 19) { //128 by 128 bit multiply -> 256 bits
__mul_128x128_to_256 (sig_n_prime256, sig_x, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_y.w[1]
//else { //128 by 64 bit multiply -> 192 bits
__mul_64x128_to_192 (sig_n_prime192, ten2k64[diff], sig_x);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_y.w[1]
&& (sig_n_prime192.w[0] == sig_y.w[0])) {
// adjust the y significand upwards
__mul_128x128_to_256 (sig_n_prime256, sig_y, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_x.w[1]
// adjust the y significand upwards
__mul_64x128_to_192 (sig_n_prime192, ten2k64[diff], sig_y);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_x.w[1]
&& (sig_n_prime192.w[0] == sig_x.w[0])) {
char x_is_zero = 0, y_is_zero = 0, non_canon_x, non_canon_y;
// NaN (CASE1)
- // if either number is NAN, the comparison is unordered,
+ // if either number is NAN, the comparison is unordered,
// rather than equal : return 1
if (((x.w[1] & MASK_NAN) == MASK_NAN)
|| ((y.w[1] & MASK_NAN) == MASK_NAN)) {
exp_x = (x.w[1] >> 49) & 0x000000000003fffull;
// CHECK IF X IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_x.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_x.w[1] == 0x0001ed09bead87c0ull)
sig_y.w[0] = y.w[0];
// CHECK IF Y IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_y.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_y.w[1] == 0x0001ed09bead87c0ull)
// ZERO (CASE4)
// some properties:
// (+ZERO == -ZERO) => therefore ignore the sign
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
// ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || ((sig_x.w[1] == 0) && (sig_x.w[0] == 0))) {
BID_RETURN (res);
}
// REDUNDANT REPRESENTATIONS (CASE6)
- // if exponents are the same, then we have a simple comparison of the
+ // if exponents are the same, then we have a simple comparison of the
// significands
if (exp_y == exp_x) {
res = (((sig_x.w[1] > sig_y.w[1])
MASK_SIGN));
BID_RETURN (res);
}
- // if both components are either bigger or smaller,
+ // if both components are either bigger or smaller,
// it is clear what needs to be done
if (sig_x.w[1] >= sig_y.w[1] && sig_x.w[0] >= sig_y.w[0]
&& exp_x > exp_y) {
__mul_128x128_to_256 (sig_n_prime256, sig_x, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_y.w[1]
//else { //128 by 64 bit multiply -> 192 bits
__mul_64x128_to192 (sig_n_prime192, ten2k64[diff], sig_x);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_y.w[1]
&& (sig_n_prime192.w[0] == sig_y.w[0])) {
__mul_128x128_to_256 (sig_n_prime256, sig_y, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_x.w[1]
// adjust the y significand upwards
__mul_64x128_to192 (sig_n_prime192, ten2k64[diff], sig_y);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_x.w[1]
&& (sig_n_prime192.w[0] == sig_x.w[0])) {
char x_is_zero = 0, y_is_zero = 0, non_canon_x, non_canon_y;
// NaN (CASE1)
- // if either number is NAN, the comparison is unordered,
- // rather than
+ // if either number is NAN, the comparison is unordered,
+ // rather than
// equal : return 1
if (((x.w[1] & MASK_NAN) == MASK_NAN)
|| ((y.w[1] & MASK_NAN) == MASK_NAN)) {
res = 0;
BID_RETURN (res);
}
- // x is pos infinity, it is greater, unless y is positive infinity =>
+ // x is pos infinity, it is greater, unless y is positive infinity =>
// return y!=pos_infinity
else {
res = (((y.w[1] & MASK_INF) != MASK_INF)
exp_x = (x.w[1] >> 49) & 0x000000000003fffull;
// CHECK IF X IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_x.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_x.w[1] == 0x0001ed09bead87c0ull)
sig_y.w[0] = y.w[0];
// CHECK IF Y IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_y.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_y.w[1] == 0x0001ed09bead87c0ull)
// ZERO (CASE4)
// some properties:
// (+ZERO == -ZERO) => therefore ignore the sign
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
// ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || ((sig_x.w[1] == 0) && (sig_x.w[0] == 0))) {
BID_RETURN (res);
}
// REDUNDANT REPRESENTATIONS (CASE6)
- // if exponents are the same, then we have a simple comparison of the
+ // if exponents are the same, then we have a simple comparison of the
// significands
if (exp_y == exp_x) {
res = (((sig_x.w[1] > sig_y.w[1])
MASK_SIGN));
BID_RETURN (res);
}
- // if both components are either bigger or smaller,
+ // if both components are either bigger or smaller,
// it is clear what needs to be done
if (sig_x.w[1] >= sig_y.w[1] && sig_x.w[0] >= sig_y.w[0]
&& exp_x > exp_y) {
__mul_128x128_to_256 (sig_n_prime256, sig_x, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_y.w[1]
//else { //128 by 64 bit multiply -> 192 bits
__mul_64x128_to192 (sig_n_prime192, ten2k64[diff], sig_x);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_y.w[1]
&& (sig_n_prime192.w[0] == sig_y.w[0])) {
__mul_128x128_to_256 (sig_n_prime256, sig_y, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_x.w[1]
// adjust the y significand upwards
__mul_64x128_to192 (sig_n_prime192, ten2k64[diff], sig_y);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_x.w[1]
&& (sig_n_prime192.w[0] == sig_x.w[0])) {
char x_is_zero = 0, y_is_zero = 0, non_canon_x, non_canon_y;
// NaN (CASE1)
- // if either number is NAN, the comparison is unordered,
+ // if either number is NAN, the comparison is unordered,
// rather than equal : return 0
if (((x.w[1] & MASK_NAN) == MASK_NAN)
|| ((y.w[1] & MASK_NAN) == MASK_NAN)) {
exp_x = (x.w[1] >> 49) & 0x000000000003fffull;
// CHECK IF X IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_x.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_x.w[1] == 0x0001ed09bead87c0ull)
sig_y.w[0] = y.w[0];
// CHECK IF Y IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_y.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_y.w[1] == 0x0001ed09bead87c0ull)
// ZERO (CASE4)
// some properties:
// (+ZERO == -ZERO) => therefore ignore the sign
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
// ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || ((sig_x.w[1] == 0) && (sig_x.w[0] == 0))) {
BID_RETURN (res);
}
// REDUNDANT REPRESENTATIONS (CASE6)
- // if exponents are the same, then we have a simple comparison of the
+ // if exponents are the same, then we have a simple comparison of the
// significands
if (exp_y == exp_x) {
res = (((sig_x.w[1] > sig_y.w[1])
MASK_SIGN));
BID_RETURN (res);
}
- // if both components are either bigger or smaller,
+ // if both components are either bigger or smaller,
// it is clear what needs to be done
if ((sig_x.w[1] > sig_y.w[1]
|| (sig_x.w[1] == sig_y.w[1] && sig_x.w[0] > sig_y.w[0]))
__mul_128x128_to_256 (sig_n_prime256, sig_x, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_y.w[1]
//else { //128 by 64 bit multiply -> 192 bits
__mul_64x128_to192 (sig_n_prime192, ten2k64[diff], sig_x);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_y.w[1]
&& (sig_n_prime192.w[0] == sig_y.w[0])) {
// adjust the y significand upwards
__mul_128x128_to_256 (sig_n_prime256, sig_y, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_x.w[1]
// adjust the y significand upwards
__mul_64x128_to192 (sig_n_prime192, ten2k64[diff], sig_y);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_x.w[1]
&& (sig_n_prime192.w[0] == sig_x.w[0])) {
char x_is_zero = 0, y_is_zero = 0, non_canon_x, non_canon_y;
// NaN (CASE1)
- // if either number is NAN, the comparison is unordered,
+ // if either number is NAN, the comparison is unordered,
// rather than equal : return 0
if (((x.w[1] & MASK_NAN) == MASK_NAN)
|| ((y.w[1] & MASK_NAN) == MASK_NAN)) {
res = 1;
BID_RETURN (res);
}
- // x is pos infinity, it is greater, unless y is positive infinity =>
+ // x is pos infinity, it is greater, unless y is positive infinity =>
// return y!=pos_infinity
else {
res = (((y.w[1] & MASK_INF) == MASK_INF)
exp_x = (x.w[1] >> 49) & 0x000000000003fffull;
// CHECK IF X IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_x.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_x.w[1] == 0x0001ed09bead87c0ull)
sig_y.w[0] = y.w[0];
// CHECK IF Y IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_y.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_y.w[1] == 0x0001ed09bead87c0ull)
// ZERO (CASE4)
// some properties:
// (+ZERO == -ZERO) => therefore ignore the sign
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
// ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || ((sig_x.w[1] == 0) && (sig_x.w[0] == 0))) {
BID_RETURN (res);
}
// REDUNDANT REPRESENTATIONS (CASE6)
- // if exponents are the same, then we have a simple comparison of the
+ // if exponents are the same, then we have a simple comparison of the
// significands
if (exp_y == exp_x) {
res = (((sig_x.w[1] > sig_y.w[1]) || (sig_x.w[1] == sig_y.w[1] &&
MASK_SIGN));
BID_RETURN (res);
}
- // if both components are either bigger or smaller,
+ // if both components are either bigger or smaller,
// it is clear what needs to be done
if ((sig_x.w[1] > sig_y.w[1]
|| (sig_x.w[1] == sig_y.w[1] && sig_x.w[0] > sig_y.w[0]))
__mul_128x128_to_256 (sig_n_prime256, sig_x, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_y.w[1]
//else { //128 by 64 bit multiply -> 192 bits
__mul_64x128_to192 (sig_n_prime192, ten2k64[diff], sig_x);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_y.w[1]
&& (sig_n_prime192.w[0] == sig_y.w[0])) {
__mul_128x128_to_256 (sig_n_prime256, sig_y, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_x.w[1]
// adjust the y significand upwards
__mul_64x128_to192 (sig_n_prime192, ten2k64[diff], sig_y);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_x.w[1]
&& (sig_n_prime192.w[0] == sig_x.w[0])) {
exp_x = (x.w[1] >> 49) & 0x000000000003fffull;
// CHECK IF X IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_x.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_x.w[1] == 0x0001ed09bead87c0ull)
sig_y.w[0] = y.w[0];
// CHECK IF Y IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_y.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_y.w[1] == 0x0001ed09bead87c0ull)
// ZERO (CASE4)
// some properties:
// (+ZERO == -ZERO) => therefore ignore the sign
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
// ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || ((sig_x.w[1] == 0) && (sig_x.w[0] == 0))) {
BID_RETURN (res);
}
// REDUNDANT REPRESENTATIONS (CASE6)
- // if exponents are the same, then we have a simple comparison
+ // if exponents are the same, then we have a simple comparison
// of the significands
if (exp_y == exp_x) {
res = (((sig_x.w[1] > sig_y.w[1])
MASK_SIGN));
BID_RETURN (res);
}
- // if both components are either bigger or smaller,
+ // if both components are either bigger or smaller,
// it is clear what needs to be done
if ((sig_x.w[1] > sig_y.w[1]
|| (sig_x.w[1] == sig_y.w[1] && sig_x.w[0] > sig_y.w[0]))
__mul_128x128_to_256 (sig_n_prime256, sig_x, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_y.w[1]
//else { //128 by 64 bit multiply -> 192 bits
__mul_64x128_to192 (sig_n_prime192, ten2k64[diff], sig_x);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_y.w[1]
&& (sig_n_prime192.w[0] == sig_y.w[0])) {
__mul_128x128_to_256 (sig_n_prime256, sig_y, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_x.w[1]
// adjust the y significand upwards
__mul_64x128_to192 (sig_n_prime192, ten2k64[diff], sig_y);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_x.w[1]
&& (sig_n_prime192.w[0] == sig_x.w[0])) {
char x_is_zero = 0, y_is_zero = 0, non_canon_x, non_canon_y;
// NaN (CASE1)
- // if either number is NAN, the comparison is unordered,
+ // if either number is NAN, the comparison is unordered,
// rather than equal : return 0
if (((x.w[1] & MASK_NAN) == MASK_NAN)
|| ((y.w[1] & MASK_NAN) == MASK_NAN)) {
exp_x = (x.w[1] >> 49) & 0x000000000003fffull;
// CHECK IF X IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_x.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_x.w[1] == 0x0001ed09bead87c0ull)
sig_y.w[0] = y.w[0];
// CHECK IF Y IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_y.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_y.w[1] == 0x0001ed09bead87c0ull)
// some properties:
// (+ZERO == -ZERO) => therefore ignore the sign
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
// ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || ((sig_x.w[1] == 0) && (sig_x.w[0] == 0))) {
char x_is_zero = 0, y_is_zero = 0, non_canon_x, non_canon_y;
// NaN (CASE1)
- // if either number is NAN, the comparison is unordered,
+ // if either number is NAN, the comparison is unordered,
// rather than equal : return 0
if (((x.w[1] & MASK_NAN) == MASK_NAN)
|| ((y.w[1] & MASK_NAN) == MASK_NAN)) {
exp_x = (x.w[1] >> 49) & 0x000000000003fffull;
// CHECK IF X IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_x.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_x.w[1] == 0x0001ed09bead87c0ull)
sig_y.w[0] = y.w[0];
// CHECK IF Y IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_y.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_y.w[1] == 0x0001ed09bead87c0ull)
// ZERO (CASE4)
// some properties:
// (+ZERO == -ZERO) => therefore ignore the sign
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
// ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || ((sig_x.w[1] == 0) && (sig_x.w[0] == 0))) {
BID_RETURN (res);
}
// REDUNDANT REPRESENTATIONS (CASE6)
- // if exponents are the same, then we have a simple comparison
+ // if exponents are the same, then we have a simple comparison
// of the significands
if (exp_y == exp_x) {
res = (((sig_x.w[1] > sig_y.w[1])
MASK_SIGN));
BID_RETURN (res);
}
- // if both components are either bigger or smaller,
+ // if both components are either bigger or smaller,
// it is clear what needs to be done
if ((sig_x.w[1] > sig_y.w[1]
|| (sig_x.w[1] == sig_y.w[1] && sig_x.w[0] > sig_y.w[0]))
__mul_128x128_to_256 (sig_n_prime256, sig_x, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_y.w[1]
//else { //128 by 64 bit multiply -> 192 bits
__mul_64x128_to192 (sig_n_prime192, ten2k64[diff], sig_x);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_y.w[1]
&& (sig_n_prime192.w[0] == sig_y.w[0])) {
__mul_128x128_to_256 (sig_n_prime256, sig_y, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_x.w[1]
// adjust the y significand upwards
__mul_64x128_to192 (sig_n_prime192, ten2k64[diff], sig_y);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_x.w[1]
&& (sig_n_prime192.w[0] == sig_x.w[0])) {
char x_is_zero = 0, y_is_zero = 0, non_canon_x, non_canon_y;
// NaN (CASE1)
- // if either number is NAN, the comparison is unordered,
+ // if either number is NAN, the comparison is unordered,
// rather than equal : return 1
if (((x.w[1] & MASK_NAN) == MASK_NAN)
|| ((y.w[1] & MASK_NAN) == MASK_NAN)) {
exp_x = (x.w[1] >> 49) & 0x000000000003fffull;
// CHECK IF X IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_x.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_x.w[1] == 0x0001ed09bead87c0ull)
sig_y.w[0] = y.w[0];
// CHECK IF Y IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_y.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_y.w[1] == 0x0001ed09bead87c0ull)
// ZERO (CASE4)
// some properties:
// (+ZERO == -ZERO) => therefore ignore the sign
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
// ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || ((sig_x.w[1] == 0) && (sig_x.w[0] == 0))) {
}
// REDUNDANT REPRESENTATIONS (CASE6)
- // if exponents are the same, then we have a simple comparison
+ // if exponents are the same, then we have a simple comparison
// of the significands
if (exp_y == exp_x) {
res = (((sig_x.w[1] > sig_y.w[1])
MASK_SIGN));
BID_RETURN (res);
}
- // if both components are either bigger or smaller,
+ // if both components are either bigger or smaller,
// it is clear what needs to be done
if (sig_x.w[1] >= sig_y.w[1] && sig_x.w[0] >= sig_y.w[0]
&& exp_x > exp_y) {
__mul_128x128_to_256 (sig_n_prime256, sig_x, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_y.w[1]
//else { //128 by 64 bit multiply -> 192 bits
__mul_64x128_to192 (sig_n_prime192, ten2k64[diff], sig_x);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_y.w[1]
&& (sig_n_prime192.w[0] == sig_y.w[0])) {
__mul_128x128_to_256 (sig_n_prime256, sig_y, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_x.w[1]
// adjust the y significand upwards
__mul_64x128_to192 (sig_n_prime192, ten2k64[diff], sig_y);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_x.w[1]
&& (sig_n_prime192.w[0] == sig_x.w[0])) {
char x_is_zero = 0, y_is_zero = 0, non_canon_x, non_canon_y;
// NaN (CASE1)
- // if either number is NAN, the comparison is unordered,
+ // if either number is NAN, the comparison is unordered,
// rather than equal : return 0
if (((x.w[1] & MASK_NAN) == MASK_NAN)
|| ((y.w[1] & MASK_NAN) == MASK_NAN)) {
exp_x = (x.w[1] >> 49) & 0x000000000003fffull;
// CHECK IF X IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_x.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_x.w[1] == 0x0001ed09bead87c0ull)
sig_y.w[0] = y.w[0];
// CHECK IF Y IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_y.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_y.w[1] == 0x0001ed09bead87c0ull)
// ZERO (CASE4)
// some properties:
// (+ZERO == -ZERO) => therefore ignore the sign
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
// ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || ((sig_x.w[1] == 0) && (sig_x.w[0] == 0))) {
BID_RETURN (res);
}
// REDUNDANT REPRESENTATIONS (CASE6)
- // if exponents are the same, then we have a simple comparison
+ // if exponents are the same, then we have a simple comparison
// of the significands
if (exp_y == exp_x) {
res = (((sig_x.w[1] > sig_y.w[1])
MASK_SIGN));
BID_RETURN (res);
}
- // if both components are either bigger or smaller,
+ // if both components are either bigger or smaller,
// it is clear what needs to be done
if ((sig_x.w[1] > sig_y.w[1]
|| (sig_x.w[1] == sig_y.w[1] && sig_x.w[0] > sig_y.w[0]))
if (diff > 19) { //128 by 128 bit multiply -> 256 bits
__mul_128x128_to_256 (sig_n_prime256, sig_x, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_y.w[1]
//else { //128 by 64 bit multiply -> 192 bits
__mul_64x128_to_192 (sig_n_prime192, ten2k64[diff], sig_x);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_y.w[1]
&& (sig_n_prime192.w[0] == sig_y.w[0])) {
// adjust the y significand upwards
__mul_128x128_to_256 (sig_n_prime256, sig_y, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_x.w[1]
// adjust the y significand upwards
__mul_64x128_to_192 (sig_n_prime192, ten2k64[diff], sig_y);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_x.w[1]
&& (sig_n_prime192.w[0] == sig_x.w[0])) {
char x_is_zero = 0, y_is_zero = 0, non_canon_x, non_canon_y;
// NaN (CASE1)
- // if either number is NAN, the comparison is unordered,
+ // if either number is NAN, the comparison is unordered,
// rather than equal : return 1
if (((x.w[1] & MASK_NAN) == MASK_NAN)
|| ((y.w[1] & MASK_NAN) == MASK_NAN)) {
exp_x = (x.w[1] >> 49) & 0x000000000003fffull;
// CHECK IF X IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_x.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_x.w[1] == 0x0001ed09bead87c0ull)
sig_y.w[0] = y.w[0];
// CHECK IF Y IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_y.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_y.w[1] == 0x0001ed09bead87c0ull)
// ZERO (CASE4)
// some properties:
// (+ZERO == -ZERO) => therefore ignore the sign
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
// ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || ((sig_x.w[1] == 0) && (sig_x.w[0] == 0))) {
BID_RETURN (res);
}
// REDUNDANT REPRESENTATIONS (CASE6)
- // if exponents are the same, then we have a simple comparison
+ // if exponents are the same, then we have a simple comparison
// of the significands
if (exp_y == exp_x) {
res = (((sig_x.w[1] > sig_y.w[1])
MASK_SIGN));
BID_RETURN (res);
}
- // if both components are either bigger or smaller,
+ // if both components are either bigger or smaller,
// it is clear what needs to be done
if (sig_x.w[1] >= sig_y.w[1] && sig_x.w[0] >= sig_y.w[0]
&& exp_x > exp_y) {
__mul_128x128_to_256 (sig_n_prime256, sig_x, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_y.w[1]
//else { //128 by 64 bit multiply -> 192 bits
__mul_64x128_to192 (sig_n_prime192, ten2k64[diff], sig_x);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_y.w[1]
&& (sig_n_prime192.w[0] == sig_y.w[0])) {
__mul_128x128_to_256 (sig_n_prime256, sig_y, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_x.w[1]
// adjust the y significand upwards
__mul_64x128_to192 (sig_n_prime192, ten2k64[diff], sig_y);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_x.w[1]
&& (sig_n_prime192.w[0] == sig_x.w[0])) {
char x_is_zero = 0, y_is_zero = 0, non_canon_x, non_canon_y;
// NaN (CASE1)
- // if either number is NAN, the comparison is unordered,
+ // if either number is NAN, the comparison is unordered,
// rather than equal : return 1
if (((x.w[1] & MASK_NAN) == MASK_NAN)
|| ((y.w[1] & MASK_NAN) == MASK_NAN)) {
exp_x = (x.w[1] >> 49) & 0x000000000003fffull;
// CHECK IF X IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_x.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_x.w[1] == 0x0001ed09bead87c0ull)
sig_y.w[0] = y.w[0];
// CHECK IF Y IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_y.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_y.w[1] == 0x0001ed09bead87c0ull)
// ZERO (CASE4)
// some properties:
// (+ZERO == -ZERO) => therefore ignore the sign
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
// ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || ((sig_x.w[1] == 0) && (sig_x.w[0] == 0))) {
BID_RETURN (res);
}
// REDUNDANT REPRESENTATIONS (CASE6)
- // if exponents are the same, then we have a simple comparison
+ // if exponents are the same, then we have a simple comparison
// of the significands
if (exp_y == exp_x) {
res = (((sig_x.w[1] > sig_y.w[1])
MASK_SIGN));
BID_RETURN (res);
}
- // if both components are either bigger or smaller,
+ // if both components are either bigger or smaller,
// it is clear what needs to be done
if (sig_x.w[1] >= sig_y.w[1] && sig_x.w[0] >= sig_y.w[0]
&& exp_x > exp_y) {
__mul_128x128_to_256 (sig_n_prime256, sig_x, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_y.w[1]
//else { //128 by 64 bit multiply -> 192 bits
__mul_64x128_to192 (sig_n_prime192, ten2k64[diff], sig_x);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_y.w[1]
&& (sig_n_prime192.w[0] == sig_y.w[0])) {
__mul_128x128_to_256 (sig_n_prime256, sig_y, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_x.w[1]
// adjust the y significand upwards
__mul_64x128_to192 (sig_n_prime192, ten2k64[diff], sig_y);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_x.w[1]
&& (sig_n_prime192.w[0] == sig_x.w[0])) {
char x_is_zero = 0, y_is_zero = 0, non_canon_x, non_canon_y;
// NaN (CASE1)
- // if either number is NAN, the comparison is unordered,
+ // if either number is NAN, the comparison is unordered,
// rather than equal : return 0
if (((x.w[1] & MASK_NAN) == MASK_NAN)
|| ((y.w[1] & MASK_NAN) == MASK_NAN)) {
exp_x = (x.w[1] >> 49) & 0x000000000003fffull;
// CHECK IF X IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_x.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_x.w[1] == 0x0001ed09bead87c0ull)
sig_y.w[0] = y.w[0];
// CHECK IF Y IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_y.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_y.w[1] == 0x0001ed09bead87c0ull)
// ZERO (CASE4)
// some properties:
// (+ZERO == -ZERO) => therefore ignore the sign
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
// ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || ((sig_x.w[1] == 0) && (sig_x.w[0] == 0))) {
BID_RETURN (res);
}
// REDUNDANT REPRESENTATIONS (CASE6)
- // if exponents are the same, then we have a simple comparison
+ // if exponents are the same, then we have a simple comparison
// of the significands
if (exp_y == exp_x) {
res = (((sig_x.w[1] > sig_y.w[1])
MASK_SIGN));
BID_RETURN (res);
}
- // if both components are either bigger or smaller,
+ // if both components are either bigger or smaller,
// it is clear what needs to be done
if ((sig_x.w[1] > sig_y.w[1]
|| (sig_x.w[1] == sig_y.w[1] && sig_x.w[0] > sig_y.w[0]))
__mul_128x128_to_256 (sig_n_prime256, sig_x, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_y.w[1]
//else { //128 by 64 bit multiply -> 192 bits
__mul_64x128_to192 (sig_n_prime192, ten2k64[diff], sig_x);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_y.w[1]
&& (sig_n_prime192.w[0] == sig_y.w[0])) {
__mul_128x128_to_256 (sig_n_prime256, sig_y, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_x.w[1]
// adjust the y significand upwards
__mul_64x128_to192 (sig_n_prime192, ten2k64[diff], sig_y);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_x.w[1]
&& (sig_n_prime192.w[0] == sig_x.w[0])) {
char x_is_zero = 0, y_is_zero = 0, non_canon_x, non_canon_y;
// NaN (CASE1)
- // if either number is NAN, the comparison is unordered,
+ // if either number is NAN, the comparison is unordered,
// rather than equal : return 0
if (((x.w[1] & MASK_NAN) == MASK_NAN)
|| ((y.w[1] & MASK_NAN) == MASK_NAN)) {
exp_x = (x.w[1] >> 49) & 0x000000000003fffull;
// CHECK IF X IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_x.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_x.w[1] == 0x0001ed09bead87c0ull)
sig_y.w[0] = y.w[0];
// CHECK IF Y IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_y.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_y.w[1] == 0x0001ed09bead87c0ull)
// ZERO (CASE4)
// some properties:
// (+ZERO == -ZERO) => therefore ignore the sign
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
// ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || ((sig_x.w[1] == 0) && (sig_x.w[0] == 0))) {
BID_RETURN (res);
}
// REDUNDANT REPRESENTATIONS (CASE6)
- // if exponents are the same, then we have a simple comparison
+ // if exponents are the same, then we have a simple comparison
// of the significands
if (exp_y == exp_x) {
res = (((sig_x.w[1] > sig_y.w[1])
MASK_SIGN));
BID_RETURN (res);
}
- // if both components are either bigger or smaller,
+ // if both components are either bigger or smaller,
// it is clear what needs to be done
if ((sig_x.w[1] > sig_y.w[1]
|| (sig_x.w[1] == sig_y.w[1] && sig_x.w[0] > sig_y.w[0]))
__mul_128x128_to_256 (sig_n_prime256, sig_x, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_y.w[1]
//else { //128 by 64 bit multiply -> 192 bits
__mul_64x128_to192 (sig_n_prime192, ten2k64[diff], sig_x);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_y.w[1]
&& (sig_n_prime192.w[0] == sig_y.w[0])) {
__mul_128x128_to_256 (sig_n_prime256, sig_y, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_x.w[1]
// adjust the y significand upwards
__mul_64x128_to192 (sig_n_prime192, ten2k64[diff], sig_y);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_x.w[1]
&& (sig_n_prime192.w[0] == sig_x.w[0])) {
exp_x = (x.w[1] >> 49) & 0x000000000003fffull;
// CHECK IF X IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_x.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_x.w[1] == 0x0001ed09bead87c0ull)
sig_y.w[0] = y.w[0];
// CHECK IF Y IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_y.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_y.w[1] == 0x0001ed09bead87c0ull)
// ZERO (CASE4)
// some properties:
// (+ZERO == -ZERO) => therefore ignore the sign
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
// ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || ((sig_x.w[1] == 0) && (sig_x.w[0] == 0))) {
BID_RETURN (res);
}
// REDUNDANT REPRESENTATIONS (CASE6)
- // if exponents are the same, then we have a simple comparison
+ // if exponents are the same, then we have a simple comparison
// of the significands
if (exp_y == exp_x) {
res = (((sig_x.w[1] > sig_y.w[1])
MASK_SIGN));
BID_RETURN (res);
}
- // if both components are either bigger or smaller,
+ // if both components are either bigger or smaller,
// it is clear what needs to be done
if ((sig_x.w[1] > sig_y.w[1]
|| (sig_x.w[1] == sig_y.w[1] && sig_x.w[0] > sig_y.w[0]))
__mul_128x128_to_256 (sig_n_prime256, sig_x, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_y.w[1]
//else { //128 by 64 bit multiply -> 192 bits
__mul_64x128_to192 (sig_n_prime192, ten2k64[diff], sig_x);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_y.w[1]
&& (sig_n_prime192.w[0] == sig_y.w[0])) {
__mul_128x128_to_256 (sig_n_prime256, sig_y, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_x.w[1]
char x_is_zero = 0, y_is_zero = 0, non_canon_x, non_canon_y;
// NaN (CASE1)
- // if either number is NAN, the comparison is unordered,
+ // if either number is NAN, the comparison is unordered,
// rather than equal : return 0
if (((x.w[1] & MASK_NAN) == MASK_NAN)
|| ((y.w[1] & MASK_NAN) == MASK_NAN)) {
exp_x = (x.w[1] >> 49) & 0x000000000003fffull;
// CHECK IF X IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_x.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_x.w[1] == 0x0001ed09bead87c0ull)
sig_y.w[0] = y.w[0];
// CHECK IF Y IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_y.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_y.w[1] == 0x0001ed09bead87c0ull)
// ZERO (CASE4)
// some properties:
// (+ZERO == -ZERO) => therefore ignore the sign
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
// ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || ((sig_x.w[1] == 0) && (sig_x.w[0] == 0))) {
BID_RETURN (res);
}
// REDUNDANT REPRESENTATIONS (CASE6)
- // if exponents are the same, then we have a simple comparison
+ // if exponents are the same, then we have a simple comparison
// of the significands
if (exp_y == exp_x) {
res = (((sig_x.w[1] > sig_y.w[1])
MASK_SIGN));
BID_RETURN (res);
}
- // if both components are either bigger or smaller,
+ // if both components are either bigger or smaller,
// it is clear what needs to be done
if ((sig_x.w[1] > sig_y.w[1]
|| (sig_x.w[1] == sig_y.w[1] && sig_x.w[0] > sig_y.w[0]))
__mul_128x128_to_256 (sig_n_prime256, sig_x, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_y.w[1]
//else { //128 by 64 bit multiply -> 192 bits
__mul_64x128_to192 (sig_n_prime192, ten2k64[diff], sig_x);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_y.w[1]
&& (sig_n_prime192.w[0] == sig_y.w[0])) {
// adjust the y significand upwards
__mul_128x128_to_256 (sig_n_prime256, sig_y, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_x.w[1]
// adjust the y significand upwards
__mul_64x128_to192 (sig_n_prime192, ten2k64[diff], sig_y);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_x.w[1]
&& (sig_n_prime192.w[0] == sig_x.w[0])) {
char x_is_zero = 0, y_is_zero = 0, non_canon_x, non_canon_y;
// NaN (CASE1)
- // if either number is NAN, the comparison is unordered,
+ // if either number is NAN, the comparison is unordered,
// rather than equal : return 1
if (((x.w[1] & MASK_NAN) == MASK_NAN)
|| ((y.w[1] & MASK_NAN) == MASK_NAN)) {
exp_x = (x.w[1] >> 49) & 0x000000000003fffull;
// CHECK IF X IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_x.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_x.w[1] == 0x0001ed09bead87c0ull)
sig_y.w[0] = y.w[0];
// CHECK IF Y IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((sig_y.w[1] > 0x0001ed09bead87c0ull)
|| ((sig_y.w[1] == 0x0001ed09bead87c0ull)
// ZERO (CASE4)
// some properties:
// (+ZERO == -ZERO) => therefore ignore the sign
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore
// ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || ((sig_x.w[1] == 0) && (sig_x.w[0] == 0))) {
}
// REDUNDANT REPRESENTATIONS (CASE6)
- // if exponents are the same, then we have a simple comparison
+ // if exponents are the same, then we have a simple comparison
// of the significands
if (exp_y == exp_x) {
res = (((sig_x.w[1] > sig_y.w[1])
MASK_SIGN));
BID_RETURN (res);
}
- // if both components are either bigger or smaller,
+ // if both components are either bigger or smaller,
// it is clear what needs to be done
if (sig_x.w[1] >= sig_y.w[1] && sig_x.w[0] >= sig_y.w[0]
&& exp_x > exp_y) {
__mul_128x128_to_256 (sig_n_prime256, sig_x, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_y.w[1]
//else { //128 by 64 bit multiply -> 192 bits
__mul_64x128_to192 (sig_n_prime192, ten2k64[diff], sig_x);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_y.w[1]
&& (sig_n_prime192.w[0] == sig_y.w[0])) {
__mul_128x128_to_256 (sig_n_prime256, sig_y, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if postitive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_x.w[1]
}
// x is Infinity?
if ((x.w[1] & 0x7800000000000000ull) == 0x7800000000000000ull) {
- // check if y is Inf.
+ // check if y is Inf.
if (((y.w[1] & 0x7c00000000000000ull) == 0x7800000000000000ull))
- // return NaN
+ // return NaN
{
#ifdef SET_STATUS_FLAGS
__set_status_flags (pfpsf, INVALID_EXCEPTION);
}
// y is NaN?
if (((y.w[1] & 0x7c00000000000000ull) != 0x7c00000000000000ull))
- // return NaN
+ // return NaN
{
// return +/-Inf
res.w[1] = ((x.w[1] ^ y.w[1]) & 0x8000000000000000ull) |
if (((x) & 0x7800000000000000ull) == 0x7800000000000000ull) {
// check if y is Inf.
if ((((y) & 0x7c00000000000000ull) == 0x7800000000000000ull))
- // return NaN
+ // return NaN
{
#ifdef SET_STATUS_FLAGS
__set_status_flags (pfpsf, INVALID_EXCEPTION);
if ((x & 0x7800000000000000ull) == 0x7800000000000000ull) {
// check if y is Inf.
if (((y.w[1] & 0x7c00000000000000ull) == 0x7800000000000000ull))
- // return NaN
+ // return NaN
{
#ifdef SET_STATUS_FLAGS
__set_status_flags (pfpsf, INVALID_EXCEPTION);
}
// x is Infinity?
if ((x.w[1] & 0x7800000000000000ull) == 0x7800000000000000ull) {
- // check if y is Inf.
+ // check if y is Inf.
if (((y & 0x7c00000000000000ull) == 0x7800000000000000ull))
- // return NaN
+ // return NaN
{
#ifdef SET_STATUS_FLAGS
__set_status_flags (pfpsf, INVALID_EXCEPTION);
}
// y is NaN?
if (((y & 0x7c00000000000000ull) != 0x7c00000000000000ull))
- // return NaN
+ // return NaN
{
// return +/-Inf
res.w[1] = ((x.w[1] ^ y) & 0x8000000000000000ull) |
<http://www.gnu.org/licenses/>. */
/*****************************************************************************
- *
+ *
* BID128 fma x * y + z
- *
+ *
****************************************************************************/
#include "bid_internal.h"
UINT64 C_hi, C_lo;
// general correction from RN to RA, RM, RP, RZ
- // Note: if the result is negative, then is_inexact_lt_midpoint,
- // is_inexact_gt_midpoint, is_midpoint_lt_even, and is_midpoint_gt_even
- // have to be considered as if determined for the absolute value of the
+ // Note: if the result is negative, then is_inexact_lt_midpoint,
+ // is_inexact_gt_midpoint, is_midpoint_lt_even, and is_midpoint_gt_even
+ // have to be considered as if determined for the absolute value of the
// result (so they seem to be reversed)
if (is_inexact_lt_midpoint || is_inexact_gt_midpoint ||
C_hi = res.w[1] & MASK_COEFF;
C_lo = res.w[0];
if ((!sign && ((rnd_mode == ROUNDING_UP && is_inexact_lt_midpoint) ||
- ((rnd_mode == ROUNDING_TIES_AWAY || rnd_mode == ROUNDING_UP) &&
- is_midpoint_gt_even))) ||
+ ((rnd_mode == ROUNDING_TIES_AWAY || rnd_mode == ROUNDING_UP) &&
+ is_midpoint_gt_even))) ||
(sign && ((rnd_mode == ROUNDING_DOWN && is_inexact_lt_midpoint) ||
- ((rnd_mode == ROUNDING_TIES_AWAY || rnd_mode == ROUNDING_DOWN) &&
+ ((rnd_mode == ROUNDING_TIES_AWAY || rnd_mode == ROUNDING_DOWN) &&
is_midpoint_gt_even)))) {
// C = C + 1
C_lo = C_lo + 1;
if (C_lo == 0xffffffffffffffffull)
C_hi--;
// check if we crossed into the lower decade
- if (C_hi == 0x0000314dc6448d93ull && C_lo == 0x38c15b09ffffffffull) {
+ if (C_hi == 0x0000314dc6448d93ull && C_lo == 0x38c15b09ffffffffull) {
// C = 10^33 - 1
if (exp > 0) {
C_hi = 0x0001ed09bead87c0ull; // 10^34 - 1
__mul_128x128_to_256 (R256, P128, C3);
} else if (scale <= 38) { // 10^scale fits in 128 bits
__mul_128x128_to_256 (R256, ten2k128[scale - 20], C3);
- } else if (scale <= 57) { // 39 <= scale <= 57
+ } else if (scale <= 57) { // 39 <= scale <= 57
// 10^scale fits in 192 bits but C3 * 10^scale fits in 223 or 230 bits
- // (10^67 has 223 bits; 10^69 has 230 bits);
- // must split the computation:
+ // (10^67 has 223 bits; 10^69 has 230 bits);
+ // must split the computation:
// 10^scale * C3 = 10*38 * 10^(scale-38) * C3 where 10^38 takes 127
// bits and so 10^(scale-38) * C3 fits in 128 bits with certainty
// Note that 1 <= scale - 38 <= 19 => 10^(scale-38) fits in 64 bits
} else { // 58 <= scale <= 66
// 10^scale takes between 193 and 220 bits,
// and C3 * 10^scale fits in 223 bits (10^67/10^69 has 223/230 bits)
- // must split the computation:
+ // must split the computation:
// 10^scale * C3 = 10*38 * 10^(scale-38) * C3 where 10^38 takes 127
- // bits and so 10^(scale-38) * C3 fits in 128 bits with certainty
+ // bits and so 10^(scale-38) * C3 fits in 128 bits with certainty
// Note that 20 <= scale - 38 <= 30 => 10^(scale-38) fits in 128 bits
// Calculate first 10^(scale-38) * C3, which fits in 128 bits; because
// 10^(scale-38) takes more than 64 bits, C3 will take less than 64
__mul_64x128_to_128 (R128, C3.w[0], ten2k128[scale - 58]);
- // now calculate 10*38 * 10^(scale-38) * C3
+ // now calculate 10*38 * 10^(scale-38) * C3
__mul_128x128_to_256 (R256, R128, ten2k128[18]);
}
- // C3 * 10^scale is now in R256
+ // C3 * 10^scale is now in R256
- // for Cases (15), (16), (17) C4 > C3 * 10^scale because C4 has at least
- // one extra digit; for Cases (2), (3), (4), (5), or (6) any order is
- // possible
+ // for Cases (15), (16), (17) C4 > C3 * 10^scale because C4 has at least
+ // one extra digit; for Cases (2), (3), (4), (5), or (6) any order is
+ // possible
// add/subtract C4 and C3 * 10^scale; the exponent is e4
if (p_sign == z_sign) { // R256 = C4 + R256
// calculate R256 = C4 + C3 * 10^scale = C4 + R256 which is exact,
// R256 = C3 * 10^scale - C4 = R256 - C4 which is exact,
// but may require rounding
- // compare first R256 = C3 * 10^scale and C4
+ // compare first R256 = C3 * 10^scale and C4
if (R256.w[3] > C4.w[3] || (R256.w[3] == C4.w[3] && R256.w[2] > C4.w[2]) ||
(R256.w[3] == C4.w[3] && R256.w[2] == C4.w[2] && R256.w[1] > C4.w[1]) ||
(R256.w[3] == C4.w[3] && R256.w[2] == C4.w[2] && R256.w[1] == C4.w[1] &&
R256.w[0] >= C4.w[0])) { // C3 * 10^scale >= C4
// calculate R256 = C3 * 10^scale - C4 = R256 - C4, which is exact,
- // but may require rounding
+ // but may require rounding
sub256 (R256, C4, &R256);
- // flip p_sign too, because the result has the sign of z
+ // flip p_sign too, because the result has the sign of z
p_sign = z_sign;
} else { // if C4 > C3 * 10^scale
// calculate R256 = C4 - C3 * 10^scale = C4 - R256, which is exact,
- // but may require rounding
+ // but may require rounding
sub256 (C4, R256, &R256);
}
// if the result is pure zero, the sign depends on the rounding mode
- // (x*y and z had opposite signs)
+ // (x*y and z had opposite signs)
if (R256.w[3] == 0x0ull && R256.w[2] == 0x0ull &&
R256.w[1] == 0x0ull && R256.w[0] == 0x0ull) {
if (rnd_mode != ROUNDING_DOWN)
// the exponent is max (e4, expmin)
if (e4 < -6176)
e4 = expmin;
- // assemble result
+ // assemble result
res.w[1] = p_sign | ((UINT64) (e4 + 6176) << 49);
res.w[0] = 0x0;
*ptrres = res;
e4 = e4 + x0 + incr_exp;
if (rnd_mode != ROUNDING_TO_NEAREST) {
// for RM, RP, RZ, RA apply correction in order to determine tininess
- // but do not save the result; apply the correction to
+ // but do not save the result; apply the correction to
// (-1)^p_sign * significand * 10^0
P128.w[1] = p_sign | 0x3040000000000000ull | R128.w[1];
P128.w[0] = R128.w[0];
}
// at this point we have the result rounded with unbounded exponent in
// res and we know its tininess:
- // res = (-1)^p_sign * significand * 10^e4,
+ // res = (-1)^p_sign * significand * 10^e4,
// where q (significand) = ind <= p34
// Note: res is correct only if expmin <= e4 <= expmax
} else if (x0 == ind) { // 1 <= x0 = ind <= p34 = 34
// this is <, =, or > 1/2 ulp
// compare the ind-digit value in the significand of res with
- // 1/2 ulp = 5*10^(ind-1), i.e. determine whether it is
+ // 1/2 ulp = 5*10^(ind-1), i.e. determine whether it is
// less than, equal to, or greater than 1/2 ulp (significand of res)
R128.w[1] = res.w[1] & MASK_COEFF;
R128.w[0] = res.w[0];
is_inexact_gt_midpoint = 1;
}
} else { // if (ind <= 38) {
- if (R128.w[1] < midpoint128[ind - 20].w[1] ||
- (R128.w[1] == midpoint128[ind - 20].w[1] &&
+ if (R128.w[1] < midpoint128[ind - 20].w[1] ||
+ (R128.w[1] == midpoint128[ind - 20].w[1] &&
R128.w[0] < midpoint128[ind - 20].w[0])) { // < 1/2 ulp
lt_half_ulp = 1;
is_inexact_lt_midpoint = 1;
- } else if (R128.w[1] == midpoint128[ind - 20].w[1] &&
+ } else if (R128.w[1] == midpoint128[ind - 20].w[1] &&
R128.w[0] == midpoint128[ind - 20].w[0]) { // = 1/2 ulp
eq_half_ulp = 1;
is_midpoint_gt_even = 1;
res.w[1] =
p_sign | ((UINT64) (e4 + 6176) << 49) | (res.w[1] & MASK_COEFF);
// avoid a double rounding error
- if ((is_inexact_gt_midpoint0 || is_midpoint_lt_even0) &&
+ if ((is_inexact_gt_midpoint0 || is_midpoint_lt_even0) &&
is_midpoint_lt_even) { // double rounding error upward
// res = res - 1
res.w[0]--;
// is not possible in Cases (2)-(6) or (15)-(17) which may get here
is_midpoint_lt_even = 0;
is_inexact_lt_midpoint = 1;
- } else if ((is_inexact_lt_midpoint0 || is_midpoint_gt_even0) &&
+ } else if ((is_inexact_lt_midpoint0 || is_midpoint_gt_even0) &&
is_midpoint_gt_even) { // double rounding error downward
// res = res + 1
res.w[0]++;
is_inexact_gt_midpoint = 1;
} else if (!is_midpoint_lt_even && !is_midpoint_gt_even &&
!is_inexact_lt_midpoint && !is_inexact_gt_midpoint) {
- // if this second rounding was exact the result may still be
+ // if this second rounding was exact the result may still be
// inexact because of the first rounding
if (is_inexact_gt_midpoint0 || is_midpoint_lt_even0) {
is_inexact_gt_midpoint = 1;
UINT256 R256;
// the following are based on the table of special cases for fma; the NaN
- // behavior is similar to that of the IA-64 Architecture fma
+ // behavior is similar to that of the IA-64 Architecture fma
// identify cases where at least one operand is NaN
z.w[1] = z.w[1] & 0xffffc00000000000ull;
z.w[0] = 0x0ull;
}
- if ((z.w[1] & MASK_SNAN) == MASK_SNAN) { // z is SNAN
- // set invalid flag
+ if ((z.w[1] & MASK_SNAN) == MASK_SNAN) { // z is SNAN
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return quiet (z)
+ // return quiet (z)
res.w[1] = z.w[1] & 0xfc003fffffffffffull; // clear out also G[6]-G[16]
res.w[0] = z.w[0];
- } else { // z is QNaN
- // return z
+ } else { // z is QNaN
+ // return z
res.w[1] = z.w[1] & 0xfc003fffffffffffull; // clear out G[6]-G[16]
res.w[0] = z.w[0];
// if x = SNaN signal invalid exception
x.w[1] = x.w[1] & 0xffffc00000000000ull;
x.w[0] = 0x0ull;
}
- if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
- // set invalid flag
+ if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return quiet (x)
+ // return quiet (x)
res.w[1] = x.w[1] & 0xfc003fffffffffffull; // clear out also G[6]-G[16]
res.w[0] = x.w[0];
- } else { // x is QNaN
- // return x
+ } else { // x is QNaN
+ // return x
res.w[1] = x.w[1] & 0xfc003fffffffffffull; // clear out G[6]-G[16]
res.w[0] = x.w[0];
}
// x is non-canonical if coefficient is larger than 10^34 -1
C1.w[1] = 0;
C1.w[0] = 0;
- } else { // canonical
+ } else { // canonical
;
}
}
// non-canonical
y_exp = (y.w[1] << 2) & MASK_EXP; // biased and shifted left 49 bits
C2.w[1] = 0; // significand high
- C2.w[0] = 0; // significand low
+ C2.w[0] = 0; // significand low
} else { // G0_G1 != 11
y_exp = y.w[1] & MASK_EXP; // biased and shifted left 49 bits
if (C2.w[1] > 0x0001ed09bead87c0ull ||
// non-canonical
z_exp = (z.w[1] << 2) & MASK_EXP; // biased and shifted left 49 bits
C3.w[1] = 0; // significand high
- C3.w[0] = 0; // significand low
+ C3.w[0] = 0; // significand low
} else { // G0_G1 != 11
z_exp = z.w[1] & MASK_EXP; // biased and shifted left 49 bits
if (C3.w[1] > 0x0001ed09bead87c0ull ||
res.w[1] = z_sign | MASK_INF;
res.w[0] = 0x0;
} else {
- // return QNaN Indefinite
+ // return QNaN Indefinite
res.w[1] = 0x7c00000000000000ull;
res.w[0] = 0x0000000000000000ull;
// set invalid flag
if (scale == 0) {
res.w[1] = z.w[1]; // & MASK_COEFF, which is redundant
res.w[0] = z.w[0];
- } else if (q3 <= 19) { // z fits in 64 bits
+ } else if (q3 <= 19) { // z fits in 64 bits
if (scale <= 19) { // 10^scale fits in 64 bits
// 64 x 64 C3.w[0] * ten2k64[scale]
__mul_64x64_to_128MACH (res, C3.w[0], ten2k64[scale]);
// 64 x 128 C3.w[0] * ten2k128[scale - 20]
__mul_128x64_to_128 (res, C3.w[0], ten2k128[scale - 20]);
}
- } else { // z fits in 128 bits, but 10^scale must fit in 64 bits
+ } else { // z fits in 128 bits, but 10^scale must fit in 64 bits
// 64 x 128 ten2k64[scale] * C3
__mul_128x64_to_128 (res, ten2k64[scale], C3);
}
} else {
; // continue with x = f, y = f, z = 0 or x = f, y = f, z = f
}
- e1 = (x_exp >> 49) - 6176; // unbiased exponent of x
- e2 = (y_exp >> 49) - 6176; // unbiased exponent of y
+ e1 = (x_exp >> 49) - 6176; // unbiased exponent of x
+ e2 = (y_exp >> 49) - 6176; // unbiased exponent of y
e3 = (z_exp >> 49) - 6176; // unbiased exponent of z
e4 = e1 + e2; // unbiased exponent of the exact x * y
e4 = e4 + 1;
if (q4 + e4 == expmin + p34) *pfpsf |= (INEXACT_EXCEPTION | UNDERFLOW_EXCEPTION);
}
- // res is now the coefficient of the result rounded to the destination
+ // res is now the coefficient of the result rounded to the destination
// precision, with unbounded exponent; the exponent is e4; q4=digits(res)
} else { // if (q4 <= p34)
- // C4 * 10^e4 is the result rounded to the destination precision, with
+ // C4 * 10^e4 is the result rounded to the destination precision, with
// unbounded exponent (which is exact)
if ((q4 + e4 <= p34 + expmax) && (e4 > expmax)) {
res.w[1] = C4.w[1];
res.w[0] = C4.w[0];
}
- // res is the coefficient of the result rounded to the destination
- // precision, with unbounded exponent (it has q4 digits); the exponent
+ // res is the coefficient of the result rounded to the destination
+ // precision, with unbounded exponent (it has q4 digits); the exponent
// is e4 (exact result)
}
}
}
}
- e4 = e4 + x0; // expmin
+ e4 = e4 + x0; // expmin
} else if (x0 == q4) {
// the second rounding is for 0.d(0)d(1)...d(q4-1) * 10^emin
// determine relationship with 1/2 ulp
is_inexact_gt_midpoint = 1;
}
} else { // if (q4 <= 34)
- if (res.w[1] < midpoint128[q4 - 20].w[1] ||
- (res.w[1] == midpoint128[q4 - 20].w[1] &&
+ if (res.w[1] < midpoint128[q4 - 20].w[1] ||
+ (res.w[1] == midpoint128[q4 - 20].w[1] &&
res.w[0] < midpoint128[q4 - 20].w[0])) { // < 1/2 ulp
lt_half_ulp = 1;
is_inexact_lt_midpoint = 1;
- } else if (res.w[1] == midpoint128[q4 - 20].w[1] &&
+ } else if (res.w[1] == midpoint128[q4 - 20].w[1] &&
res.w[0] == midpoint128[q4 - 20].w[0]) { // = 1/2 ulp
eq_half_ulp = 1;
is_midpoint_gt_even = 1;
is_inexact_lt_midpoint = 1;
}
// avoid a double rounding error
- if ((is_inexact_gt_midpoint0 || is_midpoint_lt_even0) &&
+ if ((is_inexact_gt_midpoint0 || is_midpoint_lt_even0) &&
is_midpoint_lt_even) { // double rounding error upward
// res = res - 1
res.w[0]--;
// not possible for f * f + 0
is_midpoint_lt_even = 0;
is_inexact_lt_midpoint = 1;
- } else if ((is_inexact_lt_midpoint0 || is_midpoint_gt_even0) &&
+ } else if ((is_inexact_lt_midpoint0 || is_midpoint_gt_even0) &&
is_midpoint_gt_even) { // double rounding error downward
// res = res + 1
res.w[0]++;
is_inexact_gt_midpoint = 1;
} else if (!is_midpoint_lt_even && !is_midpoint_gt_even &&
!is_inexact_lt_midpoint && !is_inexact_gt_midpoint) {
- // if this second rounding was exact the result may still be
+ // if this second rounding was exact the result may still be
// inexact because of the first rounding
if (is_inexact_gt_midpoint0 || is_midpoint_lt_even0) {
is_inexact_gt_midpoint = 1;
; // leave res unchanged
} else if (q4 <= 19) { // x * y fits in 64 bits
if (scale <= 19) { // 10^scale fits in 64 bits
- // 64 x 64 C3.w[0] * ten2k64[scale]
+ // 64 x 64 C3.w[0] * ten2k64[scale]
__mul_64x64_to_128MACH (res, C3.w[0], ten2k64[scale]);
- } else { // 10^scale fits in 128 bits
+ } else { // 10^scale fits in 128 bits
// 64 x 128 C3.w[0] * ten2k128[scale - 20]
__mul_128x64_to_128 (res, C3.w[0], ten2k128[scale - 20]);
}
res.w[1] = p_sign | (p_exp & MASK_EXP) | res.w[1];
} else { // x * y fits in 128 bits, but 10^scale must fit in 64 bits
- // 64 x 128 ten2k64[scale] * C3
+ // 64 x 128 ten2k64[scale] * C3
__mul_128x64_to_128 (res, ten2k64[scale], C3);
res.w[1] = p_sign | (p_exp & MASK_EXP) | res.w[1];
}
res.w[1] = z_sign | ((UINT64) (e3 + 6176) << 49) | C3.w[1];
res.w[0] = C3.w[0];
}
-
+
// use the following to avoid double rounding errors when operating on
// mixed formats in rounding to nearest, and for correcting the result
// if not rounding to nearest
if (q4 == 1) {
R64 = C4.w[0];
} else {
- // if q4 > 1 then truncate C4 from q4 digits to 1 digit;
+ // if q4 > 1 then truncate C4 from q4 digits to 1 digit;
// x = q4-1, 1 <= x <= 67 and check if this operation is exact
if (q4 <= 18) { // 2 <= q4 <= 18
round64_2_18 (q4, q4 - 1, C4.w[0], &R64, &incr_exp,
// endif
// endif
// endif
- // endif
- if ((e3 == expmin && (q3 + scale) < p34) ||
- (e3 == expmin && (q3 + scale) == p34 &&
+ // endif
+ if ((e3 == expmin && (q3 + scale) < p34) ||
+ (e3 == expmin && (q3 + scale) == p34 &&
(res.w[1] & MASK_COEFF) == 0x0000314dc6448d93ull && // 10^33_high
res.w[0] == 0x38c15b0a00000000ull && // 10^33_low
z_sign != p_sign)) {
e3 = e3 - scale;
// now z_sign, z_exp, and res correspond to a z scaled to p34 = 34 digits
- // determine whether x * y is less than, equal to, or greater than
+ // determine whether x * y is less than, equal to, or greater than
// 1/2 ulp (z)
if (q4 <= 19) {
if (C4.w[0] < midpoint64[q4 - 1]) { // < 1/2 ulp
gt_half_ulp = 1;
}
} else if (q4 <= 38) {
- if (C4.w[2] == 0 && (C4.w[1] < midpoint128[q4 - 20].w[1] ||
- (C4.w[1] == midpoint128[q4 - 20].w[1] &&
+ if (C4.w[2] == 0 && (C4.w[1] < midpoint128[q4 - 20].w[1] ||
+ (C4.w[1] == midpoint128[q4 - 20].w[1] &&
C4.w[0] < midpoint128[q4 - 20].w[0]))) { // < 1/2 ulp
lt_half_ulp = 1;
- } else if (C4.w[2] == 0 && C4.w[1] == midpoint128[q4 - 20].w[1] &&
+ } else if (C4.w[2] == 0 && C4.w[1] == midpoint128[q4 - 20].w[1] &&
C4.w[0] == midpoint128[q4 - 20].w[0]) { // = 1/2 ulp
eq_half_ulp = 1;
} else { // > 1/2 ulp
gt_half_ulp = 1;
}
} else if (q4 <= 58) {
- if (C4.w[3] == 0 && (C4.w[2] < midpoint192[q4 - 39].w[2] ||
- (C4.w[2] == midpoint192[q4 - 39].w[2] &&
- C4.w[1] < midpoint192[q4 - 39].w[1]) ||
- (C4.w[2] == midpoint192[q4 - 39].w[2] &&
- C4.w[1] == midpoint192[q4 - 39].w[1] &&
+ if (C4.w[3] == 0 && (C4.w[2] < midpoint192[q4 - 39].w[2] ||
+ (C4.w[2] == midpoint192[q4 - 39].w[2] &&
+ C4.w[1] < midpoint192[q4 - 39].w[1]) ||
+ (C4.w[2] == midpoint192[q4 - 39].w[2] &&
+ C4.w[1] == midpoint192[q4 - 39].w[1] &&
C4.w[0] < midpoint192[q4 - 39].w[0]))) { // < 1/2 ulp
lt_half_ulp = 1;
- } else if (C4.w[3] == 0 && C4.w[2] == midpoint192[q4 - 39].w[2] &&
- C4.w[1] == midpoint192[q4 - 39].w[1] &&
+ } else if (C4.w[3] == 0 && C4.w[2] == midpoint192[q4 - 39].w[2] &&
+ C4.w[1] == midpoint192[q4 - 39].w[1] &&
C4.w[0] == midpoint192[q4 - 39].w[0]) { // = 1/2 ulp
eq_half_ulp = 1;
} else { // > 1/2 ulp
gt_half_ulp = 1;
}
} else {
- if (C4.w[3] < midpoint256[q4 - 59].w[3] ||
- (C4.w[3] == midpoint256[q4 - 59].w[3] &&
- C4.w[2] < midpoint256[q4 - 59].w[2]) ||
- (C4.w[3] == midpoint256[q4 - 59].w[3] &&
- C4.w[2] == midpoint256[q4 - 59].w[2] &&
- C4.w[1] < midpoint256[q4 - 59].w[1]) ||
- (C4.w[3] == midpoint256[q4 - 59].w[3] &&
- C4.w[2] == midpoint256[q4 - 59].w[2] &&
- C4.w[1] == midpoint256[q4 - 59].w[1] &&
+ if (C4.w[3] < midpoint256[q4 - 59].w[3] ||
+ (C4.w[3] == midpoint256[q4 - 59].w[3] &&
+ C4.w[2] < midpoint256[q4 - 59].w[2]) ||
+ (C4.w[3] == midpoint256[q4 - 59].w[3] &&
+ C4.w[2] == midpoint256[q4 - 59].w[2] &&
+ C4.w[1] < midpoint256[q4 - 59].w[1]) ||
+ (C4.w[3] == midpoint256[q4 - 59].w[3] &&
+ C4.w[2] == midpoint256[q4 - 59].w[2] &&
+ C4.w[1] == midpoint256[q4 - 59].w[1] &&
C4.w[0] < midpoint256[q4 - 59].w[0])) { // < 1/2 ulp
lt_half_ulp = 1;
- } else if (C4.w[3] == midpoint256[q4 - 59].w[3] &&
- C4.w[2] == midpoint256[q4 - 59].w[2] &&
- C4.w[1] == midpoint256[q4 - 59].w[1] &&
+ } else if (C4.w[3] == midpoint256[q4 - 59].w[3] &&
+ C4.w[2] == midpoint256[q4 - 59].w[2] &&
+ C4.w[1] == midpoint256[q4 - 59].w[1] &&
C4.w[0] == midpoint256[q4 - 59].w[0]) { // = 1/2 ulp
eq_half_ulp = 1;
} else { // > 1/2 ulp
if (res.w[0] == 0x0ull)
res.w[1]++;
// check for rounding overflow, when coeff == 10^34
- if ((res.w[1] & MASK_COEFF) == 0x0001ed09bead87c0ull &&
+ if ((res.w[1] & MASK_COEFF) == 0x0001ed09bead87c0ull &&
res.w[0] == 0x378d8e6400000000ull) { // coefficient = 10^34
e3 = e3 + 1;
// coeff = 10^33
is_inexact_gt_midpoint = 1; // if (z_sign), as if for absolute value
}
} else { // if (eq_half_ulp && !(res.w[0] & 0x01))
- // leave unchanged
+ // leave unchanged
res.w[1] = z_sign | (z_exp & MASK_EXP) | res.w[1];
is_midpoint_gt_even = 1; // if (z_sign), as if for absolute value
}
}
} else { // if (p_sign != z_sign)
// consider two cases, because C3 * 10^scale = 10^33 is a special case
- if (res.w[1] != 0x0000314dc6448d93ull ||
+ if (res.w[1] != 0x0000314dc6448d93ull ||
res.w[0] != 0x38c15b0a00000000ull) { // C3 * 10^scale != 10^33
if (lt_half_ulp) {
res.w[1] = z_sign | (z_exp & MASK_EXP) | res.w[1];
} else { // if C3 * 10^scale = 10^33
e3 = (z_exp >> 49) - 6176;
if (e3 > expmin) {
- // the result is exact if exp > expmin and C4 = d*10^(q4-1),
+ // the result is exact if exp > expmin and C4 = d*10^(q4-1),
// where d = 1, 2, 3, ..., 9; it could be tiny too, but exact
if (q4 == 1) {
// if q4 = 1 the result is exact
e3 = e3 - 1;
res.w[1] = z_sign | (z_exp & MASK_EXP) | res.w[1];
} else {
- // if q4 > 1 then truncate C4 from q4 digits to 1 digit;
+ // if q4 > 1 then truncate C4 from q4 digits to 1 digit;
// x = q4-1, 1 <= x <= 67 and check if this operation is exact
if (q4 <= 18) { // 2 <= q4 <= 18
round64_2_18 (q4, q4 - 1, C4.w[0], &R64, &incr_exp,
z_sign | (z_exp & MASK_EXP) | 0x0001ed09bead87c0ull;
res.w[0] = 0x378d8e6400000000ull - R64;
} else {
- // We want R64 to be the top digit of C4, but we actually
+ // We want R64 to be the top digit of C4, but we actually
// obtained (C4 * 10^(-q4+1))RN; a correction may be needed,
// because the top digit is (C4 * 10^(-q4+1))RZ
// however, if incr_exp = 1 then R64 = 10 with certainty
// tininess is C4 > 050...0 [q4 digits] which is met because
// the msd of C4 is not zero)
// the result is tiny and inexact in all rounding modes;
- // it is either 100...0 or 0999...9 (use lt_half_ulp, eq_half_ulp,
+ // it is either 100...0 or 0999...9 (use lt_half_ulp, eq_half_ulp,
// gt_half_ulp to calculate)
// if (lt_half_ulp || eq_half_ulp) res = 10^33 stays unchanged
if ((q3 <= delta && delta < p34 && p34 < delta + q4) || // Case (2)
(delta < q3 && p34 < delta + q4)) { // Case (4)
// round first the sum x * y + z with unbounded exponent
- // scale C3 up by scale = p34 - q3, 1 <= scale <= p34-1,
+ // scale C3 up by scale = p34 - q3, 1 <= scale <= p34-1,
// 1 <= scale <= 33
// calculate res = C3 * 10^scale
scale = p34 - q3;
x0 = delta + q4 - p34;
} else if (delta + q4 < q3) { // Case (6)
// make Case (6) look like Case (3) or Case (5) with scale = 0
- // by scaling up C4 by 10^(q3 - delta - q4)
+ // by scaling up C4 by 10^(q3 - delta - q4)
scale = q3 - delta - q4; // 1 <= scale <= 33
if (q4 <= 19) { // 1 <= scale <= 19; C4 fits in 64 bits
if (scale <= 19) { // 10^scale fits in 64 bits
// e4 does not need adjustment, as it is not used from this point on
scale = 0;
x0 = 0;
- // now Case (6) looks like Case (3) or Case (5) with scale = 0
+ // now Case (6) looks like Case (3) or Case (5) with scale = 0
} else { // if Case (3) or Case (5)
// Note: Case (3) is similar to Case (2), but scale differs and the
// result is exact, unless it is tiny (so x0 = 0 when calculating the
// R256.w[3] and R256.w[2] are always 0
if (incr_exp) {
// R256 = 10^(q4-x0), 1 <= q4 - x0 <= q4 - 25, 1 <= q4 - x0 <= 43
- if (q4 - x0 <= 19) { // 1 <= q4 - x0 <= 19
+ if (q4 - x0 <= 19) { // 1 <= q4 - x0 <= 19
R256.w[0] = ten2k64[q4 - x0];
// R256.w[1] stays 0
// R256.w[2] stays 0
// R256.w[3] stays 0
- } else { // 20 <= q4 - x0 <= 33
+ } else { // 20 <= q4 - x0 <= 33
R256.w[0] = ten2k128[q4 - x0 - 20].w[0];
R256.w[1] = ten2k128[q4 - x0 - 20].w[1];
// R256.w[2] stays 0
&is_inexact_gt_midpoint);
// incr_exp is 0 with certainty in this case
// avoid a double rounding error
- if ((is_inexact_gt_midpoint0 || is_midpoint_lt_even0) &&
+ if ((is_inexact_gt_midpoint0 || is_midpoint_lt_even0) &&
is_midpoint_lt_even) { // double rounding error upward
// res = res - 1
res.w[0]--;
// not possible in Cases (2)-(6) or (15)-(17) which may get here
is_midpoint_lt_even = 0;
is_inexact_lt_midpoint = 1;
- } else if ((is_inexact_lt_midpoint0 || is_midpoint_gt_even0) &&
+ } else if ((is_inexact_lt_midpoint0 || is_midpoint_gt_even0) &&
is_midpoint_gt_even) { // double rounding error downward
// res = res + 1
res.w[0]++;
} else if (!is_midpoint_lt_even && !is_midpoint_gt_even &&
!is_inexact_lt_midpoint
&& !is_inexact_gt_midpoint) {
- // if this second rounding was exact the result may still be
+ // if this second rounding was exact the result may still be
// inexact because of the first rounding
if (is_inexact_gt_midpoint0 || is_midpoint_lt_even0) {
is_inexact_gt_midpoint = 1;
res.w[0]--;
if (res.w[0] == 0xffffffffffffffffull)
res.w[1]--;
- // if the result is pure zero, the sign depends on the rounding
+ // if the result is pure zero, the sign depends on the rounding
// mode (x*y and z had opposite signs)
if (res.w[1] == 0x0ull && res.w[0] == 0x0ull) {
if (rnd_mode != ROUNDING_DOWN)
res.w[1] = res.w[1] - R128.w[1];
if (res.w[0] > tmp64)
res.w[1]--; // borrow
- // if res < 10^33 and exp > expmin need to decrease x0 and
+ // if res < 10^33 and exp > expmin need to decrease x0 and
// increase scale by 1
if (e3 > expmin && ((res.w[1] < 0x0000314dc6448d93ull ||
(res.w[1] == 0x0000314dc6448d93ull &&
goto case2_repeat;
}
// else this is the result rounded with unbounded exponent;
- // because the result has opposite sign to that of C4 which was
+ // because the result has opposite sign to that of C4 which was
// rounded, need to change the rounding indicators
if (is_inexact_lt_midpoint) {
is_inexact_lt_midpoint = 0;
res.w[0]--;
if (res.w[0] == 0xffffffffffffffffull)
res.w[1]--;
- // if the result is pure zero, the sign depends on the rounding
+ // if the result is pure zero, the sign depends on the rounding
// mode (x*y and z had opposite signs)
if (res.w[1] == 0x0ull && res.w[0] == 0x0ull) {
if (rnd_mode != ROUNDING_DOWN)
is_tiny = 1;
}
if (((res.w[1] & 0x7fffffffffffffffull) == 0x0000314dc6448d93ull) &&
- (res.w[0] == 0x38c15b0a00000000ull) && // 10^33*10^-6176
+ (res.w[0] == 0x38c15b0a00000000ull) && // 10^33*10^-6176
(z_sign != p_sign)) is_tiny = 1;
} else if (e3 < expmin) {
// the result is tiny, so we must truncate more of res
// at this point ind >= x0; because delta >= 2 on this path, the case
// ind = x0 can occur only in Case (2) or case (3), when C3 has one
- // digit (q3 = 1) equal to 1 (C3 = 1), e3 is expmin (e3 = expmin),
- // the signs of x * y and z are opposite, and through cancellation
+ // digit (q3 = 1) equal to 1 (C3 = 1), e3 is expmin (e3 = expmin),
+ // the signs of x * y and z are opposite, and through cancellation
// the most significant decimal digit in res has the weight
// 10^(emin-1); however, it is clear that in this case the most
// significant digit is 9, so the result before rounding is
}
}
// avoid a double rounding error
- if ((is_inexact_gt_midpoint0 || is_midpoint_lt_even0) &&
+ if ((is_inexact_gt_midpoint0 || is_midpoint_lt_even0) &&
is_midpoint_lt_even) { // double rounding error upward
// res = res - 1
res.w[0]--;
// not possible in Cases (2)-(6) which may get here
is_midpoint_lt_even = 0;
is_inexact_lt_midpoint = 1;
- } else if ((is_inexact_lt_midpoint0 || is_midpoint_gt_even0) &&
+ } else if ((is_inexact_lt_midpoint0 || is_midpoint_gt_even0) &&
is_midpoint_gt_even) { // double rounding error downward
// res = res + 1
res.w[0]++;
is_inexact_gt_midpoint = 1;
} else if (!is_midpoint_lt_even && !is_midpoint_gt_even &&
!is_inexact_lt_midpoint && !is_inexact_gt_midpoint) {
- // if this second rounding was exact the result may still be
+ // if this second rounding was exact the result may still be
// inexact because of the first rounding
if (is_inexact_gt_midpoint0 || is_midpoint_lt_even0) {
is_inexact_gt_midpoint = 1;
}
// now check for significand = 10^34 (may have resulted from going
// back to case2_repeat)
- if (res.w[1] == 0x0001ed09bead87c0ull &&
+ if (res.w[1] == 0x0001ed09bead87c0ull &&
res.w[0] == 0x378d8e6400000000ull) { // if res = 10^34
res.w[1] = 0x0000314dc6448d93ull; // res = 10^33
res.w[0] = 0x38c15b0a00000000ull;
// we get here only if delta <= 1 in Cases (2), (3), (4), (5), or (6) and
// the signs of x*y and z are opposite; in these cases massive
- // cancellation can occur, so it is better to scale either C3 or C4 and
- // to perform the subtraction before rounding; rounding is performed
- // next, depending on the number of decimal digits in the result and on
+ // cancellation can occur, so it is better to scale either C3 or C4 and
+ // to perform the subtraction before rounding; rounding is performed
+ // next, depending on the number of decimal digits in the result and on
// the exponent value
// Note: overlow is not possible in this case
// this is similar to Cases (15), (16), and (17)
- if (delta + q4 < q3) { // from Case (6)
- // Case (6) with 0<= delta <= 1 is similar to Cases (15), (16), and
+ if (delta + q4 < q3) { // from Case (6)
+ // Case (6) with 0 <= delta <= 1 is similar to Cases (15), (16), and
// (17) if we swap (C3, C4), (q3, q4), (e3, e4), (z_sign, p_sign)
// and call add_and_round; delta stays positive
// C4.w[3] = 0 and C4.w[2] = 0, so swap just the low part of C4 with C3
z_sign = p_sign;
p_sign = tmp_sign;
} else { // from Cases (2), (3), (4), (5)
- // In Cases (2), (3), (4), (5) with 0 <= delta <= 1 C3 has to be
- // scaled up by q4 + delta - q3; this is the same as in Cases (15),
+ // In Cases (2), (3), (4), (5) with 0 <= delta <= 1 C3 has to be
+ // scaled up by q4 + delta - q3; this is the same as in Cases (15),
// (16), and (17) if we just change the sign of delta
delta = -delta;
}
if (p_sign == z_sign) {
is_inexact_lt_midpoint = 1;
} else { // if (p_sign != z_sign)
- if (res.w[1] != 0x0000314dc6448d93ull ||
+ if (res.w[1] != 0x0000314dc6448d93ull ||
res.w[0] != 0x38c15b0a00000000ull) { // res != 10^33
is_inexact_gt_midpoint = 1;
} else { // res = 10^33 and exact is a special case
is_inexact_lt_midpoint = 1;
}
} else { // if (20 <= q3 <=34)
- if (C3.w[1] < midpoint128[q3 - 20].w[1] ||
- (C3.w[1] == midpoint128[q3 - 20].w[1] &&
+ if (C3.w[1] < midpoint128[q3 - 20].w[1] ||
+ (C3.w[1] == midpoint128[q3 - 20].w[1] &&
C3.w[0] < midpoint128[q3 - 20].w[0])) { // C3 < 1/2 ulp
// res = 10^33, unchanged
is_inexact_gt_midpoint = 1;
- } else if (C3.w[1] == midpoint128[q3 - 20].w[1] &&
+ } else if (C3.w[1] == midpoint128[q3 - 20].w[1] &&
C3.w[0] == midpoint128[q3 - 20].w[0]) { // C3 = 1/2 ulp
// res = 10^33, unchanged
is_midpoint_lt_even = 1;
res.w[0] = res.w[0] - 1;
if (res.w[0] == 0xffffffffffffffffull)
res.w[1]--;
- // if it is (10^33-1)*10^e4 then the corect result is
+ // if it is (10^33-1)*10^e4 then the correct result is
// (10^34-1)*10(e4-1)
if (res.w[1] == 0x0000314dc6448d93ull &&
res.w[0] == 0x38c15b09ffffffffull) {
(delta < p34 && p34 < q4 && q4 < delta + q3)) { // Case (12)
// round C3 to nearest to q3 - x0 digits, where x0 = e4 - e3,
- // 1 <= x0 <= q3 - 1 <= p34 - 1
+ // 1 <= x0 <= q3 - 1 <= p34 - 1
x0 = e4 - e3; // or x0 = delta + q3 - q4
if (q3 <= 18) { // 2 <= q3 <= 18
round64_2_18 (q3, x0, C3.w[0], &R64, &incr_exp,
__mul_64x128_to_128 (C3, ten2k64[1], P128);
}
e3 = e3 + x0; // this is e4
- // now add/subtract the 256-bit C4 and the new (and shorter) 128-bit C3;
+ // now add/subtract the 256-bit C4 and the new (and shorter) 128-bit C3;
// the result will have the sign of x * y; the exponent is e4
R256.w[3] = 0;
R256.w[2] = 0;
add256 (C4, R256, &R256);
} else { // if (p_sign != z_sign) { // R256 = C4 - R256
sub256 (C4, R256, &R256); // the result cannot be pure zero
- // because the result has opposite sign to that of R256 which was
+ // because the result has opposite sign to that of R256 which was
// rounded, need to change the rounding indicators
lsb = C4.w[0] & 0x01;
if (is_inexact_lt_midpoint) {
}
// determine the number of decimal digits in R256
ind = nr_digits256 (R256); // ind >= p34
- // if R256 is sum, then ind > p34; if R256 is a difference, then
+ // if R256 is sum, then ind > p34; if R256 is a difference, then
// ind >= p34; this means that we can calculate the result rounded to
// the destination precision, with unbounded exponent, starting from R256
// and using the indicators from the rounding of C3 to avoid a double
- // rounding error
+ // rounding error
if (ind < p34) {
;
} else if (ind == p34) {
- // the result rounded to the destination precision with
+ // the result rounded to the destination precision with
// unbounded exponent
// is (-1)^p_sign * R256 * 10^e4
res.w[1] = R256.w[1];
res.w[0] = R128.w[0];
// avoid a double rounding error
- if ((is_inexact_gt_midpoint0 || is_midpoint_lt_even0) &&
+ if ((is_inexact_gt_midpoint0 || is_midpoint_lt_even0) &&
is_midpoint_lt_even) { // double rounding error upward
// res = res - 1
res.w[0]--;
// (35 digits in all), possibly followed by a number of zeros; this
// not possible in Cases (2)-(6) or (15)-(17) which may get here
// if this is 10^33 - 1 make it 10^34 - 1 and decrement exponent
- if (res.w[1] == 0x0000314dc6448d93ull &&
+ if (res.w[1] == 0x0000314dc6448d93ull &&
res.w[0] == 0x38c15b09ffffffffull) { // 10^33 - 1
res.w[1] = 0x0001ed09bead87c0ull; // 10^34 - 1
res.w[0] = 0x378d8e63ffffffffull;
e4--;
}
- } else if ((is_inexact_lt_midpoint0 || is_midpoint_gt_even0) &&
+ } else if ((is_inexact_lt_midpoint0 || is_midpoint_gt_even0) &&
is_midpoint_gt_even) { // double rounding error downward
- // res = res + 1
+ // res = res + 1
res.w[0]++;
if (res.w[0] == 0)
res.w[1]++;
}
} else {
// for RM, RP, RZ, RA apply correction in order to determine tininess
- // but do not save the result; apply the correction to
+ // but do not save the result; apply the correction to
// (-1)^p_sign * res * 10^0
P128.w[1] = p_sign | 0x3040000000000000ull | res.w[1];
P128.w[0] = res.w[0];
// at this point we have the result rounded with unbounded exponent in
// res and we know its tininess:
- // res = (-1)^p_sign * significand * 10^e4,
+ // res = (-1)^p_sign * significand * 10^e4,
// where q (significand) = ind = p34
// Note: res is correct only if expmin <= e4 <= expmax
} else if (x0 == ind) { // 1 <= x0 = ind <= p34 = 34
// this is <, =, or > 1/2 ulp
// compare the ind-digit value in the significand of res with
- // 1/2 ulp = 5*10^(ind-1), i.e. determine whether it is
+ // 1/2 ulp = 5*10^(ind-1), i.e. determine whether it is
// less than, equal to, or greater than 1/2 ulp (significand of res)
R128.w[1] = res.w[1] & MASK_COEFF;
R128.w[0] = res.w[0];
is_inexact_gt_midpoint = 1;
}
} else { // if (ind <= 38)
- if (R128.w[1] < midpoint128[ind - 20].w[1] ||
- (R128.w[1] == midpoint128[ind - 20].w[1] &&
+ if (R128.w[1] < midpoint128[ind - 20].w[1] ||
+ (R128.w[1] == midpoint128[ind - 20].w[1] &&
R128.w[0] < midpoint128[ind - 20].w[0])) { // < 1/2 ulp
lt_half_ulp = 1;
is_inexact_lt_midpoint = 1;
- } else if (R128.w[1] == midpoint128[ind - 20].w[1] &&
+ } else if (R128.w[1] == midpoint128[ind - 20].w[1] &&
R128.w[0] == midpoint128[ind - 20].w[0]) { // = 1/2 ulp
eq_half_ulp = 1;
is_midpoint_gt_even = 1;
p_sign | ((UINT64) (e4 + 6176) << 49) | (res.
w[1] & MASK_COEFF);
// avoid a double rounding error
- if ((is_inexact_gt_midpoint0 || is_midpoint_lt_even0) &&
+ if ((is_inexact_gt_midpoint0 || is_midpoint_lt_even0) &&
is_midpoint_lt_even) { // double rounding error upward
// res = res - 1
res.w[0]--;
// not possible in this underflow case
is_midpoint_lt_even = 0;
is_inexact_lt_midpoint = 1;
- } else if ((is_inexact_lt_midpoint0 || is_midpoint_gt_even0) &&
+ } else if ((is_inexact_lt_midpoint0 || is_midpoint_gt_even0) &&
is_midpoint_gt_even) { // double rounding error downward
// res = res + 1
res.w[0]++;
} else if (!is_midpoint_lt_even && !is_midpoint_gt_even &&
!is_inexact_lt_midpoint
&& !is_inexact_gt_midpoint) {
- // if this second rounding was exact the result may still be
+ // if this second rounding was exact the result may still be
// inexact because of the first rounding
if (is_inexact_gt_midpoint0 || is_midpoint_lt_even0) {
is_inexact_gt_midpoint = 1;
_EXC_INFO_ARG);
#endif
- if ((rnd_mode == ROUNDING_DOWN) || (rnd_mode == ROUNDING_UP) ||
+ if ((rnd_mode == ROUNDING_DOWN) || (rnd_mode == ROUNDING_UP) ||
(rnd_mode == ROUNDING_TO_ZERO) || // no double rounding error is possible
((res.w[HIGH_128W] & MASK_NAN) == MASK_NAN) || //res=QNaN (cannot be SNaN)
- ((res.w[HIGH_128W] & MASK_ANY_INF) == MASK_INF)) { // result is infinity
+ ((res.w[HIGH_128W] & MASK_ANY_INF) == MASK_INF)) { // result is infinity
#if DECIMAL_CALL_BY_REFERENCE
bid128_to_bid64 (&res1, &res _RND_MODE_ARG _EXC_FLAGS_ARG);
#else
&& ((res1 & MASK_BINARY_SIG1) < 1000000000000000ull)
&& (is_inexact_lt_midpoint0 || is_inexact_gt_midpoint0
|| is_midpoint_lt_even0 || is_midpoint_gt_even0)) {
- // set the inexact flag and the underflow flag
+ // set the inexact flag and the underflow flag
*pfpsf |= (INEXACT_EXCEPTION | UNDERFLOW_EXCEPTION);
} else if (is_inexact_lt_midpoint0 || is_inexact_gt_midpoint0 ||
is_midpoint_lt_even0 || is_midpoint_gt_even0) {
C.w[0] = res.w[LOW_128W];
if ((C.w[1] == 0x0 && C.w[0] == 0x0) || // result is zero
- (unbexp <= (-398 - 35)) || (unbexp >= (369 + 16))) {
+ (unbexp <= (-398 - 35)) || (unbexp >= (369 + 16))) {
// clear under/overflow
#if DECIMAL_CALL_BY_REFERENCE
bid128_to_bid64 (&res1, &res _RND_MODE_ARG _EXC_FLAGS_ARG);
C.w[0] >= nr_digits[nr_bits - 1].threshold_lo))
q++;
}
- // if q > 16, round to nearest even to 16 digits (but for underflow it may
+ // if q > 16, round to nearest even to 16 digits (but for underflow it may
// have to be truncated even more)
if (q > 16) {
x0 = q - 16;
unbexp++;
q = 16; // need to set in case denormalization is necessary
} else {
- // the result does not require a second rounding (and it must have
+ // the result does not require a second rounding (and it must have
// been exact in the first rounding, since q <= 16)
res1 = C.w[0];
}
// avoid a double rounding error
- if ((is_inexact_gt_midpoint0 || is_midpoint_lt_even0) &&
+ if ((is_inexact_gt_midpoint0 || is_midpoint_lt_even0) &&
is_midpoint_lt_even) { // double rounding error upward
- // res = res - 1
- res1--; // res1 becomes odd
+ // res = res - 1
+ res1--; // res1 becomes odd
is_midpoint_lt_even = 0;
is_inexact_lt_midpoint = 1;
if (res1 == 0x00038d7ea4c67fffull) { // 10^15 - 1
- res1 = 0x002386f26fc0ffffull; // 10^16 - 1
+ res1 = 0x002386f26fc0ffffull; // 10^16 - 1
unbexp--;
}
- } else if ((is_inexact_lt_midpoint0 || is_midpoint_gt_even0) &&
+ } else if ((is_inexact_lt_midpoint0 || is_midpoint_gt_even0) &&
is_midpoint_gt_even) { // double rounding error downward
// res = res + 1
res1++; // res1 becomes odd (so it cannot be 10^16)
is_inexact_gt_midpoint = 1;
} else if (!is_midpoint_lt_even && !is_midpoint_gt_even &&
!is_inexact_lt_midpoint && !is_inexact_gt_midpoint) {
- // if this second rounding was exact the result may still be
+ // if this second rounding was exact the result may still be
// inexact because of the first rounding
if (is_inexact_gt_midpoint0 || is_midpoint_lt_even0) {
is_inexact_gt_midpoint = 1;
}
} else if (is_midpoint_gt_even &&
(is_inexact_gt_midpoint0 || is_midpoint_lt_even0)) {
- // pulled up to a midpoint
+ // pulled up to a midpoint
is_inexact_lt_midpoint = 1;
is_inexact_gt_midpoint = 0;
is_midpoint_lt_even = 0;
is_midpoint_gt_even = 0;
} else if (is_midpoint_lt_even &&
(is_inexact_lt_midpoint0 || is_midpoint_gt_even0)) {
- // pulled down to a midpoint
+ // pulled down to a midpoint
is_inexact_lt_midpoint = 0;
is_inexact_gt_midpoint = 1;
is_midpoint_lt_even = 0;
} else {
;
}
- // this is the result rounded correctly to nearest even, with unbounded exp.
+ // this is the result rounded correctly to nearest even, with unbounded exp.
// check for overflow
if (q + unbexp > P16 + expmax16) {
// 10^(unbexp - expmax16) and the product will fit in 16 decimal digits
scale = unbexp - expmax16;
res1 = res1 * ten2k64[scale]; // res1 * 10^scale
- unbexp = expmax16; // unbexp - scale
+ unbexp = expmax16; // unbexp - scale
} else {
; // continue
}
is_inexact_lt_midpoint = 1;
}
// avoid a double rounding error
- if ((is_inexact_gt_midpoint0 || is_midpoint_lt_even0) &&
+ if ((is_inexact_gt_midpoint0 || is_midpoint_lt_even0) &&
is_midpoint_lt_even) { // double rounding error upward
// res = res - 1
res1--; // res1 becomes odd
is_midpoint_lt_even = 0;
is_inexact_lt_midpoint = 1;
- } else if ((is_inexact_lt_midpoint0 || is_midpoint_gt_even0) &&
+ } else if ((is_inexact_lt_midpoint0 || is_midpoint_gt_even0) &&
is_midpoint_gt_even) { // double rounding error downward
// res = res + 1
res1++; // res1 becomes odd
is_inexact_gt_midpoint = 1;
} else if (!is_midpoint_lt_even && !is_midpoint_gt_even &&
!is_inexact_lt_midpoint && !is_inexact_gt_midpoint) {
- // if this rounding was exact the result may still be
+ // if this rounding was exact the result may still be
// inexact because of the previous roundings
if (is_inexact_gt_midpoint0 || is_midpoint_lt_even0) {
is_inexact_gt_midpoint = 1;
// ZERO (CASE4)
// some properties:
// (+ZERO == -ZERO) => therefore ignore the sign
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => ignore the exponent
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B => ignore the exponent
// field
// (Any non-canonical # is considered 0)
if ((sig_x.w[1] == 0) && (sig_x.w[0] == 0)) {
BID_RETURN (res);
}
// REDUNDANT REPRESENTATIONS (CASE6)
- // if exponents are the same, then we have a simple comparison of
+ // if exponents are the same, then we have a simple comparison of
// the significands
if (exp_y == exp_x) {
res = (((sig_x.w[1] > sig_y.w[1])
MASK_SIGN)) ? y : x;
BID_RETURN (res);
}
- // if both components are either bigger or smaller, it is clear what
+ // if both components are either bigger or smaller, it is clear what
// needs to be done
if (sig_x.w[1] >= sig_y.w[1] && sig_x.w[0] >= sig_y.w[0]
&& exp_x > exp_y) {
}
if (diff > 19) { //128 by 128 bit multiply -> 256 bits
__mul_128x128_to_256 (sig_n_prime256, sig_x, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if positive, return whichever significand is larger
// (converse if negative)
res = ((((sig_n_prime256.w[3] > 0) || sig_n_prime256.w[2] > 0)
|| (sig_n_prime256.w[1] > sig_y.w[1])
BID_RETURN (res);
}
__mul_64x128_to_192 (sig_n_prime192, ten2k64[diff], sig_x);
- // if postitive, return whichever significand is larger
+ // if positive, return whichever significand is larger
// (converse if negative)
res =
(((sig_n_prime192.w[2] > 0) || (sig_n_prime192.w[1] > sig_y.w[1])
if (diff > 19) { //128 by 128 bit multiply -> 256 bits
// adjust the y significand upwards
__mul_128x128_to_256 (sig_n_prime256, sig_y, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if positive, return whichever significand is larger
// (converse if negative)
res =
((sig_n_prime256.w[3] != 0 || sig_n_prime256.w[2] != 0
// ZERO (CASE4)
// some properties:
// (+ZERO == -ZERO) => therefore ignore the sign
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
// therefore ignore the exponent field
// (Any non-canonical # is considered 0)
if ((sig_x.w[1] == 0) && (sig_x.w[0] == 0)) {
|| (sig_x.w[1] == sig_y.w[1]
&& sig_x.w[0] >= sig_y.w[0]))
&& exp_x > exp_y)) {
- // if both components are either bigger or smaller, it is clear what
+ // if both components are either bigger or smaller, it is clear what
// needs to be done; also if the magnitudes are equal
res = y;
BID_RETURN (res);
}
if (diff > 19) { //128 by 128 bit multiply -> 256 bits
__mul_128x128_to_256 (sig_n_prime256, sig_x, ten2k128[diff - 20]);
- // if positive, return whichever significand is larger
+ // if positive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_y.w[1]
BID_RETURN (res);
}
__mul_64x128_to_192 (sig_n_prime192, ten2k64[diff], sig_x);
- // if positive, return whichever significand is larger
+ // if positive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_y.w[1]
&& (sig_n_prime192.w[0] == sig_y.w[0])) {
if (diff > 19) { //128 by 128 bit multiply -> 256 bits
// adjust the y significand upwards
__mul_128x128_to_256 (sig_n_prime256, sig_y, ten2k128[diff - 20]);
- // if positive, return whichever significand is larger
+ // if positive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_x.w[1]
// ZERO (CASE4)
// some properties:
// (+ZERO == -ZERO) => therefore ignore the sign
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
// therefore ignore the exponent field
// (Any non-canonical # is considered 0)
if ((sig_x.w[1] == 0) && (sig_x.w[0] == 0)) {
BID_RETURN (res);
}
// REDUNDANT REPRESENTATIONS (CASE6)
- // if exponents are the same, then we have a simple comparison of
+ // if exponents are the same, then we have a simple comparison of
// the significands
if (exp_y == exp_x) {
res = (((sig_x.w[1] > sig_y.w[1]) || (sig_x.w[1] == sig_y.w[1] &&
((x.w[1] & MASK_SIGN) == MASK_SIGN)) ? x : y;
BID_RETURN (res);
}
- // if both components are either bigger or smaller, it is clear what
+ // if both components are either bigger or smaller, it is clear what
// needs to be done
if ((sig_x.w[1] > sig_y.w[1]
|| (sig_x.w[1] == sig_y.w[1] && sig_x.w[0] > sig_y.w[0]))
}
if (diff > 19) { //128 by 128 bit multiply -> 256 bits
__mul_128x128_to_256 (sig_n_prime256, sig_x, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if positive, return whichever significand is larger
// (converse if negative)
res = ((((sig_n_prime256.w[3] > 0) || sig_n_prime256.w[2] > 0)
|| (sig_n_prime256.w[1] > sig_y.w[1])
BID_RETURN (res);
}
__mul_64x128_to_192 (sig_n_prime192, ten2k64[diff], sig_x);
- // if postitive, return whichever significand is larger
+ // if positive, return whichever significand is larger
// (converse if negative)
res =
(((sig_n_prime192.w[2] > 0) || (sig_n_prime192.w[1] > sig_y.w[1])
if (diff > 19) { //128 by 128 bit multiply -> 256 bits
// adjust the y significand upwards
__mul_128x128_to_256 (sig_n_prime256, sig_y, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if positive, return whichever significand is larger
// (converse if negative)
res =
((sig_n_prime256.w[3] != 0 || sig_n_prime256.w[2] != 0
// ZERO (CASE4)
// some properties:
// (+ZERO == -ZERO) => therefore ignore the sign
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
// therefore ignore the exponent field
// (Any non-canonical # is considered 0)
if ((sig_x.w[1] == 0) && (sig_x.w[0] == 0)) {
|| (sig_x.w[1] == sig_y.w[1]
&& sig_x.w[0] >= sig_y.w[0]))
&& exp_x > exp_y)) {
- // if both components are either bigger or smaller, it is clear what
+ // if both components are either bigger or smaller, it is clear what
// needs to be done; also if the magnitudes are equal
res = x;
BID_RETURN (res);
}
if (diff > 19) { //128 by 128 bit multiply -> 256 bits
__mul_128x128_to_256 (sig_n_prime256, sig_x, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if positive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_y.w[1]
if (diff > 19) { //128 by 128 bit multiply -> 256 bits
// adjust the y significand upwards
__mul_128x128_to_256 (sig_n_prime256, sig_y, ten2k128[diff - 20]);
- // if postitive, return whichever significand is larger
+ // if positive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime256.w[3] == 0 && (sig_n_prime256.w[2] == 0)
&& sig_n_prime256.w[1] == sig_x.w[1]
// x is non-canonical if coefficient is larger than 10^34 -1
C1.w[1] = 0;
C1.w[0] = 0;
- } else { // canonical
+ } else { // canonical
;
}
}
// G0_G1=11 => non-canonical
y_exp = (y.w[HIGH_128W] << 2) & MASK_EXP; // biased and shifted left 49 bits
C2.w[1] = 0; // significand high
- C2.w[0] = 0; // significand low
+ C2.w[0] = 0; // significand low
} else { // G0_G1 != 11
y_exp = y.w[HIGH_128W] & MASK_EXP; // biased and shifted left 49 bits
if (C2.w[1] > 0x0001ed09bead87c0ull ||
p_sign = x_sign ^ y_sign; // sign of the product
true_p_exp = (x_exp >> 49) - 6176 + (y_exp >> 49) - 6176;
- // true_p_exp, p_exp are used only for 0 * 0, 0 * f, or f * 0
+ // true_p_exp, p_exp are used only for 0 * 0, 0 * f, or f * 0
if (true_p_exp < -398)
p_exp = 0; // cannot be less than EXP_MIN
else if (true_p_exp > 369)
// x is non-canonical if coefficient is larger than 10^34 -1
C1.w[1] = 0;
C1.w[0] = 0;
- } else { // canonical
+ } else { // canonical
;
}
}
// G0_G1=11 => non-canonical
y_exp = (y.w[1] << 2) & MASK_EXP; // biased and shifted left 49 bits
C2.w[1] = 0; // significand high
- C2.w[0] = 0; // significand low
+ C2.w[0] = 0; // significand low
} else { // G0_G1 != 11
y_exp = y.w[1] & MASK_EXP; // biased and shifted left 49 bits
if (C2.w[1] > 0x0001ed09bead87c0ull ||
p_sign = x_sign ^ y_sign; // sign of the product
true_p_exp = (x_exp >> 49) - 6176 + (y_exp >> 49) - 6176;
- // true_p_exp, p_exp are used only for 0 * 0, 0 * f, or f * 0
+ // true_p_exp, p_exp are used only for 0 * 0, 0 * f, or f * 0
if (true_p_exp < -6176)
p_exp = 0; // cannot be less than EXP_MIN
else if (true_p_exp > 6111)
ind = exp + 6176;
// C1 = C1 * 10^ind
if (ind <= 19) { // 1 <= P34 - q1 <= 19 <=> 15 <= q1 <= 33
- if (q1 <= 19) { // 64-bit C1, 64-bit 10^ind
+ if (q1 <= 19) { // 64-bit C1, 64-bit 10^ind
__mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[ind]);
} else { // 20 <= q1 <= 33 => 128-bit C1, 64-bit 10^ind
__mul_128x64_to_128 (C1, ten2k64[ind], C1);
ind = exp + 6176;
// C1 = C1 * 10^ind
if (ind <= 19) { // 1 <= P34 - q1 <= 19 <=> 15 <= q1 <= 33
- if (q1 <= 19) { // 64-bit C1, 64-bit 10^ind
+ if (q1 <= 19) { // 64-bit C1, 64-bit 10^ind
__mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[ind]);
} else { // 20 <= q1 <= 33 => 128-bit C1, 64-bit 10^ind
__mul_128x64_to_128 (C1, ten2k64[ind], C1);
void
bid128_nextafter (UINT128 * pres, UINT128 * px,
UINT128 *
- py _EXC_FLAGS_PARAM _EXC_MASKS_PARAM _EXC_INFO_PARAM)
+ py _EXC_FLAGS_PARAM _EXC_MASKS_PARAM _EXC_INFO_PARAM)
{
UINT128 x = *px;
UINT128 y = *py;
#endif
BID_SWAP128 (res);
}
- // if the operand x is finite but the result is infinite, signal
+ // if the operand x is finite but the result is infinite, signal
// overflow and inexact
if (((x.w[1] & MASK_SPECIAL) != MASK_SPECIAL)
&& ((res.w[1] & MASK_SPECIAL) == MASK_SPECIAL)) {
#endif
*pfpsf = tmp_fpsf; // restore fpsf
if (res1 && res2) {
- // set the inexact flag
+ // set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
- // set the underflow flag
+ // set the underflow flag
*pfpsf |= UNDERFLOW_EXCEPTION;
}
BID_RETURN (res);
res = 0;
BID_RETURN (res);
}
- // unpack x
+ // unpack x
x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
C1_hi = x.w[1] & MASK_COEFF;
C1_lo = x.w[0];
res = 0;
BID_RETURN (res);
}
- // unpack x
+ // unpack x
x_exp = x.w[1] & MASK_EXP; // biased and shifted left 49 bit positions
C1_hi = x.w[1] & MASK_COEFF;
C1_lo = x.w[0];
// not NaN or infinity; extract significand to ensure it is canonical
sig_x.w[1] = x.w[1] & 0x0001ffffffffffffull;
sig_x.w[0] = x.w[0];
- // a canonical number has a coefficient < 10^34
+ // a canonical number has a coefficient < 10^34
// (0x0001ed09_bead87c0_378d8e64_00000000)
if ((sig_x.w[1] > 0x0001ed09bead87c0ull) || // significand is non-canonical
((sig_x.w[1] == 0x0001ed09bead87c0ull) && (sig_x.w[0] > 0x378d8e63ffffffffull)) || // significand is non-canonical
BID_RETURN (res);
}
exp_x = (x.w[1] >> 49) & 0x000000000003fffull;
- // if exponent is less than -6176, the number may be subnormal
+ // if exponent is less than -6176, the number may be subnormal
// (less than the smallest normal value)
// the smallest normal value is 1 x 10^-6143 = 10^33 x 10^-6176
// if (exp_x - 6176 < -6143)
res = 0; // y is a number, return 1
BID_RETURN (res);
} else {
- // x and y are both +NaN;
+ // x and y are both +NaN;
pyld_x.w[1] = x.w[1] & 0x00003fffffffffffull;
pyld_x.w[0] = x.w[0];
pyld_y.w[1] = y.w[1] & 0x00003fffffffffffull;
BID_RETURN (res);
}
// OPPOSITE SIGNS (CASE 3)
- // if signs are opposite, return 1 if x is negative
+ // if signs are opposite, return 1 if x is negative
// (if x < y, totalOrder is true)
if (((x.w[1] & MASK_SIGN) == MASK_SIGN) ^ ((y.w[1] & MASK_SIGN) ==
MASK_SIGN)) {
exp_x = (x.w[1] >> 49) & 0x000000000003fffull;
// CHECK IF x IS CANONICAL
- // 9999999999999999999999999999999999 (decimal) =
+ // 9999999999999999999999999999999999 (decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((((sig_x.w[1] > 0x0001ed09bead87c0ull) ||
((sig_x.w[1] == 0x0001ed09bead87c0ull) &&
sig_y.w[0] = y.w[0];
// CHECK IF y IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((((sig_y.w[1] > 0x0001ed09bead87c0ull) ||
((sig_y.w[1] == 0x0001ed09bead87c0ull) &&
}
}
// ZERO (CASE 5)
- // if x and y represent the same entities, and both are negative
+ // if x and y represent the same entities, and both are negative
// return true iff exp_x <= exp_y
if (x_is_zero && y_is_zero) {
- // we know that signs must be the same because we would have caught it
+ // we know that signs must be the same because we would have caught it
// in case3 if signs were different
// totalOrder(x,y) iff exp_x >= exp_y for negative numbers
// totalOrder(x,y) iff exp_x <= exp_y for positive numbers
}
// if |exp_x - exp_y| < 33, it comes down to the compensated significand
if (exp_x > exp_y) {
- // if exp_x is 33 greater than exp_y, it is definitely larger,
+ // if exp_x is 33 greater than exp_y, it is definitely larger,
// so no need for compensation
if (exp_x - exp_y > 33) {
res = ((x.w[1] & MASK_SIGN) == MASK_SIGN);
__mul_128x128_to_256 (sig_n_prime256, sig_x,
ten2k128[exp_x - exp_y - 20]);
// the compensated significands are equal (ie "x and y represent the same
- // entities") return 1 if (negative && expx > expy) ||
+ // entities") return 1 if (negative && expx > expy) ||
// (positive && expx < expy)
if ((sig_n_prime256.w[3] == 0) && (sig_n_prime256.w[2] == 0)
&& (sig_n_prime256.w[1] == sig_y.w[1])
&& (sig_n_prime256.w[0] == sig_y.w[0])) {
- // the case exp_x == exp_y cannot occur, because all bits must be
+ // the case exp_x == exp_y cannot occur, because all bits must be
// the same - would have been caught if (x == y)
res = ((exp_x <= exp_y) ^ ((x.w[1] & MASK_SIGN) == MASK_SIGN));
BID_RETURN (res);
BID_RETURN (res);
}
__mul_64x128_to_192 (sig_n_prime192, ten2k64[exp_x - exp_y], sig_x);
- // if positive, return whichever significand is larger
+ // if positive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_y.w[1]
&& (sig_n_prime192.w[0] == sig_y.w[0])) {
MASK_SIGN));
BID_RETURN (res);
}
- // if exp_x is 33 less than exp_y, it is definitely smaller,
+ // if exp_x is 33 less than exp_y, it is definitely smaller,
// no need for compensation
if (exp_y - exp_x > 33) {
res = ((x.w[1] & MASK_SIGN) != MASK_SIGN);
res = 0; // y is a number, return 0
BID_RETURN (res);
} else {
- // x and y are both +NaN;
+ // x and y are both +NaN;
pyld_x.w[1] = x.w[1] & 0x00003fffffffffffull;
pyld_x.w[0] = x.w[0];
pyld_y.w[1] = y.w[1] & 0x00003fffffffffffull;
exp_x = (x.w[1] >> 49) & 0x000000000003fffull;
// CHECK IF x IS CANONICAL
- // 9999999999999999999999999999999999 (decimal) =
+ // 9999999999999999999999999999999999 (decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((((sig_x.w[1] > 0x0001ed09bead87c0ull) ||
((sig_x.w[1] == 0x0001ed09bead87c0ull) &&
sig_y.w[0] = y.w[0];
// CHECK IF y IS CANONICAL
- // 9999999999999999999999999999999999(decimal) =
+ // 9999999999999999999999999999999999(decimal) =
// 1ed09_bead87c0_378d8e63_ffffffff(hexadecimal)
- // [0, 10^34) is the 754r supported canonical range.
+ // [0, 10^34) is the 754r supported canonical range.
// If the value exceeds that, it is interpreted as 0.
if ((((sig_y.w[1] > 0x0001ed09bead87c0ull) ||
((sig_y.w[1] == 0x0001ed09bead87c0ull) &&
}
// ZERO (CASE 4)
if (x_is_zero && y_is_zero) {
- // we know that signs must be the same because we would have caught it
+ // we know that signs must be the same because we would have caught it
// in case3 if signs were different
// totalOrder(x,y) iff exp_x <= exp_y for positive numbers
if (exp_x == exp_y) {
}
// if |exp_x - exp_y| < 33, it comes down to the compensated significand
if (exp_x > exp_y) {
- // if exp_x is 33 greater than exp_y, it is definitely larger,
+ // if exp_x is 33 greater than exp_y, it is definitely larger,
// so no need for compensation
if (exp_x - exp_y > 33) {
res = 0; // difference cannot be greater than 10^33
__mul_128x128_to_256 (sig_n_prime256, sig_x,
ten2k128[exp_x - exp_y - 20]);
// the compensated significands are equal (ie "x and y represent the same
- // entities") return 1 if (negative && expx > expy) ||
+ // entities") return 1 if (negative && expx > expy) ||
// (positive && expx < expy)
if ((sig_n_prime256.w[3] == 0) && (sig_n_prime256.w[2] == 0)
&& (sig_n_prime256.w[1] == sig_y.w[1])
&& (sig_n_prime256.w[0] == sig_y.w[0])) {
- // the case (exp_x == exp_y) cannot occur, because all bits must be
+ // the case (exp_x == exp_y) cannot occur, because all bits must be
// the same - would have been caught if (x == y)
res = (exp_x <= exp_y);
BID_RETURN (res);
BID_RETURN (res);
}
__mul_64x128_to_192 (sig_n_prime192, ten2k64[exp_x - exp_y], sig_x);
- // if positive, return whichever significand is larger
+ // if positive, return whichever significand is larger
// (converse if negative)
if ((sig_n_prime192.w[2] == 0) && sig_n_prime192.w[1] == sig_y.w[1]
&& (sig_n_prime192.w[0] == sig_y.w[0])) {
&& sig_n_prime192.w[0] < sig_y.w[0])));
BID_RETURN (res);
}
- // if exp_x is 33 less than exp_y, it is definitely smaller,
+ // if exp_x is 33 less than exp_y, it is definitely smaller,
// no need for compensation
if (exp_y - exp_x > 33) {
res = 1;
if ((y.w[1] & 0x7800000000000000ull) == 0x7800000000000000ull) {
// check if x is not Inf.
if (((x.w[1] & 0x7c00000000000000ull) < 0x7800000000000000ull)) {
- // return NaN
+ // return NaN
#ifdef SET_STATUS_FLAGS
// set status flags
__set_status_flags (pfpsf, INVALID_EXCEPTION);
if (rnd_mode == 0)
#endif
if (CR.w[0] & 1) {
- // check whether fractional part of initial_P/10^extra_digits is
- // exactly .5 this is the same as fractional part of
+ // check whether fractional part of initial_P/10^extra_digits is
+ // exactly .5 this is the same as fractional part of
// (initial_P + 0.5*10^extra_digits)/10^extra_digits is exactly zero
// get remainder
if ((x.w[1] & 0x7800000000000000ull) == 0x7800000000000000ull) {
// check if y is Inf.
if (((y.w[1] & 0x7c00000000000000ull) != 0x7c00000000000000ull))
- // return NaN
+ // return NaN
{
#ifdef SET_STATUS_FLAGS
// set status flags
// return +inf
res.w[1] = 0x7800000000000000ull;
res.w[0] = 0x0000000000000000ull;
- } else { // x is -inf
+ } else { // x is -inf
// return -inf
res.w[1] = 0xf800000000000000ull;
res.w[0] = 0x0000000000000000ull;
if ((q + exp) >= 0) { // exp < 0 and 1 <= -exp <= q
// need to shift right -exp digits from the coefficient; exp will be 0
ind = -exp; // 1 <= ind <= 34; ind is a synonym for 'x'
- // chop off ind digits from the lower part of C1
+ // chop off ind digits from the lower part of C1
// C1 = C1 + 1/2 * 10^x where the result C1 fits in 127 bits
tmp64 = C1.w[0];
if (ind <= 19) {
fstar.w[0] >= ten2mk128[ind - 1].w[0])) {
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
- } // else the result is exact
- } else { // the result is inexact; f2* <= 1/2
- // set the inexact flag
+ } // else the result is exact
+ } else { // the result is inexact; f2* <= 1/2
+ // set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
}
} else if (ind - 1 <= 21) { // 3 <= ind - 1 <= 21 => 3 <= shift <= 63
if ((q + exp) >= 0) { // exp < 0 and 1 <= -exp <= q
// need to shift right -exp digits from the coefficient; exp will be 0
ind = -exp; // 1 <= ind <= 34; ind is a synonym for 'x'
- // chop off ind digits from the lower part of C1
+ // chop off ind digits from the lower part of C1
// C1 = C1 + 1/2 * 10^x where the result C1 fits in 127 bits
tmp64 = C1.w[0];
if (ind <= 19) {
case ROUNDING_DOWN:
if ((q + exp) > 0) { // exp < 0 and 1 <= -exp < q
// need to shift right -exp digits from the coefficient; exp will be 0
- ind = -exp; // 1 <= ind <= 34; ind is a synonym for 'x'
+ ind = -exp; // 1 <= ind <= 34; ind is a synonym for 'x'
// (number of digits to be chopped off)
- // chop off ind digits from the lower part of C1
+ // chop off ind digits from the lower part of C1
// FOR ROUND_TO_NEAREST, WE ADD 1/2 ULP(y) then truncate
// FOR ROUND_TO_ZERO, WE DON'T NEED TO ADD 1/2 ULP
// FOR ROUND_TO_POSITIVE_INFINITY, WE TRUNCATE, THEN ADD 1 IF POSITIVE
case ROUNDING_UP:
if ((q + exp) > 0) { // exp < 0 and 1 <= -exp < q
// need to shift right -exp digits from the coefficient; exp will be 0
- ind = -exp; // 1 <= ind <= 34; ind is a synonym for 'x'
+ ind = -exp; // 1 <= ind <= 34; ind is a synonym for 'x'
// (number of digits to be chopped off)
- // chop off ind digits from the lower part of C1
+ // chop off ind digits from the lower part of C1
// FOR ROUND_TO_NEAREST, WE ADD 1/2 ULP(y) then truncate
// FOR ROUND_TO_ZERO, WE DON'T NEED TO ADD 1/2 ULP
// FOR ROUND_TO_POSITIVE_INFINITY, WE TRUNCATE, THEN ADD 1 IF POSITIVE
// C1.w[0] = C1.w[0] + midpoint128[ind - 20].w[0];
// C1.w[1] = C1.w[1] + midpoint128[ind - 20].w[1];
// }
- // if (C1.w[0] < tmp64) C1.w[1]++;
+ // if (C1.w[0] < tmp64) C1.w[1]++;
// if carry-out from C1.w[0], increment C1.w[1]
// calculate C* and f*
// C* is actually floor(C*) in this case
res.w[0] = P256.w[2];
// redundant fstar.w[3] = 0;
// redundant fstar.w[2] = 0;
- // redundant fstar.w[1] = P256.w[1];
+ // redundant fstar.w[1] = P256.w[1];
// redundant fstar.w[0] = P256.w[0];
// fraction f* > 10^(-x) <=> inexact
- // f* is in the right position to be compared with
+ // f* is in the right position to be compared with
// 10^(-x) from ten2mk128[]
if ((P256.w[1] > ten2mk128[ind - 1].w[1])
|| (P256.w[1] == ten2mk128[ind - 1].w[1]
fstar.w[1] = P256.w[1];
fstar.w[0] = P256.w[0];
// fraction f* > 10^(-x) <=> inexact
- // f* is in the right position to be compared with
+ // f* is in the right position to be compared with
// 10^(-x) from ten2mk128[]
if (fstar.w[2] || fstar.w[1] > ten2mk128[ind - 1].w[1] ||
(fstar.w[1] == ten2mk128[ind - 1].w[1] &&
fstar.w[1] = P256.w[1];
fstar.w[0] = P256.w[0];
// fraction f* > 10^(-x) <=> inexact
- // f* is in the right position to be compared with
+ // f* is in the right position to be compared with
// 10^(-x) from ten2mk128[]
if (fstar.w[3] || fstar.w[2]
|| fstar.w[1] > ten2mk128[ind - 1].w[1]
// need to shift right -exp digits from the coefficient; exp will be 0
ind = -exp; // 1 <= ind <= 34; ind is a synonym for 'x'
// (number of digits to be chopped off)
- // chop off ind digits from the lower part of C1
+ // chop off ind digits from the lower part of C1
// FOR ROUND_TO_NEAREST, WE ADD 1/2 ULP(y) then truncate
// FOR ROUND_TO_ZERO, WE DON'T NEED TO ADD 1/2 ULP
// FOR ROUND_TO_POSITIVE_INFINITY, WE TRUNCATE, THEN ADD 1 IF POSITIVE
// C1.w[0] = C1.w[0] + midpoint128[ind - 20].w[0];
// C1.w[1] = C1.w[1] + midpoint128[ind - 20].w[1];
// }
- // if (C1.w[0] < tmp64) C1.w[1]++;
+ // if (C1.w[0] < tmp64) C1.w[1]++;
// if carry-out from C1.w[0], increment C1.w[1]
// calculate C* and f*
// C* is actually floor(C*) in this case
res.w[0] = P256.w[2];
// redundant fstar.w[3] = 0;
// redundant fstar.w[2] = 0;
- // redundant fstar.w[1] = P256.w[1];
+ // redundant fstar.w[1] = P256.w[1];
// redundant fstar.w[0] = P256.w[0];
// fraction f* > 10^(-x) <=> inexact
- // f* is in the right position to be compared with
+ // f* is in the right position to be compared with
// 10^(-x) from ten2mk128[]
if ((P256.w[1] > ten2mk128[ind - 1].w[1])
|| (P256.w[1] == ten2mk128[ind - 1].w[1]
fstar.w[1] = P256.w[1];
fstar.w[0] = P256.w[0];
// fraction f* > 10^(-x) <=> inexact
- // f* is in the right position to be compared with
+ // f* is in the right position to be compared with
// 10^(-x) from ten2mk128[]
if (fstar.w[2] || fstar.w[1] > ten2mk128[ind - 1].w[1] ||
(fstar.w[1] == ten2mk128[ind - 1].w[1] &&
fstar.w[1] = P256.w[1];
fstar.w[0] = P256.w[0];
// fraction f* > 10^(-x) <=> inexact
- // f* is in the right position to be compared with
+ // f* is in the right position to be compared with
// 10^(-x) from ten2mk128[]
if (fstar.w[3] || fstar.w[2]
|| fstar.w[1] > ten2mk128[ind - 1].w[1]
// return +inf
res.w[1] = 0x7800000000000000ull;
res.w[0] = 0x0000000000000000ull;
- } else { // x is -inf
+ } else { // x is -inf
// return -inf
res.w[1] = 0xf800000000000000ull;
res.w[0] = 0x0000000000000000ull;
} else if ((q + exp) >= 0) { // exp < 0 and 1 <= -exp <= q
// need to shift right -exp digits from the coefficient; the exp will be 0
ind = -exp; // 1 <= ind <= 34; ind is a synonym for 'x'
- // chop off ind digits from the lower part of C1
+ // chop off ind digits from the lower part of C1
// C1 = C1 + 1/2 * 10^x where the result C1 fits in 127 bits
tmp64 = C1.w[0];
if (ind <= 19) {
UINT64 x_sign;
UINT64 x_exp;
int exp; // unbiased exponent
- // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo
+ // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo
// (all are UINT64)
BID_UI64DOUBLE tmp1;
unsigned int x_nr_bits;
int q, ind, shift;
UINT128 C1;
- // UINT128 res is C* at first - represents up to 34 decimal digits ~
+ // UINT128 res is C* at first - represents up to 34 decimal digits ~
// 113 bits
UINT256 fstar;
UINT256 P256;
// return +inf
res.w[1] = 0x7800000000000000ull;
res.w[0] = 0x0000000000000000ull;
- } else { // x is -inf
+ } else { // x is -inf
// return -inf
res.w[1] = 0xf800000000000000ull;
res.w[0] = 0x0000000000000000ull;
BID_RETURN (res);
} else if ((q + exp) > 0) { // exp < 0 and 1 <= -exp < q
// need to shift right -exp digits from the coefficient; the exp will be 0
- ind = -exp; // 1 <= ind <= 34; ind is a synonym for 'x'
+ ind = -exp; // 1 <= ind <= 34; ind is a synonym for 'x'
// (number of digits to be chopped off)
- // chop off ind digits from the lower part of C1
+ // chop off ind digits from the lower part of C1
// FOR ROUND_TO_NEAREST, WE ADD 1/2 ULP(y) then truncate
// FOR ROUND_TO_ZERO, WE DON'T NEED TO ADD 1/2 ULP
// FOR ROUND_TO_POSITIVE_INFINITY, WE TRUNCATE, THEN ADD 1 IF POSITIVE
UINT64 x_sign;
UINT64 x_exp;
int exp; // unbiased exponent
- // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo
+ // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo
// (all are UINT64)
BID_UI64DOUBLE tmp1;
unsigned int x_nr_bits;
int q, ind, shift;
UINT128 C1;
- // UINT128 res is C* at first - represents up to 34 decimal digits ~
+ // UINT128 res is C* at first - represents up to 34 decimal digits ~
// 113 bits
UINT256 fstar;
UINT256 P256;
// return +inf
res.w[1] = 0x7800000000000000ull;
res.w[0] = 0x0000000000000000ull;
- } else { // x is -inf
+ } else { // x is -inf
// return -inf
res.w[1] = 0xf800000000000000ull;
res.w[0] = 0x0000000000000000ull;
// test for input equal to zero
if ((C1.w[1] == 0x0ull) && (C1.w[0] == 0x0ull)) {
// x is 0
- // return 0 preserving the sign bit and the preferred exponent
+ // return 0 preserving the sign bit and the preferred exponent
// of MAX(Q(x), 0)
if (x_exp <= (0x1820ull << 49)) {
res.w[1] = (x.w[1] & 0x8000000000000000ull) | 0x3040000000000000ull;
// if (exp <= -p) return -0.0 or +1.0
if (x_exp <= 0x2ffc000000000000ull) { // 0x2ffc000000000000ull == -34
if (x_sign) {
- // if negative, return negative 0, because we know the coefficient
+ // if negative, return negative 0, because we know the coefficient
// is non-zero (would have been caught above)
res.w[1] = 0xb040000000000000ull;
res.w[0] = 0x0000000000000000ull;
} else {
- // if positive, return positive 1, because we know coefficient is
+ // if positive, return positive 1, because we know coefficient is
// non-zero (would have been caught above)
res.w[1] = 0x3040000000000000ull;
res.w[0] = 0x0000000000000001ull;
BID_RETURN (res);
} else if ((q + exp) > 0) { // exp < 0 and 1 <= -exp < q
// need to shift right -exp digits from the coefficient; exp will be 0
- ind = -exp; // 1 <= ind <= 34; ind is a synonym for 'x'
+ ind = -exp; // 1 <= ind <= 34; ind is a synonym for 'x'
// (number of digits to be chopped off)
- // chop off ind digits from the lower part of C1
+ // chop off ind digits from the lower part of C1
// FOR ROUND_TO_NEAREST, WE ADD 1/2 ULP(y) then truncate
// FOR ROUND_TO_ZERO, WE DON'T NEED TO ADD 1/2 ULP
// FOR ROUND_TO_POSITIVE_INFINITY, WE TRUNCATE, THEN ADD 1 IF POSITIVE
// C1.w[0] = C1.w[0] + midpoint128[ind - 20].w[0];
// C1.w[1] = C1.w[1] + midpoint128[ind - 20].w[1];
// }
- // if (C1.w[0] < tmp64) C1.w[1]++;
+ // if (C1.w[0] < tmp64) C1.w[1]++;
// if carry-out from C1.w[0], increment C1.w[1]
// calculate C* and f*
// C* is actually floor(C*) in this case
if (!x_sign) { // if positive
// redundant fstar.w[3] = 0;
// redundant fstar.w[2] = 0;
- // redundant fstar.w[1] = P256.w[1];
+ // redundant fstar.w[1] = P256.w[1];
// redundant fstar.w[0] = P256.w[0];
// fraction f* > 10^(-x) <=> inexact
- // f* is in the right position to be compared with
+ // f* is in the right position to be compared with
// 10^(-x) from ten2mk128[]
if ((P256.w[1] > ten2mk128[ind - 1].w[1])
|| (P256.w[1] == ten2mk128[ind - 1].w[1]
fstar.w[1] = P256.w[1];
fstar.w[0] = P256.w[0];
// fraction f* > 10^(-x) <=> inexact
- // f* is in the right position to be compared with
+ // f* is in the right position to be compared with
// 10^(-x) from ten2mk128[]
if (fstar.w[2] || fstar.w[1] > ten2mk128[ind - 1].w[1] ||
(fstar.w[1] == ten2mk128[ind - 1].w[1] &&
fstar.w[1] = P256.w[1];
fstar.w[0] = P256.w[0];
// fraction f* > 10^(-x) <=> inexact
- // f* is in the right position to be compared with
+ // f* is in the right position to be compared with
// 10^(-x) from ten2mk128[]
if (fstar.w[3] || fstar.w[2]
|| fstar.w[1] > ten2mk128[ind - 1].w[1]
// return +inf
res.w[1] = 0x7800000000000000ull;
res.w[0] = 0x0000000000000000ull;
- } else { // x is -inf
+ } else { // x is -inf
// return -inf
res.w[1] = 0xf800000000000000ull;
res.w[0] = 0x0000000000000000ull;
// need to shift right -exp digits from the coefficient; the exp will be 0
ind = -exp; // 1 <= ind <= 34; ind is a synonym for 'x'
// (number of digits to be chopped off)
- // chop off ind digits from the lower part of C1
+ // chop off ind digits from the lower part of C1
// FOR ROUND_TO_NEAREST, WE ADD 1/2 ULP(y) then truncate
// FOR ROUND_TO_ZERO, WE DON'T NEED TO ADD 1/2 ULP
// FOR ROUND_TO_POSITIVE_INFINITY, WE TRUNCATE, THEN ADD 1 IF POSITIVE
// C1.w[0] = C1.w[0] + midpoint128[ind - 20].w[0];
// C1.w[1] = C1.w[1] + midpoint128[ind - 20].w[1];
// }
- // if (C1.w[0] < tmp64) C1.w[1]++;
+ // if (C1.w[0] < tmp64) C1.w[1]++;
// if carry-out from C1.w[0], increment C1.w[1]
// calculate C* and f*
// C* is actually floor(C*) in this case
UINT64 x_sign;
UINT64 x_exp;
int exp; // unbiased exponent
- // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo
+ // Note: C1.w[1], C1.w[0] represent x_signif_hi, x_signif_lo
// (all are UINT64)
UINT64 tmp64;
BID_UI64DOUBLE tmp1;
unsigned int x_nr_bits;
int q, ind, shift;
UINT128 C1;
- // UINT128 res is C* at first - represents up to 34 decimal digits ~
+ // UINT128 res is C* at first - represents up to 34 decimal digits ~
// 113 bits
// UINT256 fstar;
UINT256 P256;
// return +inf
res.w[1] = 0x7800000000000000ull;
res.w[0] = 0x0000000000000000ull;
- } else { // x is -inf
+ } else { // x is -inf
// return -inf
res.w[1] = 0xf800000000000000ull;
res.w[0] = 0x0000000000000000ull;
} else if ((q + exp) >= 0) { // exp < 0 and 1 <= -exp <= q
// need to shift right -exp digits from the coefficient; the exp will be 0
ind = -exp; // 1 <= ind <= 34; ind is a synonym for 'x'
- // chop off ind digits from the lower part of C1
+ // chop off ind digits from the lower part of C1
// C1 = C1 + 1/2 * 10^x where the result C1 fits in 127 bits
tmp64 = C1.w[0];
if (ind <= 19) {
#else
void
-bid128_to_string (char *str, UINT128 x
+bid128_to_string (char *str, UINT128 x
_EXC_FLAGS_PARAM _EXC_MASKS_PARAM _EXC_INFO_PARAM) {
#endif
UINT64 x_sign;
if ((x.w[1] & MASK_NAN) == MASK_NAN) { // x is NAN
if ((x.w[1] & MASK_SNAN) == MASK_SNAN) { // x is SNAN
// set invalid flag
- str[0] = ((SINT64)x.w[1]<0)? '-':'+';
+ str[0] = ((SINT64)x.w[1]<0)? '-':'+';
str[1] = 'S';
str[2] = 'N';
str[3] = 'a';
str[4] = 'N';
str[5] = '\0';
} else { // x is QNaN
- str[0] = ((SINT64)x.w[1]<0)? '-':'+';
+ str[0] = ((SINT64)x.w[1]<0)? '-':'+';
str[1] = 'Q';
str[2] = 'N';
str[3] = 'a';
str[2] = 'n';
str[3] = 'f';
str[4] = '\0';
- } else { // x is -inf
+ } else { // x is -inf
str[0] = '-';
str[1] = 'I';
str[2] = 'n';
}
if (exp >= 0) {
str[len++] = '+';
- len += sprintf (str + len, "%u", exp);// should not use sprintf (should
+ len += sprintf (str + len, "%u", exp);// should not use sprintf (should
// use sophisticated algorithm, since we know range of exp is limited)
str[len++] = '\0';
} else {
- len += sprintf (str + len, "%d", exp);// should not use sprintf (should
+ len += sprintf (str + len, "%d", exp);// should not use sprintf (should
// use sophisticated algorithm, since we know range of exp is limited)
str[len++] = '\0';
}
// determine coefficient's representation as a decimal string
// if zero or non-canonical, set coefficient to '0'
- if ((C1.w[1] > 0x0001ed09bead87c0ull) ||
- (C1.w[1] == 0x0001ed09bead87c0ull &&
- (C1.w[0] > 0x378d8e63ffffffffull)) ||
- ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull) ||
+ if ((C1.w[1] > 0x0001ed09bead87c0ull) ||
+ (C1.w[1] == 0x0001ed09bead87c0ull &&
+ (C1.w[0] > 0x378d8e63ffffffffull)) ||
+ ((x.w[1] & 0x6000000000000000ull) == 0x6000000000000000ull) ||
((C1.w[1] == 0) && (C1.w[0] == 0))) {
str[k++] = '0';
} else {
/* ****************************************************
- This takes a bid coefficient in C1.w[1],C1.w[0]
- and put the converted character sequence at location
+ This takes a bid coefficient in C1.w[1],C1.w[0]
+ and put the converted character sequence at location
starting at &(str[k]). The function returns the number
- of MiDi returned. Note that the character sequence
+ of MiDi returned. Note that the character sequence
does not have leading zeros EXCEPT when the input is of
zero value. It will then output 1 character '0'
The algorithm essentailly tries first to get a sequence of
18 digits, we set hi = 0, and lo = d to begin with.
We then retrieve from a table, for j = 0, 1, ..., 8
that gives us A and B where c_j 2^(59+6j) = A * 10^18 + B.
- hi += A ; lo += B; After each accumulation into lo, we normalize
+ hi += A ; lo += B; After each accumulation into lo, we normalize
immediately. So at the end, we have the decomposition as we need. */
Tmp = C1.w[0] >> 59;
c = *ps;
- // if c is null or not equal to a (radix point, negative sign,
+ // if c is null or not equal to a (radix point, negative sign,
// positive sign, or number) it might be SNaN, sNaN, Infinity
if (!c
|| (c != '.' && c != '-' && c != '+'
BID_RETURN (res);
}
// return sNaN
- if (tolower_macro (ps[0]) == 's' && tolower_macro (ps[1]) == 'n' &&
- tolower_macro (ps[2]) == 'a' && tolower_macro (ps[3]) == 'n') {
+ if (tolower_macro (ps[0]) == 's' && tolower_macro (ps[1]) == 'n' &&
+ tolower_macro (ps[2]) == 'a' && tolower_macro (ps[3]) == 'n') {
// case insensitive check for snan
res.w[1] = 0x7e00000000000000ull;
BID_RETURN (res);
BID_RETURN (res);
}
}
- // if +Inf, -Inf, +Infinity, or -Infinity (case insensitive check for inf)
- if ((tolower_macro (ps[1]) == 'i' && tolower_macro (ps[2]) == 'n' &&
- tolower_macro (ps[3]) == 'f') && (!ps[4] ||
- (tolower_macro (ps[4]) == 'i' && tolower_macro (ps[5]) == 'n' &&
- tolower_macro (ps[6]) == 'i' && tolower_macro (ps[7]) == 't' &&
+ // if +Inf, -Inf, +Infinity, or -Infinity (case insensitive check for inf)
+ if ((tolower_macro (ps[1]) == 'i' && tolower_macro (ps[2]) == 'n' &&
+ tolower_macro (ps[3]) == 'f') && (!ps[4] ||
+ (tolower_macro (ps[4]) == 'i' && tolower_macro (ps[5]) == 'n' &&
+ tolower_macro (ps[6]) == 'i' && tolower_macro (ps[7]) == 't' &&
tolower_macro (ps[8]) == 'y' && !ps[9]))) { // ci check for infinity
res.w[0] = 0;
ps++;
- // for numbers such as 0.0000000000000000000000000000000000001001,
+ // for numbers such as 0.0000000000000000000000000000000000001001,
// we want to count the leading zeros
if (rdx_pt_enc) {
right_radix_leading_zeros++;
}
- // if this character is a radix point, make sure we haven't already
+ // if this character is a radix point, make sure we haven't already
// encountered one
if (*(ps) == '.') {
if (rdx_pt_enc == 0) {
rdx_pt_enc = 1;
- // if this is the first radix point, and the next character is NULL,
+ // if this is the first radix point, and the next character is NULL,
// we have a zero
if (!*(ps + 1)) {
res.w[1] =
break;
case ROUNDING_DOWN:
- if(sign_x)
+ if(sign_x)
for (; i < ndigits_total; i++) {
if (buffer[i] > '0') {
carry = 1;
}
break;
case ROUNDING_UP:
- if(!sign_x)
+ if(!sign_x)
for (; i < ndigits_total; i++) {
if (buffer[i] > '0') {
carry = 1;
}
if (dec_expon == -MAX_FORMAT_DIGITS_128
&& coeff_high > 50000000000000000ull)
- carry = 0;
+ carry = 0;
}
__mul_64x64_to_128_fast (CX, coeff_high, scale_high);
#include "bid_internal.h"
/*****************************************************************************
- * BID128_to_int32_rnint
+ * BID128_to_int32_rnint
****************************************************************************/
BID128_FUNCTION_ARG1_NORND_CUSTOMRESTYPE (int, bid128_to_int32_rnint, x)
// <=> 0.c(0)c(1)...c(q-1) * 10^20 > 0x50000000000000005, 1<=q<=34
C.w[1] = 0x0000000000000005ull;
C.w[0] = 0000000000000005ull;
- if (q <= 19) { // 1 <= q <= 19 => 1 <= 20-q <= 19 =>
+ if (q <= 19) { // 1 <= q <= 19 => 1 <= 20-q <= 19 =>
// 10^(20-q) is 64-bit, and so is C1
__mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[20 - q]);
} else if (q == 20) {
__mul_128x64_to_128 (C, ten2k64[q - 20], C); // max 47-bit x 67-bit
}
if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
- // set invalid flag
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return Integer Indefinite
+ // return Integer Indefinite
res = 0x8000000000000000ull;
BID_RETURN (res);
}
// else cases that can be rounded to a 64-bit int fall through
- // to '1 <= q + exp <= 19'
+ // to '1 <= q + exp <= 19'
}
}
// n is not too large to be converted to int64: -2^63-1/2 <= n < 2^63-1/2
// <=> 0.c(0)c(1)...c(q-1) * 10^20 > 0x50000000000000005, 1<=q<=34
C.w[1] = 0x0000000000000005ull;
C.w[0] = 0000000000000005ull;
- if (q <= 19) { // 1 <= q <= 19 => 1 <= 20-q <= 19 =>
+ if (q <= 19) { // 1 <= q <= 19 => 1 <= 20-q <= 19 =>
// 10^(20-q) is 64-bit, and so is C1
__mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[20 - q]);
} else if (q == 20) {
__mul_128x64_to_128 (C, ten2k64[q - 20], C); // max 47-bit x 67-bit
}
if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
- // set invalid flag
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return Integer Indefinite
+ // return Integer Indefinite
res = 0x8000000000000000ull;
BID_RETURN (res);
}
// else cases that can be rounded to a 64-bit int fall through
- // to '1 <= q + exp <= 19'
+ // to '1 <= q + exp <= 19'
}
}
// n is not too large to be converted to int64: -2^63-1/2 <= n < 2^63-1/2
// <=> 0.c(0)c(1)...c(q-1) * 10^20 > 0x50000000000000000, 1<=q<=34
C.w[1] = 0x0000000000000005ull;
C.w[0] = 0x0000000000000000ull;
- if (q <= 19) { // 1 <= q <= 19 => 1 <= 20-q <= 19 =>
+ if (q <= 19) { // 1 <= q <= 19 => 1 <= 20-q <= 19 =>
// 10^(20-q) is 64-bit, and so is C1
__mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[20 - q]);
} else if (q == 20) {
__mul_128x64_to_128 (C, ten2k64[q - 20], C); // max 47-bit x 67-bit
}
if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
- // set invalid flag
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return Integer Indefinite
+ // return Integer Indefinite
res = 0x8000000000000000ull;
BID_RETURN (res);
}
// else cases that can be rounded to a 64-bit int fall through
- // to '1 <= q + exp <= 19'
+ // to '1 <= q + exp <= 19'
}
}
// n is not too large to be converted to int64: -2^63-1 < n < 2^63
// <=> 0.c(0)c(1)...c(q-1) * 10^20 > 0x50000000000000000, 1<=q<=34
C.w[1] = 0x0000000000000005ull;
C.w[0] = 0x0000000000000000ull;
- if (q <= 19) { // 1 <= q <= 19 => 1 <= 20-q <= 19 =>
+ if (q <= 19) { // 1 <= q <= 19 => 1 <= 20-q <= 19 =>
// 10^(20-q) is 64-bit, and so is C1
__mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[20 - q]);
} else if (q == 20) {
__mul_128x64_to_128 (C, ten2k64[q - 20], C); // max 47-bit x 67-bit
}
if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
- // set invalid flag
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return Integer Indefinite
+ // return Integer Indefinite
res = 0x8000000000000000ull;
BID_RETURN (res);
}
// else cases that can be rounded to a 64-bit int fall through
- // to '1 <= q + exp <= 19'
+ // to '1 <= q + exp <= 19'
}
}
// n is not too large to be converted to int64: -2^63-1 < n < 2^63
// <=> 0.c(0)c(1)...c(q-1) * 10^20 > 0x5000000000000000a, 1<=q<=34
C.w[1] = 0x0000000000000005ull;
C.w[0] = 0x000000000000000aull;
- if (q <= 19) { // 1 <= q <= 19 => 1 <= 20-q <= 19 =>
+ if (q <= 19) { // 1 <= q <= 19 => 1 <= 20-q <= 19 =>
// 10^(20-q) is 64-bit, and so is C1
__mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[20 - q]);
} else if (q == 20) {
__mul_128x64_to_128 (C, ten2k64[q - 20], C); // max 47-bit x 67-bit
}
if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] > C.w[0])) {
- // set invalid flag
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return Integer Indefinite
+ // return Integer Indefinite
res = 0x8000000000000000ull;
BID_RETURN (res);
}
// else cases that can be rounded to a 64-bit int fall through
- // to '1 <= q + exp <= 19'
+ // to '1 <= q + exp <= 19'
}
}
// n is not too large to be converted to int64: -2^63-1 < n <= 2^63 - 1
// <=> 0.c(0)c(1)...c(q-1) * 10^20 > 0x5000000000000000a, 1<=q<=34
C.w[1] = 0x0000000000000005ull;
C.w[0] = 0x000000000000000aull;
- if (q <= 19) { // 1 <= q <= 19 => 1 <= 20-q <= 19 =>
+ if (q <= 19) { // 1 <= q <= 19 => 1 <= 20-q <= 19 =>
// 10^(20-q) is 64-bit, and so is C1
__mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[20 - q]);
} else if (q == 20) {
__mul_128x64_to_128 (C, ten2k64[q - 20], C); // max 47-bit x 67-bit
}
if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] > C.w[0])) {
- // set invalid flag
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return Integer Indefinite
+ // return Integer Indefinite
res = 0x8000000000000000ull;
BID_RETURN (res);
}
// else cases that can be rounded to a 64-bit int fall through
- // to '1 <= q + exp <= 19'
+ // to '1 <= q + exp <= 19'
}
}
// n is not too large to be converted to int64: -2^63-1 < n <= 2^63 - 1
// <=> 0.c(0)c(1)...c(q-1) * 10^20 >= 0x5000000000000000a, 1<=q<=34
C.w[1] = 0x0000000000000005ull;
C.w[0] = 0x000000000000000aull;
- if (q <= 19) { // 1 <= q <= 19 => 1 <= 20-q <= 19 =>
+ if (q <= 19) { // 1 <= q <= 19 => 1 <= 20-q <= 19 =>
// 10^(20-q) is 64-bit, and so is C1
__mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[20 - q]);
} else if (q == 20) {
__mul_128x64_to_128 (C, ten2k64[q - 20], C); // max 47-bit x 67-bit
}
if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
- // set invalid flag
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return Integer Indefinite
+ // return Integer Indefinite
res = 0x8000000000000000ull;
BID_RETURN (res);
}
// else cases that can be rounded to a 64-bit int fall through
- // to '1 <= q + exp <= 19'
+ // to '1 <= q + exp <= 19'
}
}
// n is not too large to be converted to int64: -2^63-1 < n < 2^63
// <=> 0.c(0)c(1)...c(q-1) * 10^20 >= 0x5000000000000000a, 1<=q<=34
C.w[1] = 0x0000000000000005ull;
C.w[0] = 0x000000000000000aull;
- if (q <= 19) { // 1 <= q <= 19 => 1 <= 20-q <= 19 =>
+ if (q <= 19) { // 1 <= q <= 19 => 1 <= 20-q <= 19 =>
// 10^(20-q) is 64-bit, and so is C1
__mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[20 - q]);
} else if (q == 20) {
__mul_128x64_to_128 (C, ten2k64[q - 20], C); // max 47-bit x 67-bit
}
if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
- // set invalid flag
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return Integer Indefinite
+ // return Integer Indefinite
res = 0x8000000000000000ull;
BID_RETURN (res);
}
// else cases that can be rounded to a 64-bit int fall through
- // to '1 <= q + exp <= 19'
+ // to '1 <= q + exp <= 19'
}
}
// n is not too large to be converted to int64: -2^63-1 < n < 2^63
// <=> 0.c(0)c(1)...c(q-1) * 10^20 >= 0x50000000000000005, 1<=q<=34
C.w[1] = 0x0000000000000005ull;
C.w[0] = 0000000000000005ull;
- if (q <= 19) { // 1 <= q <= 19 => 1 <= 20-q <= 19 =>
+ if (q <= 19) { // 1 <= q <= 19 => 1 <= 20-q <= 19 =>
// 10^(20-q) is 64-bit, and so is C1
__mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[20 - q]);
} else if (q == 20) {
__mul_128x64_to_128 (C, ten2k64[q - 20], C); // max 47-bit x 67-bit
}
if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
- // set invalid flag
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return Integer Indefinite
+ // return Integer Indefinite
res = 0x8000000000000000ull;
BID_RETURN (res);
}
// else cases that can be rounded to a 64-bit int fall through
- // to '1 <= q + exp <= 19'
+ // to '1 <= q + exp <= 19'
}
}
// n is not too large to be converted to int64: -2^63-1/2 <= n < 2^63-1/2
// <=> 0.c(0)c(1)...c(q-1) * 10^20 >= 0x50000000000000005, 1<=q<=34
C.w[1] = 0x0000000000000005ull;
C.w[0] = 0000000000000005ull;
- if (q <= 19) { // 1 <= q <= 19 => 1 <= 20-q <= 19 =>
+ if (q <= 19) { // 1 <= q <= 19 => 1 <= 20-q <= 19 =>
// 10^(20-q) is 64-bit, and so is C1
__mul_64x64_to_128MACH (C1, C1.w[0], ten2k64[20 - q]);
} else if (q == 20) {
__mul_128x64_to_128 (C, ten2k64[q - 20], C); // max 47-bit x 67-bit
}
if (C1.w[1] > C.w[1] || (C1.w[1] == C.w[1] && C1.w[0] >= C.w[0])) {
- // set invalid flag
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return Integer Indefinite
+ // return Integer Indefinite
res = 0x8000000000000000ull;
BID_RETURN (res);
}
// else cases that can be rounded to a 64-bit int fall through
- // to '1 <= q + exp <= 19'
+ // to '1 <= q + exp <= 19'
}
}
// n is not too large to be converted to int64: -2^63-1/2 <= n < 2^63-1/2
// res = 0
// else if x > 0
// res = +1
- // else // if x < 0
- // invalid exc
+ // else // if x < 0
+ // invalid exc
ind = q - 1;
if (ind <= 18) { // 0 <= ind <= 18
if ((C1.w[1] == 0) && (C1.w[0] < midpoint64[ind])) {
// res = 0
// else if x > 0
// res = +1
- // else // if x < 0
- // invalid exc
+ // else // if x < 0
+ // invalid exc
ind = q - 1;
if (ind <= 18) { // 0 <= ind <= 18
if ((C1.w[1] == 0) && (C1.w[0] < midpoint64[ind])) {
// <=> 0.c(0)c(1)...c(q-1) * 10^21 > 0x05, 1<=q<=34
// <=> C * 10^(21-q) > 0x05, 1<=q<=34
if (q == 21) {
- // C > 5
+ // C > 5
if (C1.w[1] != 0 || C1.w[0] > 0x05ull) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// <=> 0.c(0)c(1)...c(q-1) * 10^21 > 0x05, 1<=q<=34
// <=> C * 10^(21-q) > 0x05, 1<=q<=34
if (q == 21) {
- // C > 5
+ // C > 5
if (C1.w[1] != 0 || C1.w[0] > 0x05ull) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// <=> 0.c(0)c(1)...c(q-1) * 10^21 >= 0x0a, 1<=q<=34
// <=> C * 10^(21-q) >= 0x0a, 1<=q<=34
if (q == 21) {
- // C >= a
+ // C >= a
if (C1.w[1] != 0 || C1.w[0] >= 0x0aull) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// if n > 2^64 - 1 then n is too large
// <=> c(0)c(1)...c(19).c(20)...c(q-1) > 2^64 - 1
// <=> 0.c(0)c(1)...c(19)c(20)...c(q-1) * 10^20 > 2^64 - 1
- // <=> 0.c(0)c(1)...c(19)c(20)...c(q-1) * 10^21 > 10 * (2^64 - 1)
+ // <=> 0.c(0)c(1)...c(19)c(20)...c(q-1) * 10^21 > 10 * (2^64 - 1)
// <=> C * 10^(21-q) > 0x9fffffffffffffff6, 1<=q<=34
if (q == 1) {
// C * 10^20 > 0x9fffffffffffffff6
// <=> 0.c(0)c(1)...c(q-1) * 10^21 >= 0x0a, 1<=q<=34
// <=> C * 10^(21-q) >= 0x0a, 1<=q<=34
if (q == 21) {
- // C >= a
+ // C >= a
if (C1.w[1] != 0 || C1.w[0] >= 0x0aull) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// if n > 2^64 - 1 then n is too large
// <=> c(0)c(1)...c(19).c(20)...c(q-1) > 2^64 - 1
// <=> 0.c(0)c(1)...c(19)c(20)...c(q-1) * 10^20 > 2^64 - 1
- // <=> 0.c(0)c(1)...c(19)c(20)...c(q-1) * 10^21 > 10 * (2^64 - 1)
+ // <=> 0.c(0)c(1)...c(19)c(20)...c(q-1) * 10^21 > 10 * (2^64 - 1)
// <=> C * 10^(21-q) > 0x9fffffffffffffff6, 1<=q<=34
if (q == 1) {
// C * 10^20 > 0x9fffffffffffffff6
// <=> 0.c(0)c(1)...c(q-1) * 10^21 >= 0x0a, 1<=q<=34
// <=> C * 10^(21-q) >= 0x0a, 1<=q<=34
if (q == 21) {
- // C >= a
+ // C >= a
if (C1.w[1] != 0 || C1.w[0] >= 0x0aull) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// <=> 0.c(0)c(1)...c(q-1) * 10^21 >= 0x0a, 1<=q<=34
// <=> C * 10^(21-q) >= 0x0a, 1<=q<=34
if (q == 21) {
- // C >= a
+ // C >= a
if (C1.w[1] != 0 || C1.w[0] >= 0x0aull) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// <=> 0.c(0)c(1)...c(q-1) * 10^21 >= 0x05, 1<=q<=34
// <=> C * 10^(21-q) >= 0x05, 1<=q<=34
if (q == 21) {
- // C >= 5
+ // C >= 5
if (C1.w[1] != 0 || C1.w[0] >= 0x05ull) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// <=> 0.c(0)c(1)...c(q-1) * 10^21 >= 0x05, 1<=q<=34
// <=> C * 10^(21-q) >= 0x05, 1<=q<=34
if (q == 21) {
- // C >= 5
+ // C >= 5
if (C1.w[1] != 0 || C1.w[0] >= 0x05ull) {
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
#define BID_128RES
#include "bid_internal.h"
-/*
+/*
* Takes a BID32 as input and converts it to a BID128 and returns it.
*/
TYPE0_FUNCTION_ARGTYPE1_NORND (UINT128, bid32_to_bid128, UINT32, x)
if (rmode == 0) //ROUNDING_TO_NEAREST
#endif
if (coefficient_x & 1) {
- // check whether fractional part of initial_P/10^extra_digits
+ // check whether fractional part of initial_P/10^extra_digits
// is exactly .5
// get remainder
* else
* add sign_a*coefficient_a*10^diff_expon, sign_b*coefficient_b
* in 128-bit integer arithmetic, then round to 16 decimal digits
- *
+ *
*
****************************************************************************/
amount = short_recip_scale[extra_digits];
C0_64 = CT.w[1] >> amount;
- // result coefficient
+ // result coefficient
C64 = C0_64 + coefficient_a;
// filter out difficult (corner) cases
- // this test ensures the number of digits in coefficient_a does not change
+ // this test ensures the number of digits in coefficient_a does not change
// after adding (the appropriately scaled and rounded) coefficient_b
if ((UINT64) (C64 - 1000000000000000ull - 1) >
9000000000000000ull - 2) {
amount = short_recip_scale[extra_digits];
C0_64 = CT.w[1] >> amount;
- // result coefficient
+ // result coefficient
C64 = C0_64 + coefficient_a;
} else if (C64 <= 1000000000000000ull) {
// less than 16 digits in result
amount = short_recip_scale[extra_digits];
C0_64 = CT_new.w[1] >> amount;
- // result coefficient
+ // result coefficient
C64_new = C0_64 + coefficient_a;
if (C64_new < 10000000000000000ull) {
C64 = C64_new;
if (rmode == 0) //ROUNDING_TO_NEAREST
#endif
if (C64 & 1) {
- // check whether fractional part of initial_P/10^extra_digits is
+ // check whether fractional part of initial_P/10^extra_digits is
// exactly .5
- // this is the same as fractional part of
+ // this is the same as fractional part of
// (initial_P + 0.5*10^extra_digits)/10^extra_digits is exactly zero
// get remainder
char x_is_zero = 0, y_is_zero = 0, non_canon_x, non_canon_y, lcv;
// NaN (CASE1)
- // if either number is NAN, the comparison is unordered,
+ // if either number is NAN, the comparison is unordered,
// rather than equal : return 0
if (((x & MASK_NAN) == MASK_NAN) || ((y & MASK_NAN) == MASK_NAN)) {
if ((x & MASK_SNAN) == MASK_SNAN || (y & MASK_SNAN) == MASK_SNAN) {
// ZERO (CASE4)
// some properties:
// (+ZERO==-ZERO) => therefore ignore the sign
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
// therefore ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || sig_x == 0) {
char x_is_zero = 0, y_is_zero = 0, non_canon_x, non_canon_y;
// NaN (CASE1)
- // if either number is NAN, the comparison is unordered, rather than equal :
+ // if either number is NAN, the comparison is unordered, rather than equal :
// return 0
if (((x & MASK_NAN) == MASK_NAN) || ((y & MASK_NAN) == MASK_NAN)) {
if ((x & MASK_SNAN) == MASK_SNAN || (y & MASK_SNAN) == MASK_SNAN) {
res = 0;
BID_RETURN (res);
} else {
- // x is pos infinity, it is greater, unless y is positive
+ // x is pos infinity, it is greater, unless y is positive
// infinity => return y!=pos_infinity
res = (((y & MASK_INF) != MASK_INF)
|| ((y & MASK_SIGN) == MASK_SIGN));
// ZERO (CASE4)
// some properties:
//(+ZERO==-ZERO) => therefore ignore the sign, and neither number is greater
- //(ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore ignore the
+ //(ZERO x 10^A == ZERO x 10^B) for any valid A, B => therefore ignore the
// exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || sig_x == 0) {
BID_RETURN (res);
}
// REDUNDANT REPRESENTATIONS (CASE6)
- // if both components are either bigger or smaller,
+ // if both components are either bigger or smaller,
// it is clear what needs to be done
if (sig_x > sig_y && exp_x > exp_y) {
res = ((x & MASK_SIGN) != MASK_SIGN);
// adjust the y significand upwards
__mul_64x64_to_128MACH (sig_n_prime, sig_y,
mult_factor[exp_y - exp_x]);
- // if postitive, return whichever significand is larger
+ // if positive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime.w[1] == 0 && (sig_n_prime.w[0] == sig_x)) {
res = 0;
// ZERO (CASE4)
// some properties:
// (+ZERO==-ZERO) => therefore ignore the sign, and neither number is greater
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
// therefore ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || sig_x == 0) {
res = 1;
BID_RETURN (res);
}
- // if postitive, return whichever significand abs is smaller
+ // if positive, return whichever significand abs is smaller
// (converse if negative)
res = (((sig_n_prime.w[1] == 0)
&& sig_n_prime.w[0] < sig_y) ^ ((x & MASK_SIGN) !=
res = 1;
BID_RETURN (res);
}
- // if positive, return whichever significand abs is smaller
+ // if positive, return whichever significand abs is smaller
// (converse if negative)
res = (((sig_n_prime.w[1] > 0)
|| (sig_x < sig_n_prime.w[0])) ^ ((x & MASK_SIGN) !=
char x_is_zero = 0, y_is_zero = 0, non_canon_x, non_canon_y;
// NaN (CASE1)
- // if either number is NAN, the comparison is unordered, rather than equal :
+ // if either number is NAN, the comparison is unordered, rather than equal :
// return 0
if (((x & MASK_NAN) == MASK_NAN) || ((y & MASK_NAN) == MASK_NAN)) {
if ((x & MASK_SNAN) == MASK_SNAN || (y & MASK_SNAN) == MASK_SNAN) {
res = 0;
BID_RETURN (res);
} else {
- // x is pos infinity, it is greater, unless y is positive infinity =>
+ // x is pos infinity, it is greater, unless y is positive infinity =>
// return y!=pos_infinity
res = (((y & MASK_INF) != MASK_INF)
|| ((y & MASK_SIGN) == MASK_SIGN));
// ZERO (CASE4)
// some properties:
// (+ZERO==-ZERO) => therefore ignore the sign, and neither number is greater
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
// therefore ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || sig_x == 0) {
// otherwise adjust the x significand upwards
__mul_64x64_to_128MACH (sig_n_prime, sig_x,
mult_factor[exp_x - exp_y]);
- // if postitive, return whichever significand is larger
+ // if positive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime.w[1] == 0 && (sig_n_prime.w[0] == sig_y)) {
res = 0;
void
bid64_quiet_less (int *pres, UINT64 * px,
UINT64 *
- py _EXC_FLAGS_PARAM _EXC_MASKS_PARAM _EXC_INFO_PARAM)
+ py _EXC_FLAGS_PARAM _EXC_MASKS_PARAM _EXC_INFO_PARAM)
{
UINT64 x = *px;
UINT64 y = *py;
// ZERO (CASE4)
// some properties:
// (+ZERO==-ZERO) => therefore ignore the sign, and neither number is greater
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
// therefore ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || sig_x == 0) {
BID_RETURN (res);
}
// REDUNDANT REPRESENTATIONS (CASE6)
- // if both components are either bigger or smaller,
+ // if both components are either bigger or smaller,
// it is clear what needs to be done
if (sig_x > sig_y && exp_x >= exp_y) {
res = ((x & MASK_SIGN) == MASK_SIGN);
res = 0;
BID_RETURN (res);
}
- // if postitive, return whichever significand abs is smaller
+ // if positive, return whichever significand abs is smaller
// (converse if negative)
res = (((sig_n_prime.w[1] == 0)
&& sig_n_prime.w[0] < sig_y) ^ ((x & MASK_SIGN) ==
res = 0;
BID_RETURN (res);
}
- // if positive, return whichever significand abs is smaller
+ // if positive, return whichever significand abs is smaller
// (converse if negative)
res = (((sig_n_prime.w[1] > 0)
|| (sig_x < sig_n_prime.w[0])) ^ ((x & MASK_SIGN) ==
char x_is_zero = 0, y_is_zero = 0, non_canon_x, non_canon_y;
// NaN (CASE1)
- // if either number is NAN, the comparison is unordered, rather than equal :
+ // if either number is NAN, the comparison is unordered, rather than equal :
// return 0
if (((x & MASK_NAN) == MASK_NAN) || ((y & MASK_NAN) == MASK_NAN)) {
if ((x & MASK_SNAN) == MASK_SNAN || (y & MASK_SNAN) == MASK_SNAN) {
res = 1;
BID_RETURN (res);
} else {
- // x is pos infinity, it is greater, unless y is positive infinity =>
+ // x is pos infinity, it is greater, unless y is positive infinity =>
// return y==pos_infinity
res = !(((y & MASK_INF) != MASK_INF)
|| ((y & MASK_SIGN) == MASK_SIGN));
// ZERO (CASE4)
// some properties:
// (+ZERO==-ZERO) => therefore ignore the sign, and neither number is greater
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
// therefore ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || sig_x == 0) {
res = 1;
BID_RETURN (res);
}
- // if postitive, return whichever significand abs is smaller
+ // if positive, return whichever significand abs is smaller
// (converse if negative)
res = (((sig_n_prime.w[1] == 0)
&& sig_n_prime.w[0] < sig_y) ^ ((x & MASK_SIGN) ==
res = 1;
BID_RETURN (res);
}
- // if positive, return whichever significand abs is smaller
+ // if positive, return whichever significand abs is smaller
// (converse if negative)
res = (((sig_n_prime.w[1] > 0)
|| (sig_x < sig_n_prime.w[0])) ^ ((x & MASK_SIGN) ==
// ZERO (CASE4)
// some properties:
// (+ZERO==-ZERO) => therefore ignore the sign, and neither number is greater
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
// therefore ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || sig_x == 0) {
res = 0;
BID_RETURN (res);
}
- // if postitive, return whichever significand abs is smaller
+ // if positive, return whichever significand abs is smaller
// (converse if negative)
res = (((sig_n_prime.w[1] == 0)
&& sig_n_prime.w[0] < sig_y) ^ ((x & MASK_SIGN) ==
res = 0;
BID_RETURN (res);
}
- // if positive, return whichever significand abs is smaller
+ // if positive, return whichever significand abs is smaller
// (converse if negative)
res = (((sig_n_prime.w[1] > 0)
|| (sig_x < sig_n_prime.w[0])) ^ ((x & MASK_SIGN) ==
char x_is_zero = 0, y_is_zero = 0, non_canon_x, non_canon_y, lcv;
// NaN (CASE1)
- // if either number is NAN, the comparison is unordered,
+ // if either number is NAN, the comparison is unordered,
// rather than equal : return 1
if (((x & MASK_NAN) == MASK_NAN) || ((y & MASK_NAN) == MASK_NAN)) {
if ((x & MASK_SNAN) == MASK_SNAN || (y & MASK_SNAN) == MASK_SNAN) {
// ZERO (CASE4)
// some properties:
// (+ZERO==-ZERO) => therefore ignore the sign
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
// therefore ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || sig_x == 0) {
char x_is_zero = 0, y_is_zero = 0, non_canon_x, non_canon_y;
// NaN (CASE1)
- // if either number is NAN, the comparison is unordered,
+ // if either number is NAN, the comparison is unordered,
// rather than equal : return 0
if (((x & MASK_NAN) == MASK_NAN) || ((y & MASK_NAN) == MASK_NAN)) {
if ((x & MASK_SNAN) == MASK_SNAN || (y & MASK_SNAN) == MASK_SNAN) {
res = 1;
BID_RETURN (res);
}
- // x is pos infinity, it is greater, unless y is positive
+ // x is pos infinity, it is greater, unless y is positive
// infinity => return y==pos_infinity
else {
res = !(((y & MASK_INF) != MASK_INF)
// ZERO (CASE4)
// some properties:
- // (+ZERO==-ZERO) => therefore ignore the sign, and neither
+ // (+ZERO==-ZERO) => therefore ignore the sign, and neither
// number is greater
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
// therefore ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || sig_x == 0) {
res = 1;
BID_RETURN (res);
}
- // if postitive, return whichever significand abs is smaller
+ // if positive, return whichever significand abs is smaller
// (converse if negative)
{
res = (((sig_n_prime.w[1] == 0)
res = 1;
BID_RETURN (res);
}
- // if positive, return whichever significand abs is smaller
+ // if positive, return whichever significand abs is smaller
// (converse if negative)
{
res = (((sig_n_prime.w[1] > 0)
// ZERO (CASE4)
// some properties:
- // (+ZERO==-ZERO) => therefore ignore the sign, and neither
+ // (+ZERO==-ZERO) => therefore ignore the sign, and neither
// number is greater
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
// therefore ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || sig_x == 0) {
res = 1;
BID_RETURN (res);
}
- // if postitive, return whichever significand abs is smaller
+ // if positive, return whichever significand abs is smaller
// (converse if negative)
{
res = (((sig_n_prime.w[1] == 0)
res = 1;
BID_RETURN (res);
}
- // if positive, return whichever significand abs is smaller
+ // if positive, return whichever significand abs is smaller
// (converse if negative)
{
res = (((sig_n_prime.w[1] > 0)
int res;
// NaN (CASE1)
- // if either number is NAN, the comparison is unordered,
+ // if either number is NAN, the comparison is unordered,
// rather than equal : return 0
if (((x & MASK_NAN) == MASK_NAN) || ((y & MASK_NAN) == MASK_NAN)) {
if ((x & MASK_SNAN) == MASK_SNAN || (y & MASK_SNAN) == MASK_SNAN) {
char x_is_zero = 0, y_is_zero = 0, non_canon_x, non_canon_y;
// NaN (CASE1)
- // if either number is NAN, the comparison is unordered,
+ // if either number is NAN, the comparison is unordered,
// rather than equal : return 0
if (((x & MASK_NAN) == MASK_NAN) || ((y & MASK_NAN) == MASK_NAN)) {
*pfpsf |= INVALID_EXCEPTION; // set invalid exception if NaN
res = 0;
BID_RETURN (res);
}
- // x is pos infinity, it is greater,
+ // x is pos infinity, it is greater,
// unless y is positive infinity => return y!=pos_infinity
else {
res = (((y & MASK_INF) != MASK_INF)
// ZERO (CASE4)
// some properties:
// (+ZERO==-ZERO) => therefore ignore the sign, and neither number is greater
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
// therefore ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || sig_x == 0) {
mult_factor[exp_x - exp_y]);
- // if postitive, return whichever significand is larger
+ // if positive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime.w[1] == 0 && (sig_n_prime.w[0] == sig_y)) {
res = 0;
__mul_64x64_to_128MACH (sig_n_prime, sig_y,
mult_factor[exp_y - exp_x]);
- // if postitive, return whichever significand is larger
+ // if positive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime.w[1] == 0 && (sig_n_prime.w[0] == sig_x)) {
res = 0;
// ZERO (CASE4)
// some properties:
// (+ZERO==-ZERO) => therefore ignore the sign, and neither number is greater
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
// therefore ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || sig_x == 0) {
res = 1;
BID_RETURN (res);
}
- // if postitive, return whichever significand abs is smaller
+ // if positive, return whichever significand abs is smaller
// (converse if negative)
{
res = (((sig_n_prime.w[1] == 0)
res = 1;
BID_RETURN (res);
}
- // if positive, return whichever significand abs is smaller
+ // if positive, return whichever significand abs is smaller
// (converse if negative)
{
res = (((sig_n_prime.w[1] > 0)
char x_is_zero = 0, y_is_zero = 0, non_canon_x, non_canon_y;
// NaN (CASE1)
- // if either number is NAN, the comparison is unordered,
+ // if either number is NAN, the comparison is unordered,
// rather than equal : return 0
if (((x & MASK_NAN) == MASK_NAN) || ((y & MASK_NAN) == MASK_NAN)) {
*pfpsf |= INVALID_EXCEPTION; // set invalid exception if NaN
res = 0;
BID_RETURN (res);
}
- // x is pos infinity, it is greater,
+ // x is pos infinity, it is greater,
// unless y is positive infinity => return y!=pos_infinity
else {
res = (((y & MASK_INF) != MASK_INF)
// ZERO (CASE4)
// some properties:
// (+ZERO==-ZERO) => therefore ignore the sign, and neither number is greater
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
// therefore ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || sig_x == 0) {
__mul_64x64_to_128MACH (sig_n_prime, sig_x,
mult_factor[exp_x - exp_y]);
- // if postitive, return whichever significand is larger
+ // if positive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime.w[1] == 0 && (sig_n_prime.w[0] == sig_y)) {
res = 0;
__mul_64x64_to_128MACH (sig_n_prime, sig_y,
mult_factor[exp_y - exp_x]);
- // if postitive, return whichever significand is larger
+ // if positive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime.w[1] == 0 && (sig_n_prime.w[0] == sig_x)) {
res = 0;
// ZERO (CASE4)
// some properties:
// (+ZERO==-ZERO) => therefore ignore the sign, and neither number is greater
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
// therefore ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || sig_x == 0) {
res = 0;
BID_RETURN (res);
}
- // if postitive, return whichever significand abs is smaller
+ // if positive, return whichever significand abs is smaller
// (converse if negative)
{
res = (((sig_n_prime.w[1] == 0)
res = 0;
BID_RETURN (res);
}
- // if positive, return whichever significand abs is smaller
+ // if positive, return whichever significand abs is smaller
// (converse if negative)
{
res = (((sig_n_prime.w[1] > 0)
char x_is_zero = 0, y_is_zero = 0, non_canon_x, non_canon_y;
// NaN (CASE1)
- // if either number is NAN, the comparison is unordered,
+ // if either number is NAN, the comparison is unordered,
// rather than equal : return 0
if (((x & MASK_NAN) == MASK_NAN) || ((y & MASK_NAN) == MASK_NAN)) {
*pfpsf |= INVALID_EXCEPTION; // set invalid exception if NaN
res = 1;
BID_RETURN (res);
}
- // x is pos infinity, it is greater,
+ // x is pos infinity, it is greater,
// unless y is positive infinity => return y==pos_infinity
else {
res = !(((y & MASK_INF) != MASK_INF)
// ZERO (CASE4)
// some properties:
// (+ZERO==-ZERO) => therefore ignore the sign, and neither number is greater
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
// therefore ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || sig_x == 0) {
res = 1;
BID_RETURN (res);
}
- // if postitive, return whichever significand abs is smaller
+ // if positive, return whichever significand abs is smaller
// (converse if negative)
{
res = (((sig_n_prime.w[1] == 0)
res = 1;
BID_RETURN (res);
}
- // if positive, return whichever significand abs is smaller
+ // if positive, return whichever significand abs is smaller
// (converse if negative)
{
res = (((sig_n_prime.w[1] > 0)
// ZERO (CASE4)
// some properties:
// (+ZERO==-ZERO) => therefore ignore the sign, and neither number is greater
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
// therefore ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || sig_x == 0) {
res = 0;
BID_RETURN (res);
}
- // if postitive, return whichever significand abs is smaller
+ // if positive, return whichever significand abs is smaller
// (converse if negative)
{
res = (((sig_n_prime.w[1] == 0)
res = 0;
BID_RETURN (res);
}
- // if positive, return whichever significand abs is smaller
+ // if positive, return whichever significand abs is smaller
// (converse if negative)
{
res = (((sig_n_prime.w[1] > 0)
char x_is_zero = 0, y_is_zero = 0, non_canon_x, non_canon_y;
// NaN (CASE1)
- // if either number is NAN, the comparison is unordered,
+ // if either number is NAN, the comparison is unordered,
// rather than equal : return 0
if (((x & MASK_NAN) == MASK_NAN) || ((y & MASK_NAN) == MASK_NAN)) {
*pfpsf |= INVALID_EXCEPTION; // set invalid exception if NaN
res = 1;
BID_RETURN (res);
}
- // x is pos infinity, it is greater,
+ // x is pos infinity, it is greater,
// unless y is positive infinity => return y==pos_infinity
else {
res = !(((y & MASK_INF) != MASK_INF)
// ZERO (CASE4)
// some properties:
// (+ZERO==-ZERO) => therefore ignore the sign, and neither number is greater
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
// therefore ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || sig_x == 0) {
res = 1;
BID_RETURN (res);
}
- // if postitive, return whichever significand abs is smaller
+ // if positive, return whichever significand abs is smaller
// (converse if negative)
{
res = (((sig_n_prime.w[1] == 0)
res = 1;
BID_RETURN (res);
}
- // if positive, return whichever significand abs is smaller
+ // if positive, return whichever significand abs is smaller
// (converse if negative)
{
res = (((sig_n_prime.w[1] > 0)
// ZERO (CASE4)
// some properties:
// (+ZERO==-ZERO) => therefore ignore the sign, and neither number is greater
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
// therefore ignore the exponent field
// (Any non-canonical # is considered 0)
if (non_canon_x || sig_x == 0) {
res = 1;
BID_RETURN (res);
}
- // if postitive, return whichever significand abs is smaller
+ // if positive, return whichever significand abs is smaller
// (converse if negative)
{
res = (((sig_n_prime.w[1] == 0)
res = 1;
BID_RETURN (res);
}
- // if positive, return whichever significand abs is smaller
+ // if positive, return whichever significand abs is smaller
// (converse if negative)
{
res = (((sig_n_prime.w[1] > 0)
* CA= A*10^(15+j), j=0 for A>=B, 1 otherwise
* Q = 0
* else
- * get Q=(int)(coefficient_x/coefficient_y)
+ * get Q=(int)(coefficient_x/coefficient_y)
* (based on double precision divide)
* check for exact divide case
* Let R = coefficient_x - Q*coefficient_y
* endif
* if (CA<2^64)
* Q += CA/B (64-bit unsigned divide)
- * else
- * get final Q using double precision divide, followed by 3 integer
+ * else
+ * get final Q using double precision divide, followed by 3 integer
* iterations
* if exact result, eliminate trailing zeros
* check for underflow
if ((x & INFINITY_MASK64) == INFINITY_MASK64) {
// check if y is Inf or NaN
if ((y & INFINITY_MASK64) == INFINITY_MASK64) {
- // y==Inf, return NaN
+ // y==Inf, return NaN
if ((y & NAN_MASK64) == INFINITY_MASK64) { // Inf/Inf
#ifdef SET_STATUS_FLAGS
__set_status_flags (pfpsf, INVALID_EXCEPTION);
if (((x) & 0x7800000000000000ull) == 0x7800000000000000ull) {
// check if y is Inf.
if (((y.w[1] & 0x7c00000000000000ull) == 0x7800000000000000ull))
- // return NaN
+ // return NaN
{
#ifdef SET_STATUS_FLAGS
__set_status_flags (pfpsf, INVALID_EXCEPTION);
__div_256_by_128 (&CQ, &CA4, CY);
}
-
+
#ifdef SET_STATUS_FLAGS
if (CA4.w[0] || CA4.w[1]) {
CQ.w[0]++;
if (!CQ.w[0])
CQ.w[1]++;
- break;
+ break;
}
#endif
#endif
-
+
res =
fast_get_BID64_check_OF (sign_x ^ sign_y, diff_expon, CQ.w[0], rnd_mode,
pfpsf);
if ((x.w[1] & 0x7800000000000000ull) == 0x7800000000000000ull) {
// check if y is Inf.
if (((y & 0x7c00000000000000ull) == 0x7800000000000000ull))
- // return NaN
+ // return NaN
{
#ifdef SET_STATUS_FLAGS
__set_status_flags (pfpsf, INVALID_EXCEPTION);
CQ.w[0] = (P256.w[2] >> amount) | (P256.w[3] << (64 - amount));
CQ.w[1] = 0;
- __mul_64x64_to_128 (CQ2, CQ.w[0], (power10_table_128[ed2].w[0]));
+ __mul_64x64_to_128 (CQ2, CQ.w[0], (power10_table_128[ed2].w[0]));
__mul_64x64_to_128 (QB256, CQ2.w[0], CY.w[0]);
QB256.w[1] += CQ2.w[0] * CY.w[1] + CQ2.w[1] * CY.w[0];
__set_status_flags (pfpsf, INEXACT_EXCEPTION);
}
#ifndef LEAVE_TRAILING_ZEROS
- else
+ else
#endif
#else
#ifndef LEAVE_TRAILING_ZEROS
#endif
#endif
-
+
res =
fast_get_BID64_check_OF (sign_x ^ sign_y, diff_expon, CQ.w[0], rnd_mode,
pfpsf);
extern UINT8 packed_10000_zeros[];
-//UINT64* bid64_div128x128(UINT64 res, UINT128 *px, UINT128 *py, unsigned rnd_mode, unsigned *pfpsf)
+//UINT64* bid64_div128x128(UINT64 res, UINT128 *px, UINT128 *py, unsigned rnd_mode, unsigned *pfpsf)
TYPE0_FUNCTION_ARG128_ARG128 (UINT64, bid64qq_div, x, y)
UINT256 CA4 =
if ((x.w[1] & 0x7800000000000000ull) == 0x7800000000000000ull) {
// check if y is Inf.
if (((y.w[1] & 0x7c00000000000000ull) == 0x7800000000000000ull))
- // return NaN
+ // return NaN
{
#ifdef SET_STATUS_FLAGS
__set_status_flags (pfpsf, INVALID_EXCEPTION);
__div_256_by_128 (&CQ, &CA4, CY);
}
-
+
#ifdef SET_STATUS_FLAGS
if (CA4.w[0] || CA4.w[1]) {
__set_status_flags (pfpsf, INEXACT_EXCEPTION);
}
#ifndef LEAVE_TRAILING_ZEROS
- else
+ else
#endif
#else
#ifndef LEAVE_TRAILING_ZEROS
#endif
#endif
-
+
res =
fast_get_BID64_check_OF (sign_x ^ sign_y, diff_expon, CQ.w[0], rnd_mode,
pfpsf);
*
* if multiplication is guranteed exact (short coefficients)
* call the unpacked arg. equivalent of bid64_add(x*y, z)
- * else
+ * else
* get full coefficient_x*coefficient_y product
- * call subroutine to perform addition of 64-bit argument
+ * call subroutine to perform addition of 64-bit argument
* to 128-bit product
*
****************************************************************************/
// test if z is Inf of oposite sign
if (((z & 0x7c00000000000000ull) == 0x7800000000000000ull)
&& (((x ^ y) ^ z) & 0x8000000000000000ull)) {
- // return NaN
+ // return NaN
#ifdef SET_STATUS_FLAGS
__set_status_flags (pfpsf, INVALID_EXCEPTION);
#endif
BID_RETURN (((x ^ y) & 0x8000000000000000ull) |
0x7800000000000000ull);
}
- // y is 0
+ // y is 0
if (((z & 0x7800000000000000ull) != 0x7800000000000000ull)) {
if (coefficient_z) {
tempy.d = (double) coefficient_y;
bin_expon_cy = ((tempy.i & MASK_BINARY_EXPONENT) >> 52);
- // magnitude estimate for coefficient_x*coefficient_y is
+ // magnitude estimate for coefficient_x*coefficient_y is
// 2^(unbiased_bin_expon_cx + unbiased_bin_expon_cx)
bin_expon_product = bin_expon_cx + bin_expon_cy;
res =
get_add64 (sign_x ^ sign_y,
- exponent_x + exponent_y - DECIMAL_EXPONENT_BIAS, C64,
- sign_z, exponent_z, coefficient_z,
+ exponent_x + exponent_y - DECIMAL_EXPONENT_BIAS, C64,
+ sign_z, exponent_z, coefficient_z,
rnd_mode, pfpsf);
BID_RETURN (res);
}
// non-canonical
x = (x & MASK_SIGN) | ((x & MASK_BINARY_EXPONENT2) << 2);
} // else canonical
- } // else canonical
+ } // else canonical
}
// check for non-canonical y
// ZERO (CASE4)
// some properties:
- // (+ZERO == -ZERO) => therefore
+ // (+ZERO == -ZERO) => therefore
// ignore the sign, and neither number is greater
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
// ignore the exponent field
// (Any non-canonical # is considered 0)
if (sig_x == 0) {
}
// REDUNDANT REPRESENTATIONS (CASE6)
- // if both components are either bigger or smaller,
+ // if both components are either bigger or smaller,
// it is clear what needs to be done
if (sig_x > sig_y && exp_x >= exp_y) {
res = ((x & MASK_SIGN) != MASK_SIGN) ? y : x;
// otherwise adjust the x significand upwards
__mul_64x64_to_128MACH (sig_n_prime, sig_x,
mult_factor[exp_x - exp_y]);
- // if postitive, return whichever significand is larger
+ // if positive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime.w[1] == 0 && (sig_n_prime.w[0] == sig_y)) {
res = y;
// non-canonical
x = (x & MASK_SIGN) | ((x & MASK_BINARY_EXPONENT2) << 2);
} // else canonical
- } // else canonical
+ } // else canonical
}
// check for non-canonical y
// ZERO (CASE4)
// some properties:
- // (+ZERO == -ZERO) => therefore
+ // (+ZERO == -ZERO) => therefore
// ignore the sign, and neither number is greater
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
// ignore the exponent field
// (Any non-canonical # is considered 0)
if (sig_x == 0) {
BID_RETURN (res);
}
// REDUNDANT REPRESENTATIONS (CASE6)
- // if both components are either bigger or smaller,
+ // if both components are either bigger or smaller,
// it is clear what needs to be done
if (sig_x > sig_y && exp_x >= exp_y) {
res = y;
// otherwise adjust the x significand upwards
__mul_64x64_to_128MACH (sig_n_prime, sig_x,
mult_factor[exp_x - exp_y]);
- // now, sig_n_prime has: sig_x * 10^(exp_x-exp_y), this is
+ // now, sig_n_prime has: sig_x * 10^(exp_x-exp_y), this is
// the compensated signif.
if (sig_n_prime.w[1] == 0 && (sig_n_prime.w[0] == sig_y)) {
// two numbers are equal, return minNum(x,y)
res = ((y & MASK_SIGN) == MASK_SIGN) ? y : x;
BID_RETURN (res);
}
- // now, if compensated_x (sig_n_prime) is greater than y, return y,
+ // now, if compensated_x (sig_n_prime) is greater than y, return y,
// otherwise return x
res = ((sig_n_prime.w[1] != 0) || sig_n_prime.w[0] > sig_y) ? y : x;
BID_RETURN (res);
// non-canonical
x = (x & MASK_SIGN) | ((x & MASK_BINARY_EXPONENT2) << 2);
} // else canonical
- } // else canonical
+ } // else canonical
}
// check for non-canonical y
// INFINITY (CASE3)
if ((x & MASK_INF) == MASK_INF) {
// if x is neg infinity, there is no way it is greater than y, return y
- // x is pos infinity, it is greater, unless y is positive infinity =>
+ // x is pos infinity, it is greater, unless y is positive infinity =>
// return y!=pos_infinity
if (((x & MASK_SIGN) == MASK_SIGN)) {
res = y;
// ZERO (CASE4)
// some properties:
- // (+ZERO == -ZERO) => therefore
+ // (+ZERO == -ZERO) => therefore
// ignore the sign, and neither number is greater
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
// ignore the exponent field
// (Any non-canonical # is considered 0)
if (sig_x == 0) {
}
// REDUNDANT REPRESENTATIONS (CASE6)
- // if both components are either bigger or smaller,
+ // if both components are either bigger or smaller,
// it is clear what needs to be done
if (sig_x > sig_y && exp_x >= exp_y) {
res = ((x & MASK_SIGN) != MASK_SIGN) ? x : y;
// otherwise adjust the x significand upwards
__mul_64x64_to_128MACH (sig_n_prime, sig_x,
mult_factor[exp_x - exp_y]);
- // if postitive, return whichever significand is larger
+ // if positive, return whichever significand is larger
// (converse if negative)
if (sig_n_prime.w[1] == 0 && (sig_n_prime.w[0] == sig_y)) {
res = y;
// non-canonical
x = (x & MASK_SIGN) | ((x & MASK_BINARY_EXPONENT2) << 2);
} // else canonical
- } // else canonical
+ } // else canonical
}
// check for non-canonical y
// ZERO (CASE4)
// some properties:
- // (+ZERO == -ZERO) => therefore
+ // (+ZERO == -ZERO) => therefore
// ignore the sign, and neither number is greater
- // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
+ // (ZERO x 10^A == ZERO x 10^B) for any valid A, B =>
// ignore the exponent field
// (Any non-canonical # is considered 0)
if (sig_x == 0) {
BID_RETURN (res);
}
// REDUNDANT REPRESENTATIONS (CASE6)
- // if both components are either bigger or smaller,
+ // if both components are either bigger or smaller,
// it is clear what needs to be done
if (sig_x > sig_y && exp_x >= exp_y) {
res = x;
// otherwise adjust the x significand upwards
__mul_64x64_to_128MACH (sig_n_prime, sig_x,
mult_factor[exp_x - exp_y]);
- // now, sig_n_prime has: sig_x * 10^(exp_x-exp_y),
+ // now, sig_n_prime has: sig_x * 10^(exp_x-exp_y),
// this is the compensated signif.
if (sig_n_prime.w[1] == 0 && (sig_n_prime.w[0] == sig_y)) {
// two numbers are equal, return maxNum(x,y)
res = ((y & MASK_SIGN) == MASK_SIGN) ? x : y;
BID_RETURN (res);
}
- // now, if compensated_x (sig_n_prime) is greater than y return y,
+ // now, if compensated_x (sig_n_prime) is greater than y return y,
// otherwise return x
res = ((sig_n_prime.w[1] != 0) || sig_n_prime.w[0] > sig_y) ? x : y;
BID_RETURN (res);
* else
* get long product: coefficient_x*coefficient_y
* determine number of digits to round off (extra_digits)
- * rounding is performed as a 128x128-bit multiplication by
+ * rounding is performed as a 128x128-bit multiplication by
* 2^M[extra_digits]/10^extra_digits, followed by a shift
- * M[extra_digits] is sufficiently large for required accuracy
+ * M[extra_digits] is sufficiently large for required accuracy
*
****************************************************************************/
tempy.d = (double) coefficient_y;
bin_expon_cy = ((tempy.i & MASK_BINARY_EXPONENT) >> 52);
- // magnitude estimate for coefficient_x*coefficient_y is
+ // magnitude estimate for coefficient_x*coefficient_y is
// 2^(unbiased_bin_expon_cx + unbiased_bin_expon_cx)
bin_expon_product = bin_expon_cx + bin_expon_cy;
if (rmode == 0) //ROUNDING_TO_NEAREST
#endif
if ((C64 & 1) && !round_up) {
- // check whether fractional part of initial_P/10^extra_digits
+ // check whether fractional part of initial_P/10^extra_digits
// is exactly .5
- // this is the same as fractional part of
+ // this is the same as fractional part of
// (initial_P + 0.5*10^extra_digits)/10^extra_digits is exactly zero
// get remainder
} else { // -MAXFP <= x <= -MINFP - 1 ulp OR MINFP <= x <= MAXFP - 1 ulp
// can add/subtract 1 ulp to the significand
- // Note: we could check here if x >= 10^16 to speed up the case q1 =16
+ // Note: we could check here if x >= 10^16 to speed up the case q1 =16
// q1 = nr. of decimal digits in x (1 <= q1 <= 54)
// determine first the nr. of bits in x
if (C1 >= MASK_BINARY_OR2) { // x >= 2^53
UINT64 C1; // C1 represents x_signif (UINT64)
// check for NaNs and infinities
- if ((x & MASK_NAN) == MASK_NAN) { // check for NaN
+ if ((x & MASK_NAN) == MASK_NAN) { // check for NaN
if ((x & 0x0003ffffffffffffull) > 999999999999999ull)
- x = x & 0xfe00000000000000ull; // clear G6-G12 and the payload bits
+ x = x & 0xfe00000000000000ull; // clear G6-G12 and the payload bits
else
- x = x & 0xfe03ffffffffffffull; // clear G6-G12
- if ((x & MASK_SNAN) == MASK_SNAN) { // SNaN
+ x = x & 0xfe03ffffffffffffull; // clear G6-G12
+ if ((x & MASK_SNAN) == MASK_SNAN) { // SNaN
// set invalid flag
*pfpsf |= INVALID_EXCEPTION;
// return quiet (SNaN)
res = x & 0xfdffffffffffffffull;
- } else { // QNaN
+ } else { // QNaN
res = x;
}
BID_RETURN (res);
} else { // -MAXFP + 1ulp <= x <= -MINFP OR MINFP + 1 ulp <= x <= MAXFP
// can add/subtract 1 ulp to the significand
- // Note: we could check here if x >= 10^16 to speed up the case q1 =16
+ // Note: we could check here if x >= 10^16 to speed up the case q1 =16
// q1 = nr. of decimal digits in x (1 <= q1 <= 16)
// determine first the nr. of bits in x
if (C1 >= 0x0020000000000000ull) { // x >= 2^53
- // split the 64-bit value in two 32-bit halves to avoid
+ // split the 64-bit value in two 32-bit halves to avoid
// rounding errors
if (C1 >= 0x0000000100000000ull) { // x >= 2^32
tmp1.d = (double) (C1 >> 32); // exact conversion
*pfpsf |= OVERFLOW_EXCEPTION;
}
// if the result is in (-10^emin, 10^emin), and is different from the
- // operand x, signal underflow and inexact
+ // operand x, signal underflow and inexact
tmp1 = 0x00038d7ea4c68000ull; // +100...0[16] * 10^emin
tmp2 = res & 0x7fffffffffffffffull;
tmp_fpsf = *pfpsf; // save fpsf
BID_RETURN (res);
}
-// copies operand x to destination in the same format as x, but
+// copies operand x to destination in the same format as x, but
// with the sign of y
#if DECIMAL_CALL_BY_REFERENCE
void
}
// true if the exponents of x and y are the same, false otherwise.
-// The special cases of sameQuantum (NaN, NaN) and sameQuantum (Inf, Inf) are
+// The special cases of sameQuantum (NaN, NaN) and sameQuantum (Inf, Inf) are
// true.
// If exactly one operand is infinite or exactly one operand is NaN, then false
#if DECIMAL_CALL_BY_REFERENCE
res = 0; // y is a number, return 1
BID_RETURN (res);
} else {
- // x and y are both +NaN;
+ // x and y are both +NaN;
// must investigate payload if both quiet or both signaling
// this xnor statement will be true if both x and y are +qNaN or +sNaN
if (!
// are bitwise identical)
pyld_y = y & 0x0003ffffffffffffull;
pyld_x = x & 0x0003ffffffffffffull;
- // if x is zero and y isn't, x has the smaller
+ // if x is zero and y isn't, x has the smaller
// payload definitely (since we know y isn't 0 at this point)
if (pyld_x > 999999999999999ull || pyld_x == 0) {
res = 1;
res = (pyld_x <= pyld_y);
BID_RETURN (res);
} else {
- // return true if y is +qNaN and x is +sNaN
+ // return true if y is +qNaN and x is +sNaN
// (we know they're different bc of xor if_stmt above)
res = ((x & MASK_SNAN) == MASK_SNAN);
BID_RETURN (res);
BID_RETURN (res);
}
// OPPOSITE SIGNS (CASE 3)
- // if signs are opposite, return 1 if x is negative
+ // if signs are opposite, return 1 if x is negative
// (if x<y, totalOrder is true)
if (((x & MASK_SIGN) == MASK_SIGN) ^ ((y & MASK_SIGN) == MASK_SIGN)) {
res = (x & MASK_SIGN) == MASK_SIGN;
res = 1;
BID_RETURN (res);
} else {
- // x is positive infinity, only return1 if y
+ // x is positive infinity, only return1 if y
// is positive infinity as well
// (we know y has same sign as x)
res = ((y & MASK_INF) == MASK_INF);
}
// ZERO (CASE 5)
- // if x and y represent the same entities, and
+ // if x and y represent the same entities, and
// both are negative , return true iff exp_x <= exp_y
if (x_is_zero && y_is_zero) {
if (!((x & MASK_SIGN) == MASK_SIGN) ^
BID_RETURN (res);
}
// REDUNDANT REPRESENTATIONS (CASE6)
- // if both components are either bigger or smaller,
+ // if both components are either bigger or smaller,
// it is clear what needs to be done
if (sig_x > sig_y && exp_x >= exp_y) {
res = ((x & MASK_SIGN) == MASK_SIGN);
res = ((x & MASK_SIGN) != MASK_SIGN);
BID_RETURN (res);
}
- // if exp_x is 15 greater than exp_y, it is
+ // if exp_x is 15 greater than exp_y, it is
// definitely larger, so no need for compensation
if (exp_x - exp_y > 15) {
// difference cannot be greater than 10^15
res = ((x & MASK_SIGN) == MASK_SIGN);
BID_RETURN (res);
}
- // if exp_x is 15 less than exp_y, it is
+ // if exp_x is 15 less than exp_y, it is
// definitely smaller, no need for compensation
if (exp_y - exp_x > 15) {
res = ((x & MASK_SIGN) != MASK_SIGN);
BID_RETURN (res);
}
- // if |exp_x - exp_y| < 15, it comes down
+ // if |exp_x - exp_y| < 15, it comes down
// to the compensated significand
if (exp_x > exp_y) {
// otherwise adjust the x significand upwards
__mul_64x64_to_128MACH (sig_n_prime, sig_x,
mult_factor[exp_x - exp_y]);
- // if x and y represent the same entities,
+ // if x and y represent the same entities,
// and both are negative, return true iff exp_x <= exp_y
if (sig_n_prime.w[1] == 0 && (sig_n_prime.w[0] == sig_y)) {
- // case cannot occure, because all bits must
+ // case cannot occur, because all bits must
// be the same - would have been caught if (x==y)
res = (exp_x <= exp_y) ^ ((x & MASK_SIGN) == MASK_SIGN);
BID_RETURN (res);
__mul_64x64_to_128MACH (sig_n_prime, sig_y,
mult_factor[exp_y - exp_x]);
- // if x and y represent the same entities,
+ // if x and y represent the same entities,
// and both are negative, return true iff exp_x <= exp_y
if (sig_n_prime.w[1] == 0 && (sig_n_prime.w[0] == sig_x)) {
- // Cannot occur, because all bits must be the same.
+ // Cannot occur, because all bits must be the same.
// Case would have been caught if (x==y)
res = (exp_x <= exp_y) ^ ((x & MASK_SIGN) == MASK_SIGN);
BID_RETURN (res);
}
- // values are not equal, for positive numbers return 1
+ // values are not equal, for positive numbers return 1
// if x is less than y. 0 otherwise
res = ((sig_n_prime.w[1] > 0)
|| (sig_x < sig_n_prime.w[0])) ^ ((x & MASK_SIGN) ==
} else {
- // x and y are both +NaN;
+ // x and y are both +NaN;
// must investigate payload if both quiet or both signaling
// this xnor statement will be true if both x and y are +qNaN or +sNaN
if (!
// are bitwise identical)
pyld_y = y & 0x0003ffffffffffffull;
pyld_x = x & 0x0003ffffffffffffull;
- // if x is zero and y isn't, x has the smaller
+ // if x is zero and y isn't, x has the smaller
// payload definitely (since we know y isn't 0 at this point)
if (pyld_x > 999999999999999ull || pyld_x == 0) {
res = 1;
BID_RETURN (res);
} else {
- // return true if y is +qNaN and x is +sNaN
+ // return true if y is +qNaN and x is +sNaN
// (we know they're different bc of xor if_stmt above)
res = ((x & MASK_SNAN) == MASK_SNAN);
BID_RETURN (res);
BID_RETURN (res);
}
// SIMPLE (CASE2)
- // if all the bits (except sign bit) are the same,
+ // if all the bits (except sign bit) are the same,
// these numbers are equal.
if ((x & ~MASK_SIGN) == (y & ~MASK_SIGN)) {
res = 1;
}
// INFINITY (CASE3)
if ((x & MASK_INF) == MASK_INF) {
- // x is positive infinity, only return1
+ // x is positive infinity, only return1
// if y is positive infinity as well
res = ((y & MASK_INF) == MASK_INF);
BID_RETURN (res);
res = 1;
BID_RETURN (res);
}
- // if steering bits are 11 (condition will be 0),
+ // if steering bits are 11 (condition will be 0),
// then exponent is G[0:w+1] =>
if ((x & MASK_STEERING_BITS) == MASK_STEERING_BITS) {
exp_x = (x & MASK_BINARY_EXPONENT2) >> 51;
}
}
- // if steering bits are 11 (condition will be 0),
+ // if steering bits are 11 (condition will be 0),
// then exponent is G[0:w+1] =>
if ((y & MASK_STEERING_BITS) == MASK_STEERING_BITS) {
exp_y = (y & MASK_BINARY_EXPONENT2) >> 51;
}
// ZERO (CASE 5)
- // if x and y represent the same entities,
+ // if x and y represent the same entities,
// and both are negative , return true iff exp_x <= exp_y
if (x_is_zero && y_is_zero) {
// totalOrder(x,y) iff exp_x <= exp_y for positive numbers
res = 1;
BID_RETURN (res);
}
- // if exp_x is 15 greater than exp_y, it is definitely
+ // if exp_x is 15 greater than exp_y, it is definitely
// larger, so no need for compensation
if (exp_x - exp_y > 15) {
res = 0; // difference cannot be greater than 10^15
BID_RETURN (res);
}
- // if exp_x is 15 less than exp_y, it is definitely
+ // if exp_x is 15 less than exp_y, it is definitely
// smaller, no need for compensation
if (exp_y - exp_x > 15) {
res = 1;
BID_RETURN (res);
}
- // if |exp_x - exp_y| < 15, it comes down
+ // if |exp_x - exp_y| < 15, it comes down
// to the compensated significand
if (exp_x > exp_y) {
__mul_64x64_to_128MACH (sig_n_prime, sig_x,
mult_factor[exp_x - exp_y]);
- // if x and y represent the same entities,
+ // if x and y represent the same entities,
// and both are negative, return true iff exp_x <= exp_y
if (sig_n_prime.w[1] == 0 && (sig_n_prime.w[0] == sig_y)) {
- // case cannot occur, because all bits
+ // case cannot occur, because all bits
// must be the same - would have been caught if (x==y)
res = (exp_x <= exp_y);
BID_RETURN (res);
__mul_64x64_to_128MACH (sig_n_prime, sig_y,
mult_factor[exp_y - exp_x]);
- // if x and y represent the same entities,
+ // if x and y represent the same entities,
// and both are negative, return true iff exp_x <= exp_y
if (sig_n_prime.w[1] == 0 && (sig_n_prime.w[0] == sig_x)) {
res = (exp_x <= exp_y);
BID_RETURN (res);
}
- // values are not equal, for positive numbers
+ // values are not equal, for positive numbers
// return 1 if x is less than y. 0 otherwise
res = ((sig_n_prime.w[1] > 0) || (sig_x < sig_n_prime.w[0]));
BID_RETURN (res);
if ((x & 0x7800000000000000ull) == 0x7800000000000000ull) {
#ifdef SET_STATUS_FLAGS
if (((x & 0x7e00000000000000ull) == 0x7e00000000000000ull) // sNaN
- || ((x & 0x7c00000000000000ull) == 0x7800000000000000ull)) //Inf
+ || ((x & 0x7c00000000000000ull) == 0x7800000000000000ull)) //Inf
__set_status_flags (pfpsf, INVALID_EXCEPTION);
#endif
if ((x & NAN_MASK64) != NAN_MASK64)
if (rnd_mode == 0)
#endif
if (C64 & 1) {
- // check whether fractional part of initial_P/10^extra_digits
+ // check whether fractional part of initial_P/10^extra_digits
// is exactly .5
- // this is the same as fractional part of
+ // this is the same as fractional part of
// (initial_P + 0.5*10^extra_digits)/10^extra_digits is exactly zero
// get remainder
* if(exponent_x < exponent_y)
* scale coefficient_y so exponents are aligned
* perform coefficient divide (64-bit integer divide), unless
- * coefficient_y is longer than 64 bits (clearly larger
- * than coefficient_x)
+ * coefficient_y is longer than 64 bits (clearly larger
+ * than coefficient_x)
* else // exponent_x > exponent_y
- * use a loop to scale coefficient_x to 18_digits, divide by
+ * use a loop to scale coefficient_x to 18_digits, divide by
* coefficient_y (64-bit integer divide), calculate remainder
- * as new_coefficient_x and repeat until final remainder is obtained
+ * as new_coefficient_x and repeat until final remainder is obtained
* (when new_exponent_x < exponent_y)
*
****************************************************************************/
}
// unpack x
if ((x & MASK_STEERING_BITS) == MASK_STEERING_BITS) {
- // if the steering bits are 11 (condition will be 0), then
+ // if the steering bits are 11 (condition will be 0), then
// the exponent is G[0:w+1]
exp = ((x & MASK_BINARY_EXPONENT2) >> 51) - 398;
C1 = (x & MASK_BINARY_SIG2) | MASK_BINARY_OR2;
C1 = (x & MASK_BINARY_SIG1);
}
- // if x is 0 or non-canonical return 0 preserving the sign bit and
+ // if x is 0 or non-canonical return 0 preserving the sign bit and
// the preferred exponent of MAX(Q(x), 0)
if (C1 == 0) {
if (exp < 0)
}
break;
case ROUNDING_TO_ZERO:
- // return 0 if (exp <= -p)
+ // return 0 if (exp <= -p)
if (exp <= -16) {
res = x_sign | 0x31c0000000000000ull;
*pfpsf |= INEXACT_EXCEPTION;
if ((q + exp) >= 0) { // exp < 0 and 1 <= -exp <= q
// need to shift right -exp digits from the coefficient; exp will be 0
ind = -exp; // 1 <= ind <= 16; ind is a synonym for 'x'
- // chop off ind digits from the lower part of C1
+ // chop off ind digits from the lower part of C1
// C1 = C1 + 1/2 * 10^x where the result C1 fits in 64 bits
// FOR ROUND_TO_NEAREST, WE ADD 1/2 ULP(y) then truncate
C1 = C1 + midpoint64[ind - 1];
// shift; C* has p decimal digits, correct by Prop. 1)
// else if floor(C*) is odd C* = floor(C*)-1 (logical right
// shift; C* has p decimal digits, correct by Pr. 1)
- // else
+ // else
// C* = floor(C*) (logical right shift; C has p decimal digits,
// correct by Property 1)
- // n = C* * 10^(e+x)
+ // n = C* * 10^(e+x)
if (ind - 1 <= 2) { // 0 <= ind - 1 <= 2 => shift = 0
res = P128.w[1];
if ((q + exp) >= 0) { // exp < 0 and 1 <= -exp <= q
// need to shift right -exp digits from the coefficient; exp will be 0
ind = -exp; // 1 <= ind <= 16; ind is a synonym for 'x'
- // chop off ind digits from the lower part of C1
+ // chop off ind digits from the lower part of C1
// C1 = C1 + 1/2 * 10^x where the result C1 fits in 64 bits
// FOR ROUND_TO_NEAREST, WE ADD 1/2 ULP(y) then truncate
C1 = C1 + midpoint64[ind - 1];
__mul_64x64_to_128 (P128, C1, ten2mk64[ind - 1]);
// if (0 < f* < 10^(-x)) then the result is a midpoint
- // C* = floor(C*) - logical right shift; C* has p decimal digits,
+ // C* = floor(C*) - logical right shift; C* has p decimal digits,
// correct by Prop. 1)
// else
// C* = floor(C*) (logical right shift; C has p decimal digits,
// the result is inexact
if (ind - 1 <= 2) {
if (fstar.w[0] > 0x8000000000000000ull) {
- // f* > 1/2 and the result may be exact
+ // f* > 1/2 and the result may be exact
// fstar.w[0] - 0x8000000000000000ull is f* - 1/2
if ((fstar.w[0] - 0x8000000000000000ull) > ten2mk64[ind - 1]) {
// set the inexact flag
if ((q + exp) > 0) { // exp < 0 and 1 <= -exp < q
// need to shift right -exp digits from the coefficient; exp will be 0
ind = -exp; // 1 <= ind <= 16; ind is a synonym for 'x'
- // chop off ind digits from the lower part of C1
+ // chop off ind digits from the lower part of C1
// C1 fits in 64 bits
// calculate C* and f*
// C* is actually floor(C*) in this case
// C* = floor(C*) (logical right shift; C has p decimal digits,
// correct by Property 1)
// if (0 < f* < 10^(-x)) then the result is exact
- // n = C* * 10^(e+x)
+ // n = C* * 10^(e+x)
if (ind - 1 <= 2) { // 0 <= ind - 1 <= 2 => shift = 0
res = P128.w[1];
if ((q + exp) > 0) { // exp < 0 and 1 <= -exp < q
// need to shift right -exp digits from the coefficient; exp will be 0
ind = -exp; // 1 <= ind <= 16; ind is a synonym for 'x'
- // chop off ind digits from the lower part of C1
+ // chop off ind digits from the lower part of C1
// C1 fits in 64 bits
// calculate C* and f*
// C* is actually floor(C*) in this case
// C* = floor(C*) (logical right shift; C has p decimal digits,
// correct by Property 1)
// if (0 < f* < 10^(-x)) then the result is exact
- // n = C* * 10^(e+x)
+ // n = C* * 10^(e+x)
if (ind - 1 <= 2) { // 0 <= ind - 1 <= 2 => shift = 0
res = P128.w[1];
if ((q + exp) >= 0) { // exp < 0 and 1 <= -exp <= q
// need to shift right -exp digits from the coefficient; exp will be 0
ind = -exp; // 1 <= ind <= 16; ind is a synonym for 'x'
- // chop off ind digits from the lower part of C1
+ // chop off ind digits from the lower part of C1
// C1 fits in 127 bits
// calculate C* and f*
// C* is actually floor(C*) in this case
// C* = floor(C*) (logical right shift; C has p decimal digits,
// correct by Property 1)
// if (0 < f* < 10^(-x)) then the result is exact
- // n = C* * 10^(e+x)
+ // n = C* * 10^(e+x)
if (ind - 1 <= 2) { // 0 <= ind - 1 <= 2 => shift = 0
res = P128.w[1];
if ((x & 0x0003ffffffffffffull) > 999999999999999ull)
x = x & 0xfe00000000000000ull; // clear G6-G12 and the payload bits
else
- x = x & 0xfe03ffffffffffffull; // clear G6-G12
- if ((x & MASK_SNAN) == MASK_SNAN) { // SNaN
- // set invalid flag
+ x = x & 0xfe03ffffffffffffull; // clear G6-G12
+ if ((x & MASK_SNAN) == MASK_SNAN) { // SNaN
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return quiet (SNaN)
+ // return quiet (SNaN)
res = x & 0xfdffffffffffffffull;
} else { // QNaN
res = x;
} else if ((q + exp) >= 0) { // exp < 0 and 1 <= -exp <= q
// need to shift right -exp digits from the coefficient; the exp will be 0
ind = -exp; // 1 <= ind <= 16; ind is a synonym for 'x'
- // chop off ind digits from the lower part of C1
+ // chop off ind digits from the lower part of C1
// C1 = C1 + 1/2 * 10^x where the result C1 fits in 64 bits
// FOR ROUND_TO_NEAREST, WE ADD 1/2 ULP(y) then truncate
C1 = C1 + midpoint64[ind - 1];
// shift; C* has p decimal digits, correct by Prop. 1)
// else if floor(C*) is odd C* = floor(C*)-1 (logical right
// shift; C* has p decimal digits, correct by Pr. 1)
- // else
+ // else
// C* = floor(C*) (logical right shift; C has p decimal digits,
// correct by Property 1)
- // n = C* * 10^(e+x)
+ // n = C* * 10^(e+x)
if (ind - 1 <= 2) { // 0 <= ind - 1 <= 2 => shift = 0
res = P128.w[1];
if ((x & 0x0003ffffffffffffull) > 999999999999999ull)
x = x & 0xfe00000000000000ull; // clear G6-G12 and the payload bits
else
- x = x & 0xfe03ffffffffffffull; // clear G6-G12
- if ((x & MASK_SNAN) == MASK_SNAN) { // SNaN
- // set invalid flag
+ x = x & 0xfe03ffffffffffffull; // clear G6-G12
+ if ((x & MASK_SNAN) == MASK_SNAN) { // SNaN
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return quiet (SNaN)
+ // return quiet (SNaN)
res = x & 0xfdffffffffffffffull;
} else { // QNaN
res = x;
} else if ((q + exp) > 0) { // exp < 0 and 1 <= -exp < q
// need to shift right -exp digits from the coefficient; the exp will be 0
ind = -exp; // 1 <= ind <= 16; ind is a synonym for 'x'
- // chop off ind digits from the lower part of C1
+ // chop off ind digits from the lower part of C1
// C1 fits in 64 bits
// calculate C* and f*
// C* is actually floor(C*) in this case
// C* = floor(C*) (logical right shift; C has p decimal digits,
// correct by Property 1)
// if (0 < f* < 10^(-x)) then the result is exact
- // n = C* * 10^(e+x)
+ // n = C* * 10^(e+x)
if (ind - 1 <= 2) { // 0 <= ind - 1 <= 2 => shift = 0
res = P128.w[1];
if ((x & 0x0003ffffffffffffull) > 999999999999999ull)
x = x & 0xfe00000000000000ull; // clear G6-G12 and the payload bits
else
- x = x & 0xfe03ffffffffffffull; // clear G6-G12
- if ((x & MASK_SNAN) == MASK_SNAN) { // SNaN
- // set invalid flag
+ x = x & 0xfe03ffffffffffffull; // clear G6-G12
+ if ((x & MASK_SNAN) == MASK_SNAN) { // SNaN
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return quiet (SNaN)
+ // return quiet (SNaN)
res = x & 0xfdffffffffffffffull;
} else { // QNaN
res = x;
} else if ((q + exp) > 0) { // exp < 0 and 1 <= -exp < q
// need to shift right -exp digits from the coefficient; the exp will be 0
ind = -exp; // 1 <= ind <= 16; ind is a synonym for 'x'
- // chop off ind digits from the lower part of C1
+ // chop off ind digits from the lower part of C1
// C1 fits in 64 bits
// calculate C* and f*
// C* is actually floor(C*) in this case
// C* = floor(C*) (logical right shift; C has p decimal digits,
// correct by Property 1)
// if (0 < f* < 10^(-x)) then the result is exact
- // n = C* * 10^(e+x)
+ // n = C* * 10^(e+x)
if (ind - 1 <= 2) { // 0 <= ind - 1 <= 2 => shift = 0
res = P128.w[1];
if ((x & 0x0003ffffffffffffull) > 999999999999999ull)
x = x & 0xfe00000000000000ull; // clear G6-G12 and the payload bits
else
- x = x & 0xfe03ffffffffffffull; // clear G6-G12
- if ((x & MASK_SNAN) == MASK_SNAN) { // SNaN
- // set invalid flag
+ x = x & 0xfe03ffffffffffffull; // clear G6-G12
+ if ((x & MASK_SNAN) == MASK_SNAN) { // SNaN
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return quiet (SNaN)
+ // return quiet (SNaN)
res = x & 0xfdffffffffffffffull;
} else { // QNaN
res = x;
} else if ((q + exp) >= 0) { // exp < 0 and 1 <= -exp <= q
// need to shift right -exp digits from the coefficient; the exp will be 0
ind = -exp; // 1 <= ind <= 16; ind is a synonym for 'x'
- // chop off ind digits from the lower part of C1
+ // chop off ind digits from the lower part of C1
// C1 fits in 127 bits
// calculate C* and f*
// C* is actually floor(C*) in this case
// C* = floor(C*) (logical right shift; C has p decimal digits,
// correct by Property 1)
// if (0 < f* < 10^(-x)) then the result is exact
- // n = C* * 10^(e+x)
+ // n = C* * 10^(e+x)
if (ind - 1 <= 2) { // 0 <= ind - 1 <= 2 => shift = 0
res = P128.w[1];
if ((x & 0x0003ffffffffffffull) > 999999999999999ull)
x = x & 0xfe00000000000000ull; // clear G6-G12 and the payload bits
else
- x = x & 0xfe03ffffffffffffull; // clear G6-G12
- if ((x & MASK_SNAN) == MASK_SNAN) { // SNaN
- // set invalid flag
+ x = x & 0xfe03ffffffffffffull; // clear G6-G12
+ if ((x & MASK_SNAN) == MASK_SNAN) { // SNaN
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return quiet (SNaN)
+ // return quiet (SNaN)
res = x & 0xfdffffffffffffffull;
} else { // QNaN
res = x;
} else if ((q + exp) >= 0) { // exp < 0 and 1 <= -exp <= q
// need to shift right -exp digits from the coefficient; the exp will be 0
ind = -exp; // 1 <= ind <= 16; ind is a synonym for 'x'
- // chop off ind digits from the lower part of C1
+ // chop off ind digits from the lower part of C1
// C1 = C1 + 1/2 * 10^x where the result C1 fits in 64 bits
// FOR ROUND_TO_NEAREST, WE ADD 1/2 ULP(y) then truncate
C1 = C1 + midpoint64[ind - 1];
__mul_64x64_to_128 (P128, C1, ten2mk64[ind - 1]);
// if (0 < f* < 10^(-x)) then the result is a midpoint
- // C* = floor(C*) - logical right shift; C* has p decimal digits,
+ // C* = floor(C*) - logical right shift; C* has p decimal digits,
// correct by Prop. 1)
// else
// C* = floor(C*) (logical right shift; C has p decimal digits,
* - scale coefficient x to between 31 and 33 decimal digits
* - in parallel, check for exact case and return if true
* - get high part of result coefficient using double precision sqrt
- * - compute remainder and refine coefficient in one iteration (which
+ * - compute remainder and refine coefficient in one iteration (which
* modifies it by at most 1)
- * - result exponent is easy to compute from the adjusted arg. exponent
+ * - result exponent is easy to compute from the adjusted arg. exponent
*
****************************************************************************/
CS.w[0] = short_sqrt128 (A10);
CS.w[1] = 0;
mul_factor = 0;
- // check for exact result
+ // check for exact result
if (CS.w[0] < 10000000000000000ull) {
if (CS.w[0] * CS.w[0] == A10.w[0]) {
__sqr64_fast (S2, CS.w[0]);
/*__add_carry_out(M256.w[0], Carry, M256.w[0], C8.w[0]);
M256.w[1] = M256.w[1] + Carry + C8.w[1];
M256.w[0]++;
- if(!M256.w[0])
+ if(!M256.w[0])
{
M256.w[1]++;
}
ps[istart++] = '0';
} else {
/* ****************************************************
- This takes a bid coefficient in C1.w[1],C1.w[0]
- and put the converted character sequence at location
+ This takes a bid coefficient in C1.w[1],C1.w[0]
+ and put the converted character sequence at location
starting at &(str[k]). The function returns the number
- of MiDi returned. Note that the character sequence
+ of MiDi returned. Note that the character sequence
does not have leading zeros EXCEPT when the input is of
zero value. It will then output 1 character '0'
      The algorithm essentially tries first to get a sequence of
18 digits, we set hi = 0, and lo = d to begin with.
We then retrieve from a table, for j = 0, 1, ..., 8
that gives us A and B where c_j 2^(59+6j) = A * 10^18 + B.
- hi += A ; lo += B; After each accumulation into lo, we normalize
+ hi += A ; lo += B; After each accumulation into lo, we normalize
immediately. So at the end, we have the decomposition as we need. */
Tmp = coefficient_x >> 59;
#if DECIMAL_CALL_BY_REFERENCE
void
bid64_from_string (UINT64 * pres, char *ps
- _RND_MODE_PARAM _EXC_FLAGS_PARAM
+ _RND_MODE_PARAM _EXC_FLAGS_PARAM
_EXC_MASKS_PARAM _EXC_INFO_PARAM) {
#else
UINT64
bid64_from_string (char *ps
- _RND_MODE_PARAM _EXC_FLAGS_PARAM
+ _RND_MODE_PARAM _EXC_FLAGS_PARAM
_EXC_MASKS_PARAM _EXC_INFO_PARAM) {
#endif
UINT64 sign_x, coefficient_x = 0, rounded = 0, res;
// detect special cases (INF or NaN)
if (!c || (c != '.' && c != '-' && c != '+' && (c < '0' || c > '9'))) {
// Infinity?
- if ((tolower_macro (ps[0]) == 'i' && tolower_macro (ps[1]) == 'n' &&
- tolower_macro (ps[2]) == 'f') && (!ps[3] ||
- (tolower_macro (ps[3]) == 'i' &&
- tolower_macro (ps[4]) == 'n' && tolower_macro (ps[5]) == 'i' &&
- tolower_macro (ps[6]) == 't' && tolower_macro (ps[7]) == 'y' &&
+ if ((tolower_macro (ps[0]) == 'i' && tolower_macro (ps[1]) == 'n' &&
+ tolower_macro (ps[2]) == 'f') && (!ps[3] ||
+ (tolower_macro (ps[3]) == 'i' &&
+ tolower_macro (ps[4]) == 'n' && tolower_macro (ps[5]) == 'i' &&
+ tolower_macro (ps[6]) == 't' && tolower_macro (ps[7]) == 'y' &&
!ps[8]))) {
res = 0x7800000000000000ull;
BID_RETURN (res);
}
// return sNaN
- if (tolower_macro (ps[0]) == 's' && tolower_macro (ps[1]) == 'n' &&
- tolower_macro (ps[2]) == 'a' && tolower_macro (ps[3]) == 'n') {
+ if (tolower_macro (ps[0]) == 's' && tolower_macro (ps[1]) == 'n' &&
+ tolower_macro (ps[2]) == 'a' && tolower_macro (ps[3]) == 'n') {
// case insensitive check for snan
res = 0x7e00000000000000ull;
BID_RETURN (res);
}
}
// detect +INF or -INF
- if ((tolower_macro (ps[1]) == 'i' && tolower_macro (ps[2]) == 'n' &&
- tolower_macro (ps[3]) == 'f') && (!ps[4] ||
- (tolower_macro (ps[4]) == 'i' && tolower_macro (ps[5]) == 'n' &&
- tolower_macro (ps[6]) == 'i' && tolower_macro (ps[7]) == 't' &&
+ if ((tolower_macro (ps[1]) == 'i' && tolower_macro (ps[2]) == 'n' &&
+ tolower_macro (ps[3]) == 'f') && (!ps[4] ||
+ (tolower_macro (ps[4]) == 'i' && tolower_macro (ps[5]) == 'n' &&
+ tolower_macro (ps[6]) == 'i' && tolower_macro (ps[7]) == 't' &&
tolower_macro (ps[8]) == 'y' && !ps[9]))) {
if (c == '+')
res = 0x7800000000000000ull;
// should catch cases such as: 000.0
while (*ps == '0') {
ps++;
- // for numbers such as 0.0000000000000000000000000000000000001001,
+ // for numbers such as 0.0000000000000000000000000000000000001001,
// we want to count the leading zeros
if (rdx_pt_enc) {
right_radix_leading_zeros++;
}
- // if this character is a radix point, make sure we haven't already
+ // if this character is a radix point, make sure we haven't already
// encountered one
if (*(ps) == '.') {
if (rdx_pt_enc == 0) {
rdx_pt_enc = 1;
- // if this is the first radix point, and the next character is NULL,
+ // if this is the first radix point, and the next character is NULL,
// we have a zero
if (!*(ps + 1)) {
res =
// coefficient rounding
switch(rnd_mode){
case ROUNDING_TO_NEAREST:
- midpoint = (c == '5' && !(coefficient_x & 1)) ? 1 : 0;
- // if coefficient is even and c is 5, prepare to round up if
+ midpoint = (c == '5' && !(coefficient_x & 1)) ? 1 : 0;
+ // if coefficient is even and c is 5, prepare to round up if
// subsequent digit is nonzero
// if str[MAXDIG+1] > 5, we MUST round up
// if str[MAXDIG+1] == 5 and coefficient is ODD, ROUND UP!
#include "bid_internal.h"
/*
- * Takes a BID64 as input and converts it to a BID128 and returns it.
+ * Takes a BID64 as input and converts it to a BID128 and returns it.
*/
TYPE0_FUNCTION_ARGTYPE1_NORND (UINT128, bid64_to_bid128, UINT64, x)
// Note: 0x4fffffffb*10^(q-11) has q-1 or q digits, where q <= 16
tmp64 = 0x4fffffffbull * ten2k64[q - 11];
if (C1 >= tmp64) {
- // set invalid flag
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return Integer Indefinite
+ // return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = (C1 + 1/2 * 10^x) * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
// check for midpoints
if ((fstar.w[1] == 0) && fstar.w[0]
&& (fstar.w[0] <= ten2mk128trunc[ind - 1].w[1])) {
- // ten2mk128trunc[ind -1].w[1] is identical to
+ // ten2mk128trunc[ind -1].w[1] is identical to
// ten2mk128[ind -1].w[1]
// the result is a midpoint; round to nearest
if (Cstar & 0x01) { // Cstar is odd; MP in [EVEN, ODD]
// Note: 0x4fffffffb*10^(q-11) has q-1 or q digits, where q <= 16
tmp64 = 0x4fffffffbull * ten2k64[q - 11];
if (C1 >= tmp64) {
- // set invalid flag
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return Integer Indefinite
+ // return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = (C1 + 1/2 * 10^x) * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
// f* > 1/2 and the result may be exact
tmp64 = fstar.w[0] - 0x8000000000000000ull; // f* - 1/2
if ((tmp64 > ten2mk128trunc[ind - 1].w[1])) {
- // ten2mk128trunc[ind -1].w[1] is identical to
+ // ten2mk128trunc[ind -1].w[1] is identical to
// ten2mk128[ind -1].w[1]
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
// Calculate f2* - 1/2
tmp64 = fstar.w[1] - onehalf128[ind - 1];
if (tmp64 || fstar.w[0] > ten2mk128trunc[ind - 1].w[1]) {
- // ten2mk128trunc[ind -1].w[1] is identical to
+ // ten2mk128trunc[ind -1].w[1] is identical to
// ten2mk128[ind -1].w[1]
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
// check for midpoints
if ((fstar.w[1] == 0) && fstar.w[0]
&& (fstar.w[0] <= ten2mk128trunc[ind - 1].w[1])) {
- // ten2mk128trunc[ind -1].w[1] is identical to
+ // ten2mk128trunc[ind -1].w[1] is identical to
// ten2mk128[ind -1].w[1]
// the result is a midpoint; round to nearest
if (Cstar & 0x01) { // Cstar is odd; MP in [EVEN, ODD]
// Note: 0x500000000*10^(q-11) has q-1 or q digits, where q <= 16
tmp64 = 0x500000000ull * ten2k64[q - 11];
if (C1 >= tmp64) {
- // set invalid flag
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return Integer Indefinite
+ // return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = C1 * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
// Note: 0x500000000*10^(q-11) has q-1 or q digits, where q <= 16
tmp64 = 0x500000000ull * ten2k64[q - 11];
if (C1 >= tmp64) {
- // set invalid flag
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return Integer Indefinite
+ // return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = C1 * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
// Note: 0x4fffffff6*10^(q-11) has q-1 or q digits, where q <= 16
tmp64 = 0x4fffffff6ull * ten2k64[q - 11];
if (C1 > tmp64) {
- // set invalid flag
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return Integer Indefinite
+ // return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = C1 * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
if (fstar.w[1] || fstar.w[0] > ten2mk128trunc[ind - 1].w[1]) {
// ten2mk128trunc[ind -1].w[1] is identical to
// ten2mk128[ind -1].w[1]
- if (!x_sign) { // positive and inexact
+ if (!x_sign) { // positive and inexact
Cstar++;
}
} // else the result is exact
// Note: 0x4fffffff6*10^(q-11) has q-1 or q digits, where q <= 16
tmp64 = 0x4fffffff6ull * ten2k64[q - 11];
if (C1 > tmp64) {
- // set invalid flag
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return Integer Indefinite
+ // return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = C1 * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
if (fstar.w[1] || fstar.w[0] > ten2mk128trunc[ind - 1].w[1]) {
// ten2mk128trunc[ind -1].w[1] is identical to
// ten2mk128[ind -1].w[1]
- if (!x_sign) { // positive and inexact
+ if (!x_sign) { // positive and inexact
Cstar++;
}
// set the inexact flag
// Note: 0x500000000*10^(q-11) has q-1 or q digits, where q <= 16
tmp64 = 0x500000000ull * ten2k64[q - 11];
if (C1 >= tmp64) {
- // set invalid flag
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return Integer Indefinite
+ // return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = C1 * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
// Note: 0x500000000*10^(q-11) has q-1 or q digits, where q <= 16
tmp64 = 0x500000000ull * ten2k64[q - 11];
if (C1 >= tmp64) {
- // set invalid flag
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return Integer Indefinite
+ // return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = C1 * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
// Note: 0x4fffffffb*10^(q-11) has q-1 or q digits, where q <= 16
tmp64 = 0x4fffffffbull * ten2k64[q - 11];
if (C1 >= tmp64) {
- // set invalid flag
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return Integer Indefinite
+ // return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = (C1 + 1/2 * 10^x) * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
Cstar = P128.w[1];
// the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind].w[0], e.g.
// if x=1, T*=ten2mk128trunc[0].w[0]=0x1999999999999999
- // C* = floor(C*)-1 (logical right shift; C* has p decimal digits,
+ // C* = floor(C*)-1 (logical right shift; C* has p decimal digits,
// correct by Pr. 1)
// n = C* * 10^(e+x)
// Note: 0x4fffffffb*10^(q-11) has q-1 or q digits, where q <= 16
tmp64 = 0x4fffffffbull * ten2k64[q - 11];
if (C1 >= tmp64) {
- // set invalid flag
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return Integer Indefinite
+ // return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = (C1 + 1/2 * 10^x) * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
fstar.w[0] = P128.w[0];
// the top Ex bits of 10^(-x) are T* = ten2mk128trunc[ind].w[0], e.g.
// if x=1, T*=ten2mk128trunc[0].w[0]=0x1999999999999999
- // C* = floor(C*)-1 (logical right shift; C* has p decimal digits,
+ // C* = floor(C*)-1 (logical right shift; C* has p decimal digits,
// correct by Pr. 1)
// n = C* * 10^(e+x)
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = (C1 + 1/2 * 10^x) * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
// check for midpoints
if ((fstar.w[1] == 0) && fstar.w[0] &&
(fstar.w[0] <= ten2mk128trunc[ind - 1].w[1])) {
- // ten2mk128trunc[ind -1].w[1] is identical to
+ // ten2mk128trunc[ind -1].w[1] is identical to
// ten2mk128[ind -1].w[1]
// the result is a midpoint; round to nearest
if (Cstar & 0x01) { // Cstar is odd; MP in [EVEN, ODD]
else
res = C1;
} else { // if (exp > 0) => 1 <= exp <= 18, 1 <= q <= 16, 2 <= q + exp <= 20
- // (the upper limit of 20 on q + exp is due to the fact that
- // +/-C * 10^exp is guaranteed to fit in 64 bits)
+ // (the upper limit of 20 on q + exp is due to the fact that
+ // +/-C * 10^exp is guaranteed to fit in 64 bits)
// res = +/-C * 10^exp (exact)
if (x_sign)
res = -C1 * ten2k64[exp];
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = (C1 + 1/2 * 10^x) * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
// f* > 1/2 and the result may be exact
tmp64 = fstar.w[0] - 0x8000000000000000ull; // f* - 1/2
if ((tmp64 > ten2mk128trunc[ind - 1].w[1])) {
- // ten2mk128trunc[ind -1].w[1] is identical to
+ // ten2mk128trunc[ind -1].w[1] is identical to
// ten2mk128[ind -1].w[1]
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
// Calculate f2* - 1/2
tmp64 = fstar.w[1] - onehalf128[ind - 1];
if (tmp64 || fstar.w[0] > ten2mk128trunc[ind - 1].w[1]) {
- // ten2mk128trunc[ind -1].w[1] is identical to
+ // ten2mk128trunc[ind -1].w[1] is identical to
// ten2mk128[ind -1].w[1]
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
// check for midpoints
if ((fstar.w[1] == 0) && fstar.w[0] &&
(fstar.w[0] <= ten2mk128trunc[ind - 1].w[1])) {
- // ten2mk128trunc[ind -1].w[1] is identical to
+ // ten2mk128trunc[ind -1].w[1] is identical to
// ten2mk128[ind -1].w[1]
// the result is a midpoint; round to nearest
if (Cstar & 0x01) { // Cstar is odd; MP in [EVEN, ODD]
else
res = C1;
} else { // if (exp > 0) => 1 <= exp <= 18, 1 <= q <= 16, 2 <= q + exp <= 20
- // (the upper limit of 20 on q + exp is due to the fact that
- // +/-C * 10^exp is guaranteed to fit in 64 bits)
+ // (the upper limit of 20 on q + exp is due to the fact that
+ // +/-C * 10^exp is guaranteed to fit in 64 bits)
// res = +/-C * 10^exp (exact)
if (x_sign)
res = -C1 * ten2k64[exp];
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = C1 * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
else
res = C1;
} else { // if (exp > 0) => 1 <= exp <= 18, 1 <= q <= 16, 2 <= q + exp <= 20
- // (the upper limit of 20 on q + exp is due to the fact that
- // +/-C * 10^exp is guaranteed to fit in 64 bits)
+ // (the upper limit of 20 on q + exp is due to the fact that
+ // +/-C * 10^exp is guaranteed to fit in 64 bits)
// res = +/-C * 10^exp (exact)
if (x_sign)
res = -C1 * ten2k64[exp];
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = C1 * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
else
res = C1;
} else { // if (exp > 0) => 1 <= exp <= 18, 1 <= q <= 16, 2 <= q + exp <= 20
- // (the upper limit of 20 on q + exp is due to the fact that
- // +/-C * 10^exp is guaranteed to fit in 64 bits)
+ // (the upper limit of 20 on q + exp is due to the fact that
+ // +/-C * 10^exp is guaranteed to fit in 64 bits)
// res = +/-C * 10^exp (exact)
if (x_sign)
res = -C1 * ten2k64[exp];
#if DECIMAL_CALL_BY_REFERENCE
void
bid64_to_int64_ceil (SINT64 * pres, UINT64 * px
- _EXC_FLAGS_PARAM _EXC_MASKS_PARAM _EXC_INFO_PARAM)
+ _EXC_FLAGS_PARAM _EXC_MASKS_PARAM _EXC_INFO_PARAM)
{
UINT64 x = *px;
#else
SINT64
bid64_to_int64_ceil (UINT64 x
- _EXC_FLAGS_PARAM _EXC_MASKS_PARAM _EXC_INFO_PARAM)
+ _EXC_FLAGS_PARAM _EXC_MASKS_PARAM _EXC_INFO_PARAM)
{
#endif
SINT64 res;
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = C1 * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
else
res = C1;
} else { // if (exp > 0) => 1 <= exp <= 18, 1 <= q <= 16, 2 <= q + exp <= 20
- // (the upper limit of 20 on q + exp is due to the fact that
- // +/-C * 10^exp is guaranteed to fit in 64 bits)
+ // (the upper limit of 20 on q + exp is due to the fact that
+ // +/-C * 10^exp is guaranteed to fit in 64 bits)
// res = +/-C * 10^exp (exact)
if (x_sign)
res = -C1 * ten2k64[exp];
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = C1 * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
else
res = C1;
} else { // if (exp > 0) => 1 <= exp <= 18, 1 <= q <= 16, 2 <= q + exp <= 20
- // (the upper limit of 20 on q + exp is due to the fact that
- // +/-C * 10^exp is guaranteed to fit in 64 bits)
+ // (the upper limit of 20 on q + exp is due to the fact that
+ // +/-C * 10^exp is guaranteed to fit in 64 bits)
// res = +/-C * 10^exp (exact)
if (x_sign)
res = -C1 * ten2k64[exp];
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = C1 * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
else
res = C1;
} else { // if (exp > 0) => 1 <= exp <= 18, 1 <= q <= 16, 2 <= q + exp <= 20
- // (the upper limit of 20 on q + exp is due to the fact that
- // +/-C * 10^exp is guaranteed to fit in 64 bits)
+ // (the upper limit of 20 on q + exp is due to the fact that
+ // +/-C * 10^exp is guaranteed to fit in 64 bits)
// res = +/-C * 10^exp (exact)
if (x_sign)
res = -C1 * ten2k64[exp];
#if DECIMAL_CALL_BY_REFERENCE
void
bid64_to_int64_xint (SINT64 * pres, UINT64 * px
- _EXC_FLAGS_PARAM _EXC_MASKS_PARAM _EXC_INFO_PARAM)
+ _EXC_FLAGS_PARAM _EXC_MASKS_PARAM _EXC_INFO_PARAM)
{
UINT64 x = *px;
#else
SINT64
bid64_to_int64_xint (UINT64 x
- _EXC_FLAGS_PARAM _EXC_MASKS_PARAM _EXC_INFO_PARAM)
+ _EXC_FLAGS_PARAM _EXC_MASKS_PARAM _EXC_INFO_PARAM)
{
#endif
SINT64 res;
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = C1 * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
else
res = C1;
} else { // if (exp > 0) => 1 <= exp <= 18, 1 <= q <= 16, 2 <= q + exp <= 20
- // (the upper limit of 20 on q + exp is due to the fact that
- // +/-C * 10^exp is guaranteed to fit in 64 bits)
+ // (the upper limit of 20 on q + exp is due to the fact that
+ // +/-C * 10^exp is guaranteed to fit in 64 bits)
// res = +/-C * 10^exp (exact)
if (x_sign)
res = -C1 * ten2k64[exp];
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = (C1 + 1/2 * 10^x) * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
else
res = C1;
} else { // if (exp > 0) => 1 <= exp <= 18, 1 <= q <= 16, 2 <= q + exp <= 20
- // (the upper limit of 20 on q + exp is due to the fact that
- // +/-C * 10^exp is guaranteed to fit in 64 bits)
+ // (the upper limit of 20 on q + exp is due to the fact that
+ // +/-C * 10^exp is guaranteed to fit in 64 bits)
// res = +/-C * 10^exp (exact)
if (x_sign)
res = -C1 * ten2k64[exp];
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = (C1 + 1/2 * 10^x) * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
// f* > 1/2 and the result may be exact
tmp64 = fstar.w[0] - 0x8000000000000000ull; // f* - 1/2
if ((tmp64 > ten2mk128trunc[ind - 1].w[1])) {
- // ten2mk128trunc[ind -1].w[1] is identical to
+ // ten2mk128trunc[ind -1].w[1] is identical to
// ten2mk128[ind -1].w[1]
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
// Calculate f2* - 1/2
tmp64 = fstar.w[1] - onehalf128[ind - 1];
if (tmp64 || fstar.w[0] > ten2mk128trunc[ind - 1].w[1]) {
- // ten2mk128trunc[ind -1].w[1] is identical to
+ // ten2mk128trunc[ind -1].w[1] is identical to
// ten2mk128[ind -1].w[1]
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
else
res = C1;
} else { // if (exp > 0) => 1 <= exp <= 18, 1 <= q <= 16, 2 <= q + exp <= 20
- // (the upper limit of 20 on q + exp is due to the fact that
- // +/-C * 10^exp is guaranteed to fit in 64 bits)
+ // (the upper limit of 20 on q + exp is due to the fact that
+ // +/-C * 10^exp is guaranteed to fit in 64 bits)
// res = +/-C * 10^exp (exact)
if (x_sign)
res = -C1 * ten2k64[exp];
// Note: 0x9fffffffb*10^(q-11) has q-1 or q digits, where q <= 16
tmp64 = 0x9fffffffbull * ten2k64[q - 11];
if (C1 >= tmp64) {
- // set invalid flag
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return Integer Indefinite
+ // return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
res = 0x00000001; // return +1
}
} else { // if (1 <= q + exp <= 10, 1 <= q <= 16, -15 <= exp <= 9)
- // -2^32-1/2 <= x <= -1 or 1 <= x < 2^32-1/2 so if positive, x can be
+ // -2^32-1/2 <= x <= -1 or 1 <= x < 2^32-1/2 so if positive, x can be
// rounded to nearest to a 32-bit unsigned integer
if (x_sign) { // x <= -1
// set invalid flag
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = (C1 + 1/2 * 10^x) * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
// check for midpoints
if ((fstar.w[1] == 0) && fstar.w[0] &&
(fstar.w[0] <= ten2mk128trunc[ind - 1].w[1])) {
- // ten2mk128trunc[ind -1].w[1] is identical to
+ // ten2mk128trunc[ind -1].w[1] is identical to
// ten2mk128[ind -1].w[1]
// the result is a midpoint; round to nearest
if (Cstar & 0x01) { // Cstar is odd; MP in [EVEN, ODD]
// Note: 0x9fffffffb*10^(q-11) has q-1 or q digits, where q <= 16
tmp64 = 0x9fffffffbull * ten2k64[q - 11];
if (C1 >= tmp64) {
- // set invalid flag
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return Integer Indefinite
+ // return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// set inexact flag
*pfpsf |= INEXACT_EXCEPTION;
} else { // if (1 <= q + exp <= 10, 1 <= q <= 16, -15 <= exp <= 9)
- // -2^32-1/2 <= x <= -1 or 1 <= x < 2^32-1/2 so if positive, x can be
+ // -2^32-1/2 <= x <= -1 or 1 <= x < 2^32-1/2 so if positive, x can be
// rounded to nearest to a 32-bit unsigned integer
if (x_sign) { // x <= -1
// set invalid flag
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = (C1 + 1/2 * 10^x) * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
// f* > 1/2 and the result may be exact
tmp64 = fstar.w[0] - 0x8000000000000000ull; // f* - 1/2
if ((tmp64 > ten2mk128trunc[ind - 1].w[1])) {
- // ten2mk128trunc[ind -1].w[1] is identical to
+ // ten2mk128trunc[ind -1].w[1] is identical to
// ten2mk128[ind -1].w[1]
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
// Calculate f2* - 1/2
tmp64 = fstar.w[1] - onehalf128[ind - 1];
if (tmp64 || fstar.w[0] > ten2mk128trunc[ind - 1].w[1]) {
- // ten2mk128trunc[ind -1].w[1] is identical to
+ // ten2mk128trunc[ind -1].w[1] is identical to
// ten2mk128[ind -1].w[1]
// set the inexact flag
*pfpsf |= INEXACT_EXCEPTION;
// check for midpoints
if ((fstar.w[1] == 0) && fstar.w[0] &&
(fstar.w[0] <= ten2mk128trunc[ind - 1].w[1])) {
- // ten2mk128trunc[ind -1].w[1] is identical to
+ // ten2mk128trunc[ind -1].w[1] is identical to
// ten2mk128[ind -1].w[1]
// the result is a midpoint; round to nearest
if (Cstar & 0x01) { // Cstar is odd; MP in [EVEN, ODD]
// Note: 0xa00000000*10^(q-11) has q-1 or q digits, where q <= 16
tmp64 = 0xa00000000ull * ten2k64[q - 11];
if (C1 >= tmp64) {
- // set invalid flag
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return Integer Indefinite
+ // return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = C1 * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
// Note: 0xa00000000*10^(q-11) has q-1 or q digits, where q <= 16
tmp64 = 0xa00000000ull * ten2k64[q - 11];
if (C1 >= tmp64) {
- // set invalid flag
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return Integer Indefinite
+ // return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = C1 * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
// Note: 0x9fffffff6*10^(q-11) has q-1 or q digits, where q <= 16
tmp64 = 0x9fffffff6ull * ten2k64[q - 11];
if (C1 > tmp64) {
- // set invalid flag
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return Integer Indefinite
+ // return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
res = 0x00000001;
BID_RETURN (res);
} else { // if (1 <= q + exp <= 10, 1 <= q <= 16, -15 <= exp <= 9)
- // x <= -1 or 1 <= x <= 2^32 - 1 so if positive, x can be
+ // x <= -1 or 1 <= x <= 2^32 - 1 so if positive, x can be
// rounded to nearest to a 32-bit unsigned integer
if (x_sign) { // x <= -1
// set invalid flag
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = C1 * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
// Note: 0x9fffffff6*10^(q-11) has q-1 or q digits, where q <= 16
tmp64 = 0x9fffffff6ull * ten2k64[q - 11];
if (C1 > tmp64) {
- // set invalid flag
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return Integer Indefinite
+ // return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
res = 0x00000001;
BID_RETURN (res);
} else { // if (1 <= q + exp <= 10, 1 <= q <= 16, -15 <= exp <= 9)
- // x <= -1 or 1 <= x < 2^32 so if positive, x can be
+ // x <= -1 or 1 <= x < 2^32 so if positive, x can be
// rounded to nearest to a 32-bit unsigned integer
if (x_sign) { // x <= -1
// set invalid flag
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = C1 * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
#if DECIMAL_CALL_BY_REFERENCE
void
bid64_to_uint32_int (unsigned int *pres, UINT64 * px
- _EXC_FLAGS_PARAM _EXC_MASKS_PARAM _EXC_INFO_PARAM)
+ _EXC_FLAGS_PARAM _EXC_MASKS_PARAM _EXC_INFO_PARAM)
{
UINT64 x = *px;
#else
unsigned int
bid64_to_uint32_int (UINT64 x
- _EXC_FLAGS_PARAM _EXC_MASKS_PARAM _EXC_INFO_PARAM)
+ _EXC_FLAGS_PARAM _EXC_MASKS_PARAM _EXC_INFO_PARAM)
{
#endif
unsigned int res;
// Note: 0xa00000000*10^(q-11) has q-1 or q digits, where q <= 16
tmp64 = 0xa00000000ull * ten2k64[q - 11];
if (C1 >= tmp64) {
- // set invalid flag
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return Integer Indefinite
+ // return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
res = 0x00000000;
BID_RETURN (res);
} else { // if (1 <= q + exp <= 10, 1 <= q <= 16, -15 <= exp <= 9)
- // x <= -1 or 1 <= x < 2^32 so if positive, x can be
+ // x <= -1 or 1 <= x < 2^32 so if positive, x can be
// rounded to nearest to a 32-bit unsigned integer
if (x_sign) { // x <= -1
// set invalid flag
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = C1 * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
// Note: 0xa00000000*10^(q-11) has q-1 or q digits, where q <= 16
tmp64 = 0xa00000000ull * ten2k64[q - 11];
if (C1 >= tmp64) {
- // set invalid flag
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return Integer Indefinite
+ // return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
res = 0x00000000;
BID_RETURN (res);
} else { // if (1 <= q + exp <= 10, 1 <= q <= 16, -15 <= exp <= 9)
- // x <= -1 or 1 <= x < 2^32 so if positive, x can be
+ // x <= -1 or 1 <= x < 2^32 so if positive, x can be
// rounded to nearest to a 32-bit unsigned integer
if (x_sign) { // x <= -1
// set invalid flag
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = C1 * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
// Note: 0x9fffffffb*10^(q-11) has q-1 or q digits, where q <= 16
tmp64 = 0x9fffffffbull * ten2k64[q - 11];
if (C1 >= tmp64) {
- // set invalid flag
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return Integer Indefinite
+ // return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
res = 0x00000001; // return +1
}
} else { // if (1 <= q + exp <= 10, 1 <= q <= 16, -15 <= exp <= 9)
- // -2^32-1/2 <= x <= -1 or 1 <= x < 2^32-1/2 so if positive, x can be
+ // -2^32-1/2 <= x <= -1 or 1 <= x < 2^32-1/2 so if positive, x can be
// rounded to nearest to a 32-bit unsigned integer
if (x_sign) { // x <= -1
// set invalid flag
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = (C1 + 1/2 * 10^x) * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
// Note: 0x9fffffffb*10^(q-11) has q-1 or q digits, where q <= 16
tmp64 = 0x9fffffffbull * ten2k64[q - 11];
if (C1 >= tmp64) {
- // set invalid flag
+ // set invalid flag
*pfpsf |= INVALID_EXCEPTION;
- // return Integer Indefinite
+ // return Integer Indefinite
res = 0x80000000;
BID_RETURN (res);
}
// set inexact flag
*pfpsf |= INEXACT_EXCEPTION;
} else { // if (1 <= q + exp <= 10, 1 <= q <= 16, -15 <= exp <= 9)
- // -2^32-1/2 <= x <= -1 or 1 <= x < 2^32-1/2 so if positive, x can be
+ // -2^32-1/2 <= x <= -1 or 1 <= x < 2^32-1/2 so if positive, x can be
// rounded to nearest to a 32-bit unsigned integer
if (x_sign) { // x <= -1
// set invalid flag
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = (C1 + 1/2 * 10^x) * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
// else cases that can be rounded to a 64-bit int fall through
// to '1 <= q + exp <= 20'
} else { // if (2 <= q <= 16) => 5 <= 21 - q <= 19
- // Note: C * 10^(21-q) has 20 or 21 digits; 0x9fffffffffffffffb
+ // Note: C * 10^(21-q) has 20 or 21 digits; 0x9fffffffffffffffb
// has 21 digits
__mul_64x64_to_128MACH (C, C1, ten2k64[21 - q]);
if (C.w[1] > 0x09 ||
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = (C1 + 1/2 * 10^x) * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
// check for midpoints
if ((fstar.w[1] == 0) && fstar.w[0] &&
(fstar.w[0] <= ten2mk128trunc[ind - 1].w[1])) {
- // ten2mk128trunc[ind -1].w[1] is identical to
+ // ten2mk128trunc[ind -1].w[1] is identical to
// ten2mk128[ind -1].w[1]
// the result is a midpoint; round to nearest
if (Cstar & 0x01) { // Cstar is odd; MP in [EVEN, ODD]
// else cases that can be rounded to a 64-bit int fall through
// to '1 <= q + exp <= 20'
} else { // if (2 <= q <= 16) => 5 <= 21 - q <= 19
- // Note: C * 10^(21-q) has 20 or 21 digits; 0x9fffffffffffffffb
+ // Note: C * 10^(21-q) has 20 or 21 digits; 0x9fffffffffffffffb
// has 21 digits
__mul_64x64_to_128MACH (C, C1, ten2k64[21 - q]);
if (C.w[1] > 0x09 ||
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = (C1 + 1/2 * 10^x) * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
// check for midpoints
if ((fstar.w[1] == 0) && fstar.w[0] &&
(fstar.w[0] <= ten2mk128trunc[ind - 1].w[1])) {
- // ten2mk128trunc[ind -1].w[1] is identical to
+ // ten2mk128trunc[ind -1].w[1] is identical to
// ten2mk128[ind -1].w[1]
// the result is a midpoint; round to nearest
if (Cstar & 0x01) { // Cstar is odd; MP in [EVEN, ODD]
// else cases that can be rounded to a 64-bit int fall through
// to '1 <= q + exp <= 20'
} else { // if (2 <= q <= 16) => 5 <= 21 - q <= 19
- // Note: C * 10^(21-q) has 20 or 21 digits; 0xa0000000000000000
+ // Note: C * 10^(21-q) has 20 or 21 digits; 0xa0000000000000000
// has 21 digits
__mul_64x64_to_128MACH (C, C1, ten2k64[21 - q]);
if (C.w[1] >= 0x0a) {
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = C1 * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
// else cases that can be rounded to a 64-bit int fall through
// to '1 <= q + exp <= 20'
} else { // if (2 <= q <= 16) => 5 <= 21 - q <= 19
- // Note: C * 10^(21-q) has 20 or 21 digits; 0xa0000000000000000
+ // Note: C * 10^(21-q) has 20 or 21 digits; 0xa0000000000000000
// has 21 digits
__mul_64x64_to_128MACH (C, C1, ten2k64[21 - q]);
if (C.w[1] >= 0x0a) {
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = C1 * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = C1 * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = C1 * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
#if DECIMAL_CALL_BY_REFERENCE
void
bid64_to_uint64_int (UINT64 * pres, UINT64 * px
- _EXC_FLAGS_PARAM _EXC_MASKS_PARAM _EXC_INFO_PARAM)
+ _EXC_FLAGS_PARAM _EXC_MASKS_PARAM _EXC_INFO_PARAM)
{
UINT64 x = *px;
#else
UINT64
bid64_to_uint64_int (UINT64 x
- _EXC_FLAGS_PARAM _EXC_MASKS_PARAM _EXC_INFO_PARAM)
+ _EXC_FLAGS_PARAM _EXC_MASKS_PARAM _EXC_INFO_PARAM)
{
#endif
UINT64 res;
// else cases that can be rounded to a 64-bit int fall through
// to '1 <= q + exp <= 20'
} else { // if (2 <= q <= 16) => 5 <= 21 - q <= 19
- // Note: C * 10^(21-q) has 20 or 21 digits; 0xa0000000000000000
+ // Note: C * 10^(21-q) has 20 or 21 digits; 0xa0000000000000000
// has 21 digits
__mul_64x64_to_128MACH (C, C1, ten2k64[21 - q]);
if (C.w[1] >= 0x0a) {
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = C1 * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
// else cases that can be rounded to a 64-bit int fall through
// to '1 <= q + exp <= 20'
} else { // if (2 <= q <= 16) => 5 <= 21 - q <= 19
- // Note: C * 10^(21-q) has 20 or 21 digits; 0xa0000000000000000
+ // Note: C * 10^(21-q) has 20 or 21 digits; 0xa0000000000000000
// has 21 digits
__mul_64x64_to_128MACH (C, C1, ten2k64[21 - q]);
if (C.w[1] >= 0x0a) {
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = C1 * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
// else cases that can be rounded to a 64-bit int fall through
// to '1 <= q + exp <= 20'
} else { // if (2 <= q <= 16) => 5 <= 21 - q <= 19
- // Note: C * 10^(21-q) has 20 or 21 digits; 0x9fffffffffffffffb
+ // Note: C * 10^(21-q) has 20 or 21 digits; 0x9fffffffffffffffb
// has 21 digits
__mul_64x64_to_128MACH (C, C1, ten2k64[21 - q]);
if (C.w[1] > 0x09 ||
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = (C1 + 1/2 * 10^x) * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
// else cases that can be rounded to a 64-bit int fall through
// to '1 <= q + exp <= 20'
} else { // if (2 <= q <= 16) => 5 <= 21 - q <= 19
- // Note: C * 10^(21-q) has 20 or 21 digits; 0x9fffffffffffffffb
+ // Note: C * 10^(21-q) has 20 or 21 digits; 0x9fffffffffffffffb
// has 21 digits
__mul_64x64_to_128MACH (C, C1, ten2k64[21 - q]);
if (C.w[1] > 0x09 ||
// C* is actually floor(C*) in this case
// C* and f* need shifting and masking, as shown by
// shiftright128[] and maskhigh128[]
- // 1 <= x <= 15
+ // 1 <= x <= 15
// kx = 10^(-x) = ten2mk64[ind - 1]
// C* = (C1 + 1/2 * 10^x) * 10^(-x)
// the approximation of 10^(-x) was rounded up to 54 bits
// Check for exponent underflow and compensate by shifting the product
// Cut off the process at precision+2, since we can't really shift further
-
+
c_prov = z.w[5];
// Round using round-sticky words
c_prov = z.w[5];
// Test inexactness and underflow (when testing tininess before rounding)
-
+
if ((z.w[4] != 0) || (z.w[3] != 0)) {
*pfpsf |= INEXACT_EXCEPTION;
if (c_prov < 1000000ull)
e_out = e_out + 1;
}
}
-
+
// Check for overflow
if (e_out > 90 + 101) {
if (c_prov < 1000000ull)
*pfpsf |= UNDERFLOW_EXCEPTION;
}
-
+
// Round using round-sticky words
// If we spill over into the next decade, correct
// Flag underflow where it may be needed even for |result| = SNN
if (c_prov < 1000000000000000ull)
*pfpsf |= UNDERFLOW_EXCEPTION;
}
-
+
// Round using round-sticky words
// If we spill over into the next decade, correct
// Flag underflow where it may be needed even for |result| = SNN
if (c_prov < 1000000000000000ull)
*pfpsf |= UNDERFLOW_EXCEPTION;
}
-
+
// Round using round-sticky words
// If we spill over into the next decade, correct
// Flag underflow where it may be needed even for |result| = SNN
#define DECIMAL_CALL_BY_REFERENCE 0
#endif
-// If DECIMAL_GLOBAL_ROUNDING is defined then the rounding mode is a global
+// If DECIMAL_GLOBAL_ROUNDING is defined then the rounding mode is a global
// variable _IDEC_glbround, otherwise it is passed as a parameter when needed
#ifndef DECIMAL_GLOBAL_ROUNDING
#endif
// If DECIMAL_GLOBAL_EXCEPTION_FLAGS is defined then the exception status flags
-// are represented by a global variable _IDEC_glbflags, otherwise they are
+// are represented by a global variable _IDEC_glbflags, otherwise they are
// passed as a parameter when needed
#ifndef DECIMAL_GLOBAL_EXCEPTION_FLAGS
#endif
// If DECIMAL_ALTERNATE_EXCEPTION_HANDLING is defined then the exception masks
-// are examined and exception handling information is provided to the caller
+// are examined and exception handling information is provided to the caller
// if alternate exception handling is necessary
#ifndef DECIMAL_ALTERNATE_EXCEPTION_HANDLING
#if DECIMAL_ALTERNATE_EXCEPTION_HANDLING
// If DECIMAL_GLOBAL_EXCEPTION_MASKS is defined then the exception mask bits
// are represented by a global variable _IDEC_exceptionmasks, otherwise they
- // are passed as a parameter when needed; DECIMAL_GLOBAL_EXCEPTION_MASKS is
+ // are passed as a parameter when needed; DECIMAL_GLOBAL_EXCEPTION_MASKS is
// ignored
// if DECIMAL_ALTERNATE_EXCEPTION_HANDLING is not defined
// **************************************************************************
#define DECIMAL_GLOBAL_EXCEPTION_MASKS 0
// **************************************************************************
- // If DECIMAL_GLOBAL_EXCEPTION_INFO is defined then the alternate exception
- // handling information is represented by a global data structure
+ // If DECIMAL_GLOBAL_EXCEPTION_INFO is defined then the alternate exception
+ // handling information is represented by a global data structure
// _IDEC_glbexcepthandling, otherwise it is passed by reference as a
// parameter when needed; DECIMAL_GLOBAL_EXCEPTION_INFO is ignored
// if DECIMAL_ALTERNATE_EXCEPTION_HANDLING is not defined
// Notes: 1) rnd_mode from _RND_MODE_ARG is used by the caller of a function
// from this library, and can be any name
-// 2) rnd_mode and prnd_mode from _RND_MODE_PARAM are fixed names
+// 2) rnd_mode and prnd_mode from _RND_MODE_PARAM are fixed names
// and *must* be used in the library functions
-// 3) _IDEC_glbround is the fixed name for the global variable holding
+// 3) _IDEC_glbround is the fixed name for the global variable holding
// the rounding mode
#if !DECIMAL_GLOBAL_ROUNDING
#endif
// Notes: 1) pfpsf from _EXC_FLAGS_ARG is used by the caller of a function
-// from this library, and can be any name
-// 2) pfpsf from _EXC_FLAGS_PARAM is a fixed name and *must* be used
+// from this library, and can be any name
+// 2) pfpsf from _EXC_FLAGS_PARAM is a fixed name and *must* be used
// in the library functions
-// 3) _IDEC_glbflags is the fixed name for the global variable holding
+// 3) _IDEC_glbflags is the fixed name for the global variable holding
// the floating-point status flags
#if !DECIMAL_GLOBAL_EXCEPTION_FLAGS
#define _EXC_FLAGS_ARG , pfpsf
// from this library, and can be any name
// 2) exc_mask and pexc_mask from _EXC_MASKS_PARAM are fixed names
// and *must* be used in the library functions
- // 3) _IDEC_glbexceptionmasks is the fixed name for the global
+ // 3) _IDEC_glbexceptionmasks is the fixed name for the global
// variable holding the floating-point exception masks
#if !DECIMAL_GLOBAL_EXCEPTION_MASKS
#if DECIMAL_CALL_BY_REFERENCE
// Notes: 1) pexc_info from _EXC_INFO_ARG is used by the caller of a function
// from this library, and can be any name
- // 2) pexc_info from _EXC_INFO_PARAM is a fixed name and *must* be
+ // 2) pexc_info from _EXC_INFO_PARAM is a fixed name and *must* be
// used in the library functions
- // 3) _IDEC_glbexcepthandling is the fixed name for the global
+ // 3) _IDEC_glbexcepthandling is the fixed name for the global
// variable holding the floating-point exception information
#if !DECIMAL_GLOBAL_EXCEPTION_INFO
#define _EXC_INFO_ARG , pexc_info
#include "bid_internal.h"
#define FENCE __fence
-//#define FENCE
+//#define FENCE
//#define DOUBLE_EXTENDED_ON
pCR->w[0] = CX.w[0] - pCQ->w[0] * CY.w[0];
} else {
- // This path works for CX<2^116 only
+ // This path works for CX<2^116 only
// 2^64
d64.i = 0x43f0000000000000;
if (CY.w[1] < (UINT64) (1 << (64 - 51))
&& (__unsigned_compare_gt_128 (CX, CY51))) {
- // Q > 2^51
+ // Q > 2^51
// 2^(-49)*CX/CY
d49.i = 0x3ce0000000000000ull;
int_double t64, d49, d60;
double lx, ly, lq, d128, d192;
- // the quotient is assumed to be at most 113 bits,
+ // the quotient is assumed to be at most 113 bits,
// as needed by BID128 divide routines
// initial dividend
&&
(__unsigned_compare_gt_128 (CA4, CY51))))
{
- // Q > 2^51
+ // Q > 2^51
// 2^(-49)*CA4/CY
d49.i = 0x3ce0000000000000ull;
if ((comb & 0x1f000) == 0x1f000) { //NaN
ba &= 0xfe003fffffffffffULL; // make exponent 0
- bcoeff &= 0x00003fffffffffffull; // NaN payloat is only T.
+ bcoeff &= 0x00003fffffffffffull; // NaN payloat is only T.
if ((bcoeff > 0x0000314dc6448d93ULL) || // significand is non-canonical
((bcoeff == 0x0000314dc6448d93ULL)
&& (bb >= 0x38c15b0a00000000ULL))
if ((comb & 0x7c0) == 0x7c0) { //NaN
ba &= 0xfe0fffff; // make exponent 0
- bcoeff &= 0x000fffff; // NaN payloat is only T.
+ bcoeff &= 0x000fffff; // NaN payloat is only T.
if (bcoeff >= 1000000)
ba &= 0xfff00000; //treat non-canonical significand
return ba;
if ((comb & 0x1f00) == 0x1f00) { //NaN
ba &= 0xfe03ffffffffffffULL; // make exponent 0
- bcoeff &= 0x0003ffffffffffffull; // NaN payloat is only T.
+ bcoeff &= 0x0003ffffffffffffull; // NaN payloat is only T.
if (bcoeff >= 1000000000000000ull)
ba &= 0xfe00000000000000ull; // treat non canonical significand and zero G6-G12
return ba;
if ((comb & 0x1f000) == 0x1f000) { //NaN
ba &= 0xfe003fffffffffffULL; // make exponent 0
- bcoeff &= 0x00003fffffffffffull; // NaN payload is only T.
+ bcoeff &= 0x00003fffffffffffull; // NaN payload is only T.
if ((bcoeff > 0x0000314dc6448d93ULL) || // significand is non-canonical
((bcoeff == 0x0000314dc6448d93ULL)
// Note the following definitions from bid_conf.h: if the status flags are
// global, they have a fixed name recognized by the library functions:
// _IDEC_glbflags; pfpsf, defined as &_IDEC_glbflags, can be used instead; no
-// argument is passed for the status flags to the library functions; if the
+// argument is passed for the status flags to the library functions; if the
// status flags are local then they are passed as an arument, always by
// reference, to the library functions
//
lowerFlags (_IDEC_flags * pflagsmask _EXC_FLAGS_PARAM) {
// *pflagsmask is the logical OR of the flags to be cleared, e.g.
// *pflagsmask =INVALID_EXCEPTION | ZERO_DIVIDE_EXCEPTION | OVERFLOW_EXCEPTION
- // UNDERFLOW_EXCEPTION | INEXACT_EXCEPTION to clear all five IEEE 754R
+ // UNDERFLOW_EXCEPTION | INEXACT_EXCEPTION to clear all five IEEE 754R
// exception flags
*pfpsf = *pfpsf & ~(*pflagsmask & BID_IEEE_FLAGS);
}
void
lowerFlags (_IDEC_flags flagsmask _EXC_FLAGS_PARAM) {
// flagsmask is the logical OR of the flags to be cleared, e.g.
- // flagsmask = INVALID_EXCEPTION | ZERO_DIVIDE_EXCEPTION | OVERFLOW_EXCEPTION
- // UNDERFLOW_EXCEPTION | INEXACT_EXCEPTION to clear all five IEEE 754R
+ // flagsmask = INVALID_EXCEPTION | ZERO_DIVIDE_EXCEPTION | OVERFLOW_EXCEPTION
+ // UNDERFLOW_EXCEPTION | INEXACT_EXCEPTION to clear all five IEEE 754R
// exception flags
*pfpsf = *pfpsf & ~(flagsmask & BID_IEEE_FLAGS);
}
void
testFlags (_IDEC_flags * praised,
_IDEC_flags * pflagsmask _EXC_FLAGS_PARAM) {
- // *praised is a pointer to the result, i.e. the logical OR of the flags
+ // *praised is a pointer to the result, i.e. the logical OR of the flags
// selected by *pflagsmask that are set; e.g. if
// *pflagsmask = INVALID_EXCEPTION | UNDERFLOW_EXCEPTION | INEXACT_EXCEPTION
- // and only the invalid and inexact flags are raised (set) then upon return
+ // and only the invalid and inexact flags are raised (set) then upon return
// *praised = INVALID_EXCEPTION | INEXACT_EXCEPTION
*praised = *pfpsf & (*pflagsmask & BID_IEEE_FLAGS);
}
_IDEC_flags
testFlags (_IDEC_flags flagsmask _EXC_FLAGS_PARAM) {
_IDEC_flags raised;
- // the raturn value raised is the logical OR of the flags
+ // the raturn value raised is the logical OR of the flags
// selected by flagsmask, that are set; e.g. if
// flagsmask = INVALID_EXCEPTION | UNDERFLOW_EXCEPTION | INEXACT_EXCEPTION and
// only the invalid and inexact flags are raised (set) then the return value
restoreFlags (_IDEC_flags flagsvalues,
_IDEC_flags flagsmask _EXC_FLAGS_PARAM) {
// restore the status flags selected by flagsmask to the values speciafied
- // (as a logical OR) in flagsvalues; e.g. if
+ // (as a logical OR) in flagsvalues; e.g. if
// flagsmask = INVALID_EXCEPTION | UNDERFLOW_EXCEPTION | INEXACT_EXCEPTION
- // and only the invalid and inexact flags are raised (set) in flagsvalues
+ // and only the invalid and inexact flags are raised (set) in flagsvalues
// then upon return the invalid status flag will be set, the underflow status
// flag will be clear, and the inexact status flag will be set
*pfpsf = *pfpsf & ~(flagsmask & BID_IEEE_FLAGS);
// *pflagsmask; e.g. if
// *pflagsmask = INVALID_EXCEPTION | UNDERFLOW_EXCEPTION | INEXACT_EXCEPTION
// and only the invalid and inexact flags are raised (set) in the status word,
- // then upon return the value in *pflagsvalues will have the invalid status
+ // then upon return the value in *pflagsvalues will have the invalid status
// flag set, the underflow status flag clear, and the inexact status flag set
*pflagsvalues = *pfpsf & (*pflagsmask & BID_IEEE_FLAGS);
}
_IDEC_flags
saveFlags (_IDEC_flags flagsmask _EXC_FLAGS_PARAM) {
_IDEC_flags flagsvalues;
- // return the status flags specified (as a logical OR) in flagsmask; e.g. if
+ // return the status flags specified (as a logical OR) in flagsmask; e.g. if
// flagsmask = INVALID_EXCEPTION | UNDERFLOW_EXCEPTION | INEXACT_EXCEPTION
// and only the invalid and inexact flags are raised (set) in the status word,
- // then the return value will have the invalid status flag set, the
- // underflow status flag clear, and the inexact status flag set
+ // then the return value will have the invalid status flag set, the
+ // underflow status flag clear, and the inexact status flag set
flagsvalues = *pfpsf & (flagsmask & BID_IEEE_FLAGS);
return (flagsvalues);
}
res =
x_sign | 0x6c70000000000000ull | (C & 0x0007ffffffffffffull);
}
- } else { // |C| >= 10^16 and the result may be inexact
+ } else { // |C| >= 10^16 and the result may be inexact
// the smallest |C| is 10^16 which has 17 decimal digits
// the largest |C| is 0x8000000000000000 = 9223372036854775808 w/ 19 digits
- if (C < 0x16345785d8a0000ull) { // x < 10^17
+ if (C < 0x16345785d8a0000ull) { // x < 10^17
q = 17;
ind = 1; // number of digits to remove for q = 17
} else if (C < 0xde0b6b3a7640000ull) { // C < 10^18
q = 18;
- ind = 2; // number of digits to remove for q = 18
+ ind = 2; // number of digits to remove for q = 18
} else { // C < 10^19
q = 19;
ind = 3; // number of digits to remove for q = 19
}
if (res < 0x0020000000000000ull) { // res < 2^53
res = x_sign | (((UINT64) ind + 398) << 53) | res;
- } else { // res >= 2^53
+ } else { // res >= 2^53
res =
x_sign | 0x6000000000000000ull | (((UINT64) ind + 398) << 51) |
(res & 0x0007ffffffffffffull);
} else { // x >= 2^53
res = 0x6c70000000000000ull | (x & 0x0007ffffffffffffull);
}
- } else { // x >= 10^16 and the result may be inexact
+ } else { // x >= 10^16 and the result may be inexact
// the smallest x is 10^16 which has 17 decimal digits
// the largest x is 0xffffffffffffffff = 18446744073709551615 w/ 20 digits
- if (x < 0x16345785d8a0000ull) { // x < 10^17
+ if (x < 0x16345785d8a0000ull) { // x < 10^17
q = 17;
ind = 1; // number of digits to remove for q = 17
} else if (x < 0xde0b6b3a7640000ull) { // x < 10^18
q = 18;
- ind = 2; // number of digits to remove for q = 18
+ ind = 2; // number of digits to remove for q = 18
} else if (x < 0x8ac7230489e80000ull) { // x < 10^19
q = 19;
ind = 3; // number of digits to remove for q = 19
}
if (res < 0x0020000000000000ull) { // res < 2^53
res = (((UINT64) ind + 398) << 53) | res;
- } else { // res >= 2^53
+ } else { // res >= 2^53
res = 0x6000000000000000ull | (((UINT64) ind + 398) << 51) |
(res & 0x0007ffffffffffffull);
}
_Decimal32 d;
UINT32 i;
};
-
+
union decimal64 {
_Decimal64 d;
UINT64 i;
};
-
+
union decimal128 {
_Decimal128 d;
UINT128 i;
};
-
+
#if BID_HAS_TF_MODE
union float128 {
TFtype f;
* Helper add functions (for fma)
*
* __BID_INLINE__ UINT64 get_add64(
- * UINT64 sign_x, int exponent_x, UINT64 coefficient_x,
- * UINT64 sign_y, int exponent_y, UINT64 coefficient_y,
+ * UINT64 sign_x, int exponent_x, UINT64 coefficient_x,
+ * UINT64 sign_y, int exponent_y, UINT64 coefficient_y,
* int rounding_mode)
*
* __BID_INLINE__ UINT64 get_add128(
- * UINT64 sign_x, int exponent_x, UINT64 coefficient_x,
- * UINT64 sign_y, int final_exponent_y, UINT128 CY,
+ * UINT64 sign_x, int exponent_x, UINT64 coefficient_x,
+ * UINT64 sign_y, int final_exponent_y, UINT128 CY,
* int extra_digits, int rounding_mode)
*
*****************************************************************************
*
* Algorithm description:
*
- * get_add64: same as BID64 add, but arguments are unpacked and there
+ * get_add64: same as BID64 add, but arguments are unpacked and there
* are no special case checks
*
- * get_add128: add 64-bit coefficient to 128-bit product (which contains
- * 16+extra_digits decimal digits),
+ * get_add128: add 64-bit coefficient to 128-bit product (which contains
+ * 16+extra_digits decimal digits),
* return BID64 result
- * - the exponents are compared and the two coefficients are
+ * - the exponents are compared and the two coefficients are
* properly aligned for addition/subtraction
* - multiple paths are needed
* - final result exponent is calculated and the lower term is
- * rounded first if necessary, to avoid manipulating
- * coefficients longer than 128 bits
+ * rounded first if necessary, to avoid manipulating
+ * coefficients longer than 128 bits
*
****************************************************************************/
///////////////////////////////////////////////////////////////////////
//
-// get_add64() is essentially the same as bid_add(), except that
+// get_add64() is essentially the same as bid_add(), except that
// the arguments are unpacked
//
//////////////////////////////////////////////////////////////////////
amount = short_recip_scale[extra_digits];
C0_64 = CT.w[1] >> amount;
- // result coefficient
+ // result coefficient
C64 = C0_64 + coefficient_a;
// filter out difficult (corner) cases
- // the following test is equivalent to
- // ( (initial_coefficient_a + Ts) < P_ca &&
- // (initial_coefficient_a + Ts) > P_ca_m1 ),
- // which ensures the number of digits in coefficient_a does not change
+ // the following test is equivalent to
+ // ( (initial_coefficient_a + Ts) < P_ca &&
+ // (initial_coefficient_a + Ts) > P_ca_m1 ),
+ // which ensures the number of digits in coefficient_a does not change
// after adding (the appropriately scaled and rounded) coefficient_b
if ((UINT64) (C64 - 1000000000000000ull - 1) >
9000000000000000ull - 2) {
amount = short_recip_scale[extra_digits];
C0_64 = CT.w[1] >> amount;
- // result coefficient
+ // result coefficient
C64 = C0_64 + coefficient_a;
} else if (C64 <= 1000000000000000ull) {
// less than 16 digits in result
amount = short_recip_scale[extra_digits];
C0_64 = CT_new.w[1] >> amount;
- // result coefficient
+ // result coefficient
C64_new = C0_64 + coefficient_a;
if (C64_new < 10000000000000000ull) {
C64 = C64_new;
if (rmode == 0) //ROUNDING_TO_NEAREST
#endif
if (C64 & 1) {
- // check whether fractional part of initial_P/10^extra_digits
+ // check whether fractional part of initial_P/10^extra_digits
// is exactly .5
- // this is the same as fractional part of
+ // this is the same as fractional part of
// (initial_P + 0.5*10^extra_digits)/10^extra_digits is exactly zero
// get remainder
if (rmode == 0) //ROUNDING_TO_NEAREST
#endif
if (C64 & 1) {
- // check whether fractional part of initial_P/10^extra_digits
+ // check whether fractional part of initial_P/10^extra_digits
// is exactly .5
// get remainder
if (rmode == 0) //ROUNDING_TO_NEAREST
#endif
if (!remainder_P && (C64 & 1)) {
- // check whether fractional part of initial_P/10^extra_digits
+ // check whether fractional part of initial_P/10^extra_digits
// is exactly .5
// get remainder
return __bid_full_round64 (sign_y, exponent_y, CT, extra_digits,
rounding_mode, fpsc);
}
- // diff_dec2+extra_digits is the number of digits to eliminate from
+ // diff_dec2+extra_digits is the number of digits to eliminate from
// argument CY
diff_dec2 = exponent_x - final_exponent_y;
CX.w[0] = (coefficient_x + sign_x) ^ sign_x;
CX.w[1] = sign_x;
- // check whether CY (rounded to 16 digits) and CX have
+ // check whether CY (rounded to 16 digits) and CX have
// any digits in the same position
diff_dec2 = final_exponent_y - exponent_x;
//////////////////////////////////////////////////////////////////////////
//
-// 0*10^ey + cz*10^ez, ey<ez
+// 0*10^ey + cz*10^ez, ey<ez
//
//////////////////////////////////////////////////////////////////////////
* Add/Subtract Macros
*
*********************************************************************/
-// add 64-bit value to 128-bit
+// add 64-bit value to 128-bit
#define __add_128_64(R128, A128, B64) \
{ \
UINT64 R64H; \
R64H ++; \
(R128).w[1] = R64H; \
}
-// subtract 64-bit value from 128-bit
+// subtract 64-bit value from 128-bit
#define __sub_128_64(R128, A128, B64) \
{ \
UINT64 R64H; \
(R128).w[1] = R64H; \
(R128).w[0] = (A128).w[0] - (B64); \
}
-// add 128-bit value to 128-bit
+// add 128-bit value to 128-bit
// assume no carry-out
#define __add_128_128(R128, A128, B128) \
{ \
S = X1 - Y; \
CY = ((S>X1) || (X1>X0)) ? 1 : 0; \
}
-// increment C128 and check for rounding overflow:
+// increment C128 and check for rounding overflow:
// if (C_128) = 10^34 then (C_128) = 10^33 and increment the exponent
#define INCREMENT(C_128, exp) \
{ \
} \
}
// decrement C128 and check for rounding underflow, but only at the
-// boundary: if C_128 = 10^33 - 1 and exp > 0 then C_128 = 10^34 - 1
-// and decrement the exponent
+// boundary: if C_128 = 10^33 - 1 and exp > 0 then C_128 = 10^34 - 1
+// and decrement the exponent
#define DECREMENT(C_128, exp) \
{ \
C_128.w[0]--; \
\
(P) = PH + (PM>>32); \
}
-// get full 64x64bit product
+// get full 64x64bit product
//
#define __mul_64x64_to_128_full(P, CX, CY) \
{ \
//
-// No overflow/underflow checking
+// No overflow/underflow checking
//
__BID_INLINE__ UINT64
fast_get_BID64 (UINT64 sgn, int expon, UINT64 coeff) {
//
-// No overflow/underflow checking
+// No overflow/underflow checking
// or checking for coefficients equal to 10^16 (after rounding)
//
__BID_INLINE__ UINT64
amount = recip_scale[extra_digits];
C64 = QH >> amount;
- //__shr_128(C128, Q_high, amount);
+ //__shr_128(C128, Q_high, amount);
#ifndef IEEE_ROUND_NEAREST_TIES_AWAY
#ifndef IEEE_ROUND_NEAREST
//
-// This pack macro doesnot check for coefficients above 2^53
+// This pack macro does not check for coefficients above 2^53
//
__BID_INLINE__ UINT64
get_BID64_small_mantissa (UINT64 sgn, int expon, UINT64 coeff,
pcoefficient_x->w[1] = x.w[1] & SINFINITY_MASK64;
}
*pexponent_x = 0;
- return 0; // NaN or Infinity
+ return 0; // NaN or Infinity
}
coeff.w[0] = x.w[0];
pcoefficient_x->w[0] = 0;
}
*pexponent_x = 0;
- return 0; // NaN or Infinity
+ return 0; // NaN or Infinity
}
coeff.w[0] = px->w[0];
//
-// Macro used for conversions from string
-// (no additional arguments given for rounding mode, status flags)
+// Macro used for conversions from string
+// (no additional arguments given for rounding mode, status flags)
//
__BID_INLINE__ UINT128 *
get_BID128_string (UINT128 * pres, UINT64 sgn, int expon, UINT128 coeff) {
}
//
-// General pack macro for BID32
+// General pack macro for BID32
//
__BID_INLINE__ UINT32
get_BID32 (UINT32 sgn, int expon, UINT64 coeff, int rmode,
* Note: assume invalid encodings are not passed to this function
*
* Round a number C with q decimal digits, represented as a binary integer
- * to q - x digits. Six different routines are provided for different values
- * of q. The maximum value of q used in the library is q = 3 * P - 1 where
- * P = 16 or P = 34 (so q <= 111 decimal digits).
+ * to q - x digits. Six different routines are provided for different values
+ * of q. The maximum value of q used in the library is q = 3 * P - 1 where
+ * P = 16 or P = 34 (so q <= 111 decimal digits).
* The partitioning is based on the following, where Kx is the scaled
* integer representing the value of 10^(-x) rounded up to a number of bits
* sufficient to ensure correct rounding:
*
* --------------------------------------------------------------------------
- * q x max. value of a max number min. number
+ * q x max. value of a max number min. number
* of bits in C of bits in Kx
* --------------------------------------------------------------------------
*
* GROUP 6: 384 bits
* round384_97_115 ()
*
- * 97 [1,96] 10^97 - 1 < 2^322.23 323 324
+ * 97 [1,96] 10^97 - 1 < 2^322.23 323 324
* ... ... ... ... ...
* 115 [1,114] 10^115 - 1 < 2^382.03 383 384
*
int ind;
// Note:
- // In round128_2_18() positive numbers with 2 <= q <= 18 will be
+ // In round128_2_18() positive numbers with 2 <= q <= 18 will be
// rounded to nearest only for 1 <= x <= 3:
// x = 1 or x = 2 when q = 17
// x = 2 or x = 3 when q = 18
int ind;
// Note:
- // In round128_19_38() positive numbers with 19 <= q <= 38 will be
+ // In round128_19_38() positive numbers with 19 <= q <= 38 will be
// rounded to nearest only for 1 <= x <= 23:
// x = 3 or x = 4 when q = 19
// x = 4 or x = 5 when q = 20
// round a number C with q decimal digits, 19 <= q <= 38
// to q - x digits, 1 <= x <= 37
// C = C + 1/2 * 10^x where the result C fits in 128 bits
- // (because the largest value is 99999999999999999999999999999999999999 +
+ // (because the largest value is 99999999999999999999999999999999999999 +
// 5000000000000000000000000000000000000 =
// 0x4efe43b0c573e7e68a043d8fffffffff, which fits is 127 bits)
- ind = x - 1; // 0 <= ind <= 36
+ ind = x - 1; // 0 <= ind <= 36
if (ind <= 18) { // if 0 <= ind <= 18
tmp64 = C.w[0];
C.w[0] = C.w[0] + midpoint64[ind];
// Cstar = P256 >> Ex
// fstar = low Ex bits of P256
shift = Ex128m128[ind]; // in [2, 63] but have to consider two cases
- if (ind <= 18) { // if 0 <= ind <= 18
+ if (ind <= 18) { // if 0 <= ind <= 18
Cstar.w[0] = (P256.w[2] >> shift) | (P256.w[3] << (64 - shift));
Cstar.w[1] = (P256.w[3] >> shift);
fstar.w[0] = P256.w[0];
int ind;
// Note:
- // In round192_39_57() positive numbers with 39 <= q <= 57 will be
+ // In round192_39_57() positive numbers with 39 <= q <= 57 will be
// rounded to nearest only for 5 <= x <= 42:
// x = 23 or x = 24 or x = 5 or x = 6 when q = 39
// x = 24 or x = 25 or x = 6 or x = 7 when q = 40
// Cstar = P384 >> Ex
// fstar = low Ex bits of P384
shift = Ex192m192[ind]; // in [1, 63] but have to consider three cases
- if (ind <= 18) { // if 0 <= ind <= 18
+ if (ind <= 18) { // if 0 <= ind <= 18
Cstar.w[2] = (P384.w[5] >> shift);
Cstar.w[1] = (P384.w[5] << (64 - shift)) | (P384.w[4] >> shift);
Cstar.w[0] = (P384.w[4] << (64 - shift)) | (P384.w[3] >> shift);
int ind;
// Note:
- // In round256_58_76() positive numbers with 58 <= q <= 76 will be
+ // In round256_58_76() positive numbers with 58 <= q <= 76 will be
// rounded to nearest only for 24 <= x <= 61:
// x = 42 or x = 43 or x = 24 or x = 25 when q = 58
// x = 43 or x = 44 or x = 25 or x = 26 when q = 59
// (because the largest value is 9999999999999999999999999999999999999999
// 999999999999999999999999999999999999 + 500000000000000000000000000
// 000000000000000000000000000000000000000000000000 =
- // 0x1736ca15d27a56cae15cf0e7b403d1f2bd6ebb0a50dc83ffffffffffffffffff,
+ // 0x1736ca15d27a56cae15cf0e7b403d1f2bd6ebb0a50dc83ffffffffffffffffff,
// which fits in 253 bits)
ind = x - 1; // 0 <= ind <= 74
if (ind <= 18) { // if 0 <= ind <= 18
// Cstar = P512 >> Ex
// fstar = low Ex bits of P512
shift = Ex256m256[ind]; // in [0, 63] but have to consider four cases
- if (ind <= 18) { // if 0 <= ind <= 18
+ if (ind <= 18) { // if 0 <= ind <= 18
Cstar.w[3] = (P512.w[7] >> shift);
Cstar.w[2] = (P512.w[7] << (64 - shift)) | (P512.w[6] >> shift);
Cstar.w[1] = (P512.w[6] << (64 - shift)) | (P512.w[5] >> shift);
if (ES.w[0])
ES.w[1]--;
- // A*RS*eps
+ // A*RS*eps
__mul_128x128_to_256 (AE, ES, ARS1);
__add_carry_out (S.w[0], CY, ARS00.w[0], AE.w[0]);
__add_carry_in_out (S.w[2], CY, ARS00.w[2], AE.w[2], CY);
S.w[3] = ARS00.w[3] + AE.w[3] + CY;
} else {
- // A*RS*eps
+ // A*RS*eps
__mul_128x128_to_256 (AE, ES, ARS1);
__sub_borrow_out (S.w[0], CY, ARS00.w[0], AE.w[0]);
/* _divsi3 for Lattice Mico32.
- Contributed by Jon Beniston <jon@beniston.com>
-
+ Contributed by Jon Beniston <jon@beniston.com>
+
Copyright (C) 2009-2024 Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
-
+
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
-
+
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
/* _modsi3 for Lattice Mico32.
- Contributed by Jon Beniston <jon@beniston.com>
-
+ Contributed by Jon Beniston <jon@beniston.com>
+
Copyright (C) 2009-2024 Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
-
+
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
-
+
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
/* _mulsi3 for Lattice Mico32.
- Contributed by Jon Beniston <jon@beniston.com>
-
+ Contributed by Jon Beniston <jon@beniston.com>
+
Copyright (C) 2009-2024 Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
-
+
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
-
+
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
/* _udivmodsi4 for Lattice Mico32.
- Contributed by Jon Beniston <jon@beniston.com>
-
+ Contributed by Jon Beniston <jon@beniston.com>
+
Copyright (C) 2009-2024 Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
-
+
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
-
+
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
/* _udivsi3 for Lattice Mico32.
- Contributed by Jon Beniston <jon@beniston.com>
-
+ Contributed by Jon Beniston <jon@beniston.com>
+
Copyright (C) 2009-2024 Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
-
+
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
-
+
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
/* _umodsi3 for Lattice Mico32.
- Contributed by Jon Beniston <jon@beniston.com>
-
+ Contributed by Jon Beniston <jon@beniston.com>
+
Copyright (C) 2009-2024 Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
-
+
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
-
+
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
/* Integer arithmetic support for Lattice Mico32.
- Contributed by Jon Beniston <jon@beniston.com>
-
+ Contributed by Jon Beniston <jon@beniston.com>
+
Copyright (C) 2009-2024 Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
-
+
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
-
+
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#ifndef LIBGCC_LM32_H
-#define LIBGCC_LM32_H
+#define LIBGCC_LM32_H
/* Types. */
= { (func_ptr) (-1) };
/* Run all the global destructors on exit from the program. */
-
+
/* Some systems place the number of pointers in the first word of the
table. On SVR4 however, that word is -1. In all cases, the table is
null-terminated. On SVR4, we start from the beginning of the list and
#define SIGNX(fp) ((fp.l.upper) & SIGNBIT)
#define MANTXMASK 0x7FFFFFFFL /* mask of upper part */
-union double_long
+union double_long
{
double d;
struct {
dl.l.upper |= exp << 20;
dl.l.upper |= mant >> 3;
dl.l.lower = mant << 29;
-
+
return dl.d;
}
dl1.d = a1;
- if (!dl1.l.upper && !dl1.l.lower)
+ if (!dl1.l.upper && !dl1.l.lower)
return 0;
exp = EXPD (dl1) - EXCESSD - 31;
l = MANTD (dl1);
- if (exp > 0)
+ if (exp > 0)
{
/* Return largest integer. */
return SIGND (dl1) ? 0x80000000L : 0x7fffffffL;
/* Table for software lookup divide for Xilinx MicroBlaze.
-
+
Copyright (C) 2009-2024 Free Software Foundation, Inc.
Contributed by Michael Eager <eager@eagercon.com>.
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
-
+
unsigned char _divsi3_table[] =
{
- 0, 0/1, 0/2, 0/3, 0/4, 0/5, 0/6, 0/7,
+ 0, 0/1, 0/2, 0/3, 0/4, 0/5, 0/6, 0/7,
0/8, 0/9, 0/10, 0/11, 0/12, 0/13, 0/14, 0/15,
- 0, 1/1, 1/2, 1/3, 1/4, 1/5, 1/6, 1/7,
+ 0, 1/1, 1/2, 1/3, 1/4, 1/5, 1/6, 1/7,
1/8, 1/9, 1/10, 1/11, 1/12, 1/13, 1/14, 1/15,
- 0, 2/1, 2/2, 2/3, 2/4, 2/5, 2/6, 2/7,
+ 0, 2/1, 2/2, 2/3, 2/4, 2/5, 2/6, 2/7,
2/8, 2/9, 2/10, 2/11, 2/12, 2/13, 2/14, 2/15,
- 0, 3/1, 3/2, 3/3, 3/4, 3/5, 3/6, 3/7,
+ 0, 3/1, 3/2, 3/3, 3/4, 3/5, 3/6, 3/7,
3/8, 3/9, 3/10, 3/11, 3/12, 3/13, 3/14, 3/15,
- 0, 4/1, 4/2, 4/3, 4/4, 4/5, 4/6, 4/7,
+ 0, 4/1, 4/2, 4/3, 4/4, 4/5, 4/6, 4/7,
4/8, 4/9, 4/10, 4/11, 4/12, 4/13, 4/14, 4/15,
- 0, 5/1, 5/2, 5/3, 5/4, 5/5, 5/6, 5/7,
+ 0, 5/1, 5/2, 5/3, 5/4, 5/5, 5/6, 5/7,
5/8, 5/9, 5/10, 5/11, 5/12, 5/13, 5/14, 5/15,
- 0, 6/1, 6/2, 6/3, 6/4, 6/5, 6/6, 6/7,
+ 0, 6/1, 6/2, 6/3, 6/4, 6/5, 6/6, 6/7,
6/8, 6/9, 6/10, 6/11, 6/12, 6/13, 6/14, 6/15,
- 0, 7/1, 7/2, 7/3, 7/4, 7/5, 7/6, 7/7,
+ 0, 7/1, 7/2, 7/3, 7/4, 7/5, 7/6, 7/7,
7/8, 7/9, 7/10, 7/11, 7/12, 7/13, 7/14, 7/15,
- 0, 8/1, 8/2, 8/3, 8/4, 8/5, 8/6, 8/7,
+ 0, 8/1, 8/2, 8/3, 8/4, 8/5, 8/6, 8/7,
8/8, 8/9, 8/10, 8/11, 8/12, 8/13, 8/14, 8/15,
- 0, 9/1, 9/2, 9/3, 9/4, 9/5, 9/6, 9/7,
+ 0, 9/1, 9/2, 9/3, 9/4, 9/5, 9/6, 9/7,
9/8, 9/9, 9/10, 9/11, 9/12, 9/13, 9/14, 9/15,
- 0, 10/1, 10/2, 10/3, 10/4, 10/5, 10/6, 10/7,
+ 0, 10/1, 10/2, 10/3, 10/4, 10/5, 10/6, 10/7,
10/8, 10/9, 10/10, 10/11, 10/12, 10/13, 10/14, 10/15,
- 0, 11/1, 11/2, 11/3, 11/4, 11/5, 11/6, 11/7,
+ 0, 11/1, 11/2, 11/3, 11/4, 11/5, 11/6, 11/7,
11/8, 11/9, 11/10, 11/11, 11/12, 11/13, 11/14, 11/15,
- 0, 12/1, 12/2, 12/3, 12/4, 12/5, 12/6, 12/7,
+ 0, 12/1, 12/2, 12/3, 12/4, 12/5, 12/6, 12/7,
12/8, 12/9, 12/10, 12/11, 12/12, 12/13, 12/14, 12/15,
- 0, 13/1, 13/2, 13/3, 13/4, 13/5, 13/6, 13/7,
+ 0, 13/1, 13/2, 13/3, 13/4, 13/5, 13/6, 13/7,
13/8, 13/9, 13/10, 13/11, 13/12, 13/13, 13/14, 13/15,
- 0, 14/1, 14/2, 14/3, 14/4, 14/5, 14/6, 14/7,
+ 0, 14/1, 14/2, 14/3, 14/4, 14/5, 14/6, 14/7,
14/8, 14/9, 14/10, 14/11, 14/12, 14/13, 14/14, 14/15,
- 0, 15/1, 15/2, 15/3, 15/4, 15/5, 15/6, 15/7,
+ 0, 15/1, 15/2, 15/3, 15/4, 15/5, 15/6, 15/7,
15/8, 15/9, 15/10, 15/11, 15/12, 15/13, 15/14, 15/15,
};
rv += x;
x <<= 1;
y >>= 1;
- }
+ }
return neg ? - rv : rv;
}
{
SItype res = 0;
USItype cnt = a;
-
+
while (cnt)
{
if (cnt & 1)
- res += b;
+ res += b;
b <<= 1;
cnt >>= 1;
}
-
+
return res;
}
/* Integer arithmetic support for Altera Nios II.
-
+
Copyright (C) 2012-2024 Free Software Foundation, Inc.
Contributed by Altera and Mentor Graphics, Inc.
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
-
+
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
-
+
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
__sync_val_compare_and_swap_4 (int *ptr, int oldval, int newval)
{
int actual_oldval, fail;
-
+
while (1)
{
actual_oldval = *ptr;
return actual_oldval;
fail = __kernel_cmpxchg (actual_oldval, newval, ptr);
-
+
if (!fail)
return oldval;
}
/* The sequential registers. */
for (i = 1; i < 24; i++)
NIOS2_REG (i, gregs[i-1]);
-
+
/* The random registers. The kernel stores these in a funny order
in the gregs array. */
NIOS2_REG (RA_REGNO, gregs[23]);
NIOS2_REG (FP_REGNO, gregs[24]);
NIOS2_REG (GP_REGNO, gregs[25]);
NIOS2_REG (EA_REGNO, gregs[27]);
-
+
fs->retaddr_column = EA_REGNO;
fs->signal_frame = 1;
-
+
return _URC_NO_REASON;
}
#undef NIOS2_REG
/* __canonicalize_funcptr_for_compare must be hidden so that it is not
placed in the dynamic symbol table. Like millicode functions, it
- must be linked into all binaries in order access the got table of
+ must be linked into all binaries in order access the got table of
that binary. However, we don't use the millicode calling convention
and the routine must be a normal function so that it can be compiled
as pic code. */
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
-#define EFAULT 14
+#define EFAULT 14
#define EBUSY 16
-#define ENOSYS 251
+#define ENOSYS 251
#define _ASM_EFAULT "-14"
{
long fail;
unsigned int actual_oldval;
-
+
while (1)
{
actual_oldval = atomic_load_4 ((volatile unsigned int *)ptr);
return actual_oldval;
fail = __kernel_cmpxchg (ptr, actual_oldval, newval);
-
+
if (__builtin_expect (!fail, 1))
return actual_oldval;
}
In 2.4, the signal trampoline is 4 words, and (CONTEXT)->ra should
point directly at the beginning of the trampoline and struct rt_sigframe.
- In <= 2.6.5-rc2-pa3, the signal trampoline is 9 words, and
- (CONTEXT)->ra points at the 4th word in the trampoline structure. This
+ In <= 2.6.5-rc2-pa3, the signal trampoline is 9 words, and
+ (CONTEXT)->ra points at the 4th word in the trampoline structure. This
is wrong, it should point at the 5th word. This is fixed in 2.6.5-rc2-pa4.
To detect these cases, we first take (CONTEXT)->ra, align it to 64-bytes
{
return (_U_Qfcmp (a, b, QCMP_EQ) == 0);
}
-
+
int
_U_Qfgt (long double a, long double b)
{
/* Legacy sub-word atomics for RISC-V.
-
+
Copyright (C) 2016-2024 Free Software Foundation, Inc.
This file is part of GCC.
.type \name , @function
\name:
.endm
-
+
/* Start a function in its own section, so that it
can be subject to linker garbage collection. */
.macro START_FUNC name
START_ANOTHER_FUNC \name
.endm
-.macro END_ANOTHER_FUNC name
+.macro END_ANOTHER_FUNC name
.size \name , . - \name
.endm
/* End the function. Set the size. */
-.macro END_FUNC name
+.macro END_FUNC name
END_ANOTHER_FUNC \name
.popsection
.endm
uint pad1 [4];
vstate_t vstate;
char bumper [BUMPER_SIZE];
- int mark;
+ int mark;
} extended_context_t;
typedef struct {
switch (*(ra + 18))
{
/* AIX 5.2 */
- case 0x835a0520: /* lwz r26,1312(r26) */
+ case 0x835a0520: /* lwz r26,1312(r26) */
return (ucontext_t *)(context->cfa + 0x70);
/* AIX 5.3 */
* assembly files.
*
* Copyright (C) 2004-2024 Free Software Foundation, Inc.
- *
+ *
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
- *
+ *
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
+ *
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
- */
+ */
/* These are donated from /usr/include/architecture/ppc . */
} _Tinfo_Node ;
/* KeyMgr 3.x is the first one supporting GCC3 stuff natively. */
-#define KEYMGR_API_MAJOR_GCC3 3
+#define KEYMGR_API_MAJOR_GCC3 3
/* ... with these keys. */
#define KEYMGR_GCC3_LIVE_IMAGE_LIST 301 /* loaded images */
-#define KEYMGR_GCC3_DW2_OBJ_LIST 302 /* Dwarf2 object list */
+#define KEYMGR_GCC3_DW2_OBJ_LIST 302 /* Dwarf2 object list */
/* Node of KEYMGR_GCC3_LIVE_IMAGE_LIST. Info about each resident image. */
struct live_images {
\f
/* These routines are used only on Darwin versions before 10.2.
- Later versions have equivalent code in the system.
+ Later versions have equivalent code in the system.
Eventually, they might go away, although it might be a long time... */
-static void darwin_unwind_dyld_remove_image_hook
+static void darwin_unwind_dyld_remove_image_hook
(struct mach_header *m, unsigned long s);
-static void darwin_unwind_dyld_remove_image_hook
+static void darwin_unwind_dyld_remove_image_hook
(struct mach_header *m, unsigned long s);
extern void __darwin_gcc3_preregister_frame_info (void);
-
+
static void
darwin_unwind_dyld_add_image_hook (struct mach_header *mh, unsigned long slide)
{
/* For each supported Libc, we have to track the code flow
all the way back into the kernel.
-
+
This code is believed to support all released Libc/Libsystem builds since
Jaguar 6C115, including all the security updates. To be precise,
262~1 63~32 6I34-6I35
262~1 63~64 6L29-6L60
262.4.1~1 63~84 6L123-6R172
-
+
320~1 71~101 7B85-7D28
320~1 71~266 7F54-7F56
320~1 71~288 7F112
320.1.3~1 71.1.1~29 7H60-7H105
320.1.3~1 71.1.1~30 7H110-7H113
320.1.3~1 71.1.1~31 7H114
-
+
That's a big table! It would be insane to try to keep track of
every little detail, so we just read the code itself and do what
it would do.
for (;;)
{
uint32_t ins = *pc++;
-
+
if ((ins & 0xFC000003) == 0x48000000) /* b instruction */
{
pc += ((((int32_t) ins & 0x3FFFFFC) ^ 0x2000000) - 0x2000004) / 4;
}
if ((ins & 0xFC0007FF) == 0x7C000378) /* or, including mr */
{
- gprs [ins >> 16 & 0x1F] = (gprs [ins >> 11 & 0x1F]
+ gprs [ins >> 16 & 0x1F] = (gprs [ins >> 11 & 0x1F]
| gprs [ins >> 21 & 0x1F]);
continue;
}
#define UC_DUAL 50
#define UC_DUAL_VEC 55
-struct gcc_ucontext
+struct gcc_ucontext
{
int onstack;
sigset_t sigmask;
struct gcc_mcontext32 *mcontext;
};
-struct gcc_float_vector_state
+struct gcc_float_vector_state
{
double fpregs[32];
uint32_t fpscr_pad;
_Unwind_Ptr new_cfa;
int i;
static _Unwind_Ptr return_addr;
-
+
/* Yay! We're in a Libc that we understand, and it's made a
system call. In Jaguar, this is a direct system call with value 103;
in Panther and Tiger it is a SYS_syscall call for system call number 184,
and in Leopard it is a direct syscall with number 184. */
-
+
if (gprs[0] == 0x67 /* SYS_SIGRETURN */)
{
uctx = (struct gcc_ucontext *) gprs[3];
float_vector_state = &m64->fvs;
new_cfa = m64->gpr[1][1];
-
+
set_offset (R_CR2, &m64->cr);
for (i = 0; i < 32; i++)
set_offset (i, m64->gpr[i] + 1);
set_offset (R_CTR, m64->ctr + 1);
if (is_vector)
set_offset (R_VRSAVE, &m64->vrsave);
-
+
/* Sometimes, srr0 points to the instruction that caused the exception,
and sometimes to the next instruction to be executed; we want
the latter. */
int i;
float_vector_state = &m->fvs;
-
+
new_cfa = m->gpr[1];
set_offset (R_CR2, &m->cr);
fs->regs.cfa_how = CFA_REG_OFFSET;
fs->regs.cfa_reg = __LIBGCC_STACK_POINTER_REGNUM__;
fs->regs.cfa_offset = new_cfa - old_cfa;;
-
+
/* The choice of column for the return address is somewhat tricky.
Fortunately, the actual choice is private to this file, and
the space it's reserved from is the GCC register space, not the
for (i = 0; i < 32; i++)
set_offset (32 + i, float_vector_state->fpregs + i);
set_offset (R_SPEFSCR, &float_vector_state->fpscr);
-
+
if (is_vector)
{
for (i = 0; i < 32; i++)
__gcc_qmul (double a, double b, double c, double d)
{
double xh, xl, t, tau, u, v, w;
-
+
t = a * c; /* Highest order double term. */
if (unlikely (t == 0) /* Preserve -0. */
return t;
/* Sum terms of two highest orders. */
-
+
/* Use fused multiply-add to get low part of a * c. */
#ifndef __NO_FPRS__
asm ("fmsub %0,%1,%2,%3" : "=f"(tau) : "f"(a), "f"(c), "f"(t));
__gcc_qdiv (double a, double b, double c, double d)
{
double xh, xl, s, sigma, t, tau, u, v, w;
-
+
t = a / c; /* highest order double term */
-
+
if (unlikely (t == 0) /* Preserve -0. */
|| nonfinite (t))
return t;
s = c * t; /* (s,sigma) = c*t exactly. */
w = -(-b + d * t); /* Written to get fnmsub for speed, but not
numerically necessary. */
-
+
/* Use fused multiply-add to get low part of c * t. */
#ifndef __NO_FPRS__
asm ("fmsub %0,%1,%2,%3" : "=f"(sigma) : "f"(c), "f"(t), "f"(s));
sigma = fmsub (c, t, s);
#endif
v = a - s;
-
+
tau = ((v-sigma)+w)/c; /* Correction to t. */
u = t + tau;
emulation routines for IEEE 128-bit floating point on pre-ISA 3.0 machines
without the IEEE 128-bit floating point support. */
-#ifndef __NO_FPRS__
+#ifndef __NO_FPRS__
void
__sfp_handle_exceptions (int _fex)
respective FP_EX_* bit. Note, the ISA labels bits from msb to lsb,
so 22 ISA bits above is 22 bits below when counted from the lsb. */
# define FP_TRAPPING_EXCEPTIONS ((_fpscr.i << 22) & FP_EX_ALL)
-
+
# define FP_RND_NEAREST 0x0
# define FP_RND_ZERO 0x1
# define FP_RND_PINF 0x2
# define _FP_DECL_EX \
union { unsigned long long i; double d; } _fpscr __attribute__ ((unused)) = \
{ .i = FP_RND_NEAREST }
-
+
#define FP_INIT_ROUNDMODE \
do { \
__asm__ __volatile__ ("mffs %0" \
}
/* Number big number & +/- inf */
- if (exp >= 40) {
+ if (exp >= 40) {
/* Don't throw an exception for -1p+63 */
if (!SIGN (fl1) || exp > 40 || FRAC (fl1) != 0)
/* C99 Annex F.4 requires an "invalid" exception to be thrown. */
/* Negative. */
if (SIGN (dl1))
- {
+ {
/* Value is <= -1.0
C99 Annex F.4 requires an "invalid" exception to be thrown. */
if (EXPD (dl1) >= EXPONENT_BIAS)
/* Negative. */
if (SIGN (fl1))
- {
+ {
/* Value is <= -1.0
C99 Annex F.4 requires an "invalid" exception to be thrown. */
if (EXP (fl1) >= EXPONENT_BIAS)
if ((EXP (fl1) == 0xff) && (FRAC (fl1) != 0)) /* NaN */
{
/* C99 Annex F.4 requires an "invalid" exception to be thrown. */
- fexceptdiv (0.0, 0.0);
+ fexceptdiv (0.0, 0.0);
return 0x0ULL;
}
" ret\n");
#elif defined __x86_64__
/* See GLIBC for additional information about this technique. */
-asm(".globl _mcount\n"
+asm(".globl _mcount\n"
" .type _mcount, @function\n"
"_mcount:\n"
/* The compiler calls _mcount after the prologue, and does not
3) if builtin_return_address worked, this could be portable.
However, it would really have to be optimized for arguments of 0
and 1 and do something like what we have here in order to avoid the
- trap per function call performance hit.
+ trap per function call performance hit.
4) the atexit and monsetup calls prevent this from simply
being a leaf routine that doesn't do a "save" (and would thus have
access to %o7 and %i7 directly) but the call to write() at the end
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
- *
+ *
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
+ *
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
__ashlsi3 (SItype a, SItype b)
{
word_type i;
-
+
if (b & 16)
a <<= 16;
if (b & 8)
__ashrsi3 (SItype a, SItype b)
{
word_type i;
-
+
if (b & 16)
a >>= 16;
if (b & 8)
__lshrsi3 (USItype a, USItype b)
{
word_type i;
-
+
if (b & 16)
a >>= 16;
if (b & 8)
static inline void
-__int_memcpy (void *__restrict s1, const void *__restrict s2, size_t n)
+__int_memcpy (void *__restrict s1, const void *__restrict s2, size_t n)
{
int value = n;
int loop_var;
}
static inline void
-__shrt_int_memcpy (void *__restrict s1, const void *__restrict s2, size_t n)
+__shrt_int_memcpy (void *__restrict s1, const void *__restrict s2, size_t n)
{
int value = n;
int loop_var;
static inline void
-__byte_memcpy (void *__restrict s1, const void *__restrict s2, size_t n)
+__byte_memcpy (void *__restrict s1, const void *__restrict s2, size_t n)
{
int value = n;
int loop_var;
else if (aug[0] == 'P')
{
_Unwind_Ptr personality;
-
+
p = read_encoded_value (context, *p, p + 1, &personality);
fs->personality = (_Unwind_Personality_Fn) personality;
aug += 1;
(defined(__DragonFly__) || defined(__FreeBSD__) || defined(__NetBSD__))
#define BSD_DL_ITERATE_PHDR_AVAILABLE
#endif
-
+
#if defined(OBJECT_FORMAT_ELF) \
&& !defined(OBJECT_FORMAT_FLAT) \
&& defined(HAVE_LD_EH_FRAME_HDR) \
b32.c = arg_b;
decSingleToWider (&a32.f, &a);
decSingleToWider (&b32.f, &b);
- return dfp_compare_op (op, a, b);
+ return dfp_compare_op (op, a, b);
}
#else
/* decFloat comparisons are supported for decDouble (64 bits) and
a.c = arg_a;
b.c = arg_b;
- return dfp_compare_op (op, a.f, b.f);
+ return dfp_compare_op (op, a.f, b.f);
}
#endif
\f
return STR_TO_BFP (buf, NULL);
}
#endif
-
+
#if defined (L_sf_to_sd) || defined (L_sf_to_dd) || defined (L_sf_to_td) \
|| defined (L_df_to_sd) || defined (L_df_to_dd) || defined (L_df_to_td) \
|| defined (L_kf_to_sd) || defined (L_kf_to_dd) || defined (L_kf_to_td) \
IEEE_TYPE: the corresponding (encoded) IEEE754 type;
(eg decimal32)
-
+
TO_INTERNAL: the name of the decNumber function to convert an
encoded value into the decNumber internal representation;
BFP_VIA_TYPE: Type to which to cast a variable of BPF_TYPE for a
call to sprintf.
-
+
STR_TO_BFP: The function to read the value from a string. */
#if BFP_KIND == 1
#elif WIDTH_TO == 128
#define DFP_TO_DFP DPD_BID_NAME(__dpd_extendsdtd2,__bid_extendsdtd2)
#endif
-#elif WIDTH == 64
+#elif WIDTH == 64
#if WIDTH_TO == 32
#define DFP_TO_DFP DPD_BID_NAME(__dpd_truncddsd2,__bid_truncddsd2)
#elif WIDTH_TO == 128
/* This is a software floating point library which can be used
- for targets without hardware floating point.
+ for targets without hardware floating point.
Copyright (C) 1994-2024 Free Software Foundation, Inc.
This file is part of GCC.
{
#ifdef TFLOAT
return & __thenan_tf;
-#elif defined FLOAT
+#elif defined FLOAT
return & __thenan_sf;
#else
return & __thenan_df;
return x->class == CLASS_ZERO;
}
-INLINE
+INLINE
static void
flip_sign ( fp_number_type * x)
{
#endif
src = &swapped;
#endif
-
+
#if defined TFLOAT && defined HALFFRACBITS
{
halffractype high, low;
-
+
high = src->value_raw >> HALFSHIFT;
low = src->value_raw & (((fractype)1 << HALFSHIFT) - 1);
x >>= 1;
}
}
-#elif defined(FLOAT)
+#elif defined(FLOAT)
/* Multiplying two USIs to get a UDI, we're safe. */
{
UDItype answer = (UDItype)a->fraction.ll * (UDItype)b->fraction.ll;
-
+
high = answer >> BITS_PER_SI;
low = answer;
}
USItype uarg;
int shift;
in.normal_exp = FRACBITS + NGARDS;
- if (in.sign)
+ if (in.sign)
{
/* Special case for minint, since there is no +ve integer
representation for it */
SFtype
__make_fp(fp_class_type class,
unsigned int sign,
- int exp,
+ int exp,
USItype frac)
{
fp_number_type in;
TFtype
__make_tp(fp_class_type class,
unsigned int sign,
- int exp,
+ int exp,
UTItype frac)
{
fp_number_type in;
/* Get file name relocation prefix. Non-absolute values are ignored. */
gcov_prefix = getenv("GCOV_PREFIX");
prefix_length = gcov_prefix ? strlen (gcov_prefix) : 0;
-
+
/* Remove an unnecessary trailing '/' */
if (prefix_length && IS_DIR_SEPARATOR (gcov_prefix[prefix_length - 1]))
prefix_length--;
return;
gcov_do_dump (root->list, root->run_counted, 0);
-
+
root->dumped = 1;
root->run_counted = 1;
}
struct gcov_root __gcov_root;
/* Exactly one of these will be live in the process image. */
-struct gcov_master __gcov_master =
+struct gcov_master __gcov_master =
{GCOV_VERSION, 0};
/* Dynamic pool for gcov_kvp structures. */
gcov_merge_fn merge[GCOV_COUNTERS]; /* merge functions (null for
unused) */
-
+
gcov_unsigned_t n_functions; /* number of functions */
#ifndef IN_GCOV_TOOL
/* Pointer to counters. */
gcov_type *counters;
};
-
+
/* Exactly one of these will be active in the process. */
extern struct gcov_master __gcov_master;
extern struct gcov_kvp *__gcov_kvp_dynamic_pool;
waste. However, some runtime libraries supplied with ICC do contain such
an unorthodox transition, as well as the unwind info to match. This loss
of register restoration doesn't matter in practice, because the exception
- is caught in the native unix abi, where all of the xmm registers are
+ is caught in the native unix abi, where all of the xmm registers are
call clobbered.
Ideally, we'd record some bit to notice when we're failing to restore some
struct auto_ptr_ref
{
_Tp1* _M_ptr;
-
+
explicit
auto_ptr_ref(_Tp1* __p): _M_ptr(__p) { }
} _GLIBCXX11_DEPRECATED;
{
private:
_Tp* _M_ptr;
-
+
public:
/// The pointed-to type.
typedef _Tp element_type;
-
+
/**
* @brief An %auto_ptr is usually constructed from a raw pointer.
* @param __p A pointer (defaults to NULL).
* prohibited. [17.4.3.6]/2
*/
~auto_ptr() { delete _M_ptr; }
-
+
/**
* @brief Smart pointer dereferencing.
*
* what happens when you dereference one of those...)
*/
element_type&
- operator*() const throw()
+ operator*() const throw()
{
__glibcxx_assert(_M_ptr != 0);
- return *_M_ptr;
+ return *_M_ptr;
}
-
+
/**
* @brief Smart pointer dereferencing.
*
* automatically cause to be dereferenced.
*/
element_type*
- operator->() const throw()
+ operator->() const throw()
{
__glibcxx_assert(_M_ptr != 0);
- return _M_ptr;
+ return _M_ptr;
}
-
+
/**
* @brief Bypassing the smart pointer.
* @return The raw pointer being managed.
*/
element_type*
get() const throw() { return _M_ptr; }
-
+
/**
* @brief Bypassing the smart pointer.
* @return The raw pointer being managed.
_M_ptr = 0;
return __tmp;
}
-
+
/**
* @brief Forcibly deletes the managed object.
* @param __p A pointer (defaults to NULL).
_M_ptr = __p;
}
}
-
- /**
+
+ /**
* @brief Automatic conversions
*
* These operations are supposed to convert an %auto_ptr into and from
*/
auto_ptr(auto_ptr_ref<element_type> __ref) throw()
: _M_ptr(__ref._M_ptr) { }
-
+
auto_ptr&
operator=(auto_ptr_ref<element_type> __ref) throw()
{
}
return *this;
}
-
+
template<typename _Tp1>
operator auto_ptr_ref<_Tp1>() throw()
{ return auto_ptr_ref<_Tp1>(this->release()); }
{
typedef typename _Operation::second_argument_type _Arg2_type;
return binder2nd<_Operation>(__fn, _Arg2_type(__x));
- }
+ }
/** @} */
_GLIBCXX_END_NAMESPACE_VERSION
template<>
struct hash<char>
- {
+ {
size_t
operator()(char __x) const
{ return __x; }
template<>
struct hash<unsigned char>
- {
+ {
size_t
operator()(unsigned char __x) const
{ return __x; }
template<>
struct hash<int>
- {
- size_t
- operator()(int __x) const
+ {
+ size_t
+ operator()(int __x) const
{ return __x; }
};
template<>
struct hash<unsigned int>
- {
+ {
size_t
operator()(unsigned int __x) const
{ return __x; }
_Val _M_val;
};
- template<class _Val, class _Key, class _HashFcn, class _ExtractKey,
+ template<class _Val, class _Key, class _HashFcn, class _ExtractKey,
class _EqualKey, class _Alloc = std::allocator<_Val> >
class hashtable;
typedef std::size_t size_type;
typedef _Val& reference;
typedef _Val* pointer;
-
+
_Node* _M_cur;
_Hashtable* _M_ht;
typedef std::size_t size_type;
typedef const _Val& reference;
typedef const _Val* pointer;
-
+
const _Node* _M_cur;
const _Hashtable* _M_ht;
return pos == __last ? *(__last - 1) : *pos;
}
- // Forward declaration of operator==.
+ // Forward declaration of operator==.
template<class _Val, class _Key, class _HF, class _Ex,
class _Eq, class _All>
class hashtable;
// allocator type are identical. This is because, for hashtables,
// this extra storage is negligible. Additionally, a base class
// wouldn't serve any other purposes; it wouldn't, for example,
- // simplify the exception-handling code.
+ // simplify the exception-handling code.
template<class _Val, class _Key, class _HashFcn,
class _ExtractKey, class _EqualKey, class _Alloc>
class hashtable
_ExtractKey _M_get_key;
_Vector_type _M_buckets;
size_type _M_num_elements;
-
+
public:
typedef _Hashtable_iterator<_Val, _Key, _HashFcn, _ExtractKey,
_EqualKey, _Alloc>
{
const size_type __n = _M_bkt_num_key(__key);
size_type __result = 0;
-
+
for (const _Node* __cur = _M_buckets[__n]; __cur;
__cur = __cur->_M_next)
if (_M_equals(_M_get_key(__cur->_M_val), __key))
size_type
erase(const key_type& __key);
-
+
void
erase(const iterator& __it);
_Alloc_traits::destroy(__a, &__n->_M_val);
_M_put_node(__n);
}
-
+
void
_M_erase_bucket(const size_type __n, _Node* __first, _Node* __last);
// Check same length of lists
for (; __cur1 && __cur2;
__cur1 = __cur1->_M_next, __cur2 = __cur2->_M_next)
- { }
+ { }
if (__cur1 || __cur2)
return false;
// Now check one's elements are in the other
{
const size_type __n = _M_bkt_num(__obj);
_Node* __first = _M_buckets[__n];
-
+
for (_Node* __cur = __first; __cur; __cur = __cur->_M_next)
if (_M_equals(_M_get_key(__cur->_M_val), _M_get_key(__obj)))
return std::pair<iterator, bool>(iterator(__cur, this), false);
-
+
_Node* __tmp = _M_new_node(__obj);
__tmp->_M_next = __first;
_M_buckets[__n] = __tmp;
{
const size_type __n = _M_bkt_num(__obj);
_Node* __first = _M_buckets[__n];
-
+
for (_Node* __cur = __first; __cur; __cur = __cur->_M_next)
if (_M_equals(_M_get_key(__cur->_M_val), _M_get_key(__obj)))
{
size_type __n = _M_bkt_num(__obj);
_Node* __first = _M_buckets[__n];
-
+
for (_Node* __cur = __first; __cur; __cur = __cur->_M_next)
if (_M_equals(_M_get_key(__cur->_M_val), _M_get_key(__obj)))
return __cur->_M_val;
-
+
_Node* __tmp = _M_new_node(__obj);
__tmp->_M_next = __first;
_M_buckets[__n] = __tmp;
{
const size_type __n = _M_bkt_num(__p->_M_val);
_Node* __cur = _M_buckets[__n];
-
+
if (__cur == __p)
{
_M_buckets[__n] = __cur->_M_next;
{
_Node* __local_copy = _M_new_node(__cur->_M_val);
_M_buckets[__i] = __local_copy;
-
+
for (_Node* __next = __cur->_M_next;
__next;
__cur = __next, __next = __cur->_M_next)
/**
* @brief Template class basic_ios, virtual base class for all
- * stream classes.
+ * stream classes.
* @ingroup io
*
* @tparam _CharT Type of character stream.
* accessible to users.
*/
basic_ios()
- : ios_base(), _M_tie(0), _M_fill(char_type()), _M_fill_init(false),
+ : ios_base(), _M_tie(0), _M_fill(char_type()), _M_fill_init(false),
_M_streambuf(0), _M_ctype(0), _M_num_put(0), _M_num_get(0)
{ }
{ _M_set_length(0); }
/**
- * Returns true if the %string is empty. Equivalent to
+ * Returns true if the %string is empty. Equivalent to
* <code>*this == ""</code>.
*/
_GLIBCXX_NODISCARD _GLIBCXX20_CONSTEXPR
basic_istream<wchar_t>&
getline(basic_istream<wchar_t>& __in, basic_string<wchar_t>& __str,
wchar_t __delim);
-#endif
+#endif
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace
inline string
to_string(float __val)
{
- const int __n =
+ const int __n =
__gnu_cxx::__numeric_traits<float>::__max_exponent10 + 20;
return __gnu_cxx::__to_xstring<string>(&std::vsnprintf, __n,
"%f", __val);
inline string
to_string(double __val)
{
- const int __n =
+ const int __n =
__gnu_cxx::__numeric_traits<double>::__max_exponent10 + 20;
return __gnu_cxx::__to_xstring<string>(&std::vsnprintf, __n,
"%f", __val);
inline string
to_string(long double __val)
{
- const int __n =
+ const int __n =
__gnu_cxx::__numeric_traits<long double>::__max_exponent10 + 20;
return __gnu_cxx::__to_xstring<string>(&std::vsnprintf, __n,
"%Lf", __val);
#endif // _GLIBCXX_USE_C99_STDIO
#if defined(_GLIBCXX_USE_WCHAR_T) && _GLIBCXX_USE_C99_WCHAR
- inline int
+ inline int
stoi(const wstring& __str, size_t* __idx = 0, int __base = 10)
{ return __gnu_cxx::__stoa<long, int>(&std::wcstol, "stoi", __str.c_str(),
__idx, __base); }
- inline long
+ inline long
stol(const wstring& __str, size_t* __idx = 0, int __base = 10)
{ return __gnu_cxx::__stoa(&std::wcstol, "stol", __str.c_str(),
__idx, __base); }
*
* See https://gcc.gnu.org/onlinedocs/libstdc++/manual/strings.html#strings.string.character_types
* for advice on how to make use of this class for @a unusual character
- * types. Also, check out include/ext/pod_char_traits.h.
+ * types. Also, check out include/ext/pod_char_traits.h.
*/
template<typename _CharT>
struct char_traits
static _GLIBCXX20_CONSTEXPR char_type*
copy(char_type* __s1, const char_type* __s2, size_t __n)
- {
+ {
if (__n == 0)
return __s1;
#if __cplusplus >= 202002L
* @param __l Array of dimension lengths.
* @param __s Array of dimension strides between array elements.
*/
- gslice(size_t __o, const valarray<size_t>& __l,
+ gslice(size_t __o, const valarray<size_t>& __l,
const valarray<size_t>& __s);
// XXX: the IS says the copy-ctor and copy-assignment operators are
/// Return array of sizes of slice dimensions.
valarray<size_t> size() const;
-
+
/// Return array of array strides for each dimension.
valarray<size_t> stride() const;
void
_M_increment_use()
{ ++_M_count; }
-
+
size_t
_M_decrement_use()
{ return --_M_count; }
* Pattern" (CRTP) technique, but uses a reconstructed, not
* explicitly passed, template pattern.
*
- * Base class templates are:
+ * Base class templates are:
* - __detail::_Hashtable_base
* - __detail::_Map_base
* - __detail::_Insert
{
__x._M_buckets = _M_buckets;
_M_buckets = &_M_single_bucket;
- }
+ }
else
std::swap(_M_buckets, __x._M_buckets);
insert(const_iterator __hint, const value_type& __v)
{
__hashtable& __h = _M_conjure_hashtable();
- __node_gen_type __node_gen(__h);
+ __node_gen_type __node_gen(__h);
return __h._M_insert(__hint, __v, __node_gen, __unique_keys{});
}
// as permitted (but not required) in the standard, in order to provide
// better type safety in iostream calls. A side effect is that in C++98
// expressions involving them are not compile-time constants.
- enum _Ios_Fmtflags
- {
+ enum _Ios_Fmtflags
+ {
_S_boolalpha = 1L << 0,
_S_dec = 1L << 1,
_S_fixed = 1L << 2,
#endif
enum __attribute__((__flag_enum__)) _Ios_Openmode
- {
+ {
_S_app = 1L << 0,
_S_ate = 1L << 1,
_S_bin = 1L << 2,
enum _Ios_Iostate
- {
+ {
_S_goodbit = 0,
_S_badbit = 1L << 0,
_S_eofbit = 1L << 1,
{ return __a = __a ^ __b; }
- enum _Ios_Seekdir
- {
+ enum _Ios_Seekdir
+ {
_S_beg = 0,
_S_cur = _GLIBCXX_STDIO_SEEK_CUR,
_S_end = _GLIBCXX_STDIO_SEEK_END,
- _S_ios_seekdir_end = 1L << 16
+ _S_ios_seekdir_end = 1L << 16
};
#if __cplusplus >= 201103L
#endif
public:
- /**
+ /**
* @brief These are thrown to indicate problems with io.
* @ingroup exceptions
*
// 0 => OK to delete.
int
- _M_remove_reference()
+ _M_remove_reference()
{
// Be race-detector-friendly. For more info see bits/c++config.
_GLIBCXX_SYNCHRONIZATION_HAPPENS_BEFORE(&_M_refcount);
private:
__timepunct_cache&
operator=(const __timepunct_cache&);
-
+
explicit
__timepunct_cache(const __timepunct_cache&);
};
private:
__moneypunct_cache&
operator=(const __moneypunct_cache&);
-
+
explicit
__moneypunct_cache(const __moneypunct_cache&);
};
template<bool _Intl>
iter_type
_M_extract(iter_type __s, iter_type __end, ios_base& __io,
- ios_base::iostate& __err, string& __digits) const;
+ ios_base::iostate& __err, string& __digits) const;
};
template<typename _CharT, typename _InIter>
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
- /**
+ /**
* @defgroup locales Locales
*
* Classes and functions for internationalization and localization.
/// Copy constructor. Both slices refer to the same underlying array.
mask_array (const mask_array&);
-
+
/// Assignment operator. Assigns elements to corresponding elements
/// of @a a.
mask_array& operator=(const mask_array&);
__ostream_write(basic_ostream<_CharT, _Traits>& __out,
const _CharT* __s, streamsize __n)
{
- typedef basic_ostream<_CharT, _Traits> __ostream_type;
+ typedef basic_ostream<_CharT, _Traits> __ostream_type;
typedef typename __ostream_type::ios_base __ios_base;
const streamsize __put = __out.rdbuf()->sputn(__s, __n);
inline void
__ostream_fill(basic_ostream<_CharT, _Traits>& __out, streamsize __n)
{
- typedef basic_ostream<_CharT, _Traits> __ostream_type;
+ typedef basic_ostream<_CharT, _Traits> __ostream_type;
typedef typename __ostream_type::ios_base __ios_base;
const _CharT __c = __out.fill();
* implementation defined type.
* Note: In versions of GCC up to and including GCC 3.3, streamoff
* was typedef long.
- */
+ */
#ifdef __INT64_TYPE__
typedef __INT64_TYPE__ streamoff;
#else
* A random number generator that produces pseudorandom numbers via
* linear function:
* @f[
- * x_{i+1}\leftarrow(ax_{i} + c) \bmod m
+ * x_{i+1}\leftarrow(ax_{i} + c) \bmod m
* @f]
*
* The template parameter @p _UIntType must be an unsigned integral type
* This algorithm was originally invented by Makoto Matsumoto and
* Takuji Nishimura.
*
- * @tparam __w Word size, the number of bits in each element of
+ * @tparam __w Word size, the number of bits in each element of
* the state vector.
* @tparam __n The degree of recursion.
* @tparam __m The period parameter.
* A discrete random number generator that produces pseudorandom
* numbers using:
* @f[
- * x_{i}\leftarrow(x_{i - s} - x_{i - r} - carry_{i-1}) \bmod m
+ * x_{i}\leftarrow(x_{i - s} - x_{i - r} - carry_{i-1}) \bmod m
* @f]
*
* The size of the state is @f$r@f$
* The formula for the normal probability density function is
* @f[
* p(x|\mu,\sigma) = \frac{1}{\sigma \sqrt{2 \pi}}
- * e^{- \frac{{x - \mu}^ {2}}{2 \sigma ^ {2}} }
+ * e^{- \frac{{x - \mu}^ {2}}{2 \sigma ^ {2}} }
* @f]
*
* @headerfile random
* The formula for the normal probability mass function is
* @f[
* p(x|m,s) = \frac{1}{sx\sqrt{2\pi}}
- * \exp{-\frac{(\ln{x} - m)^2}{2s^2}}
+ * \exp{-\frac{(\ln{x} - m)^2}{2s^2}}
* @f]
*
* @headerfile random
* The formula for the gamma probability density function is:
* @f[
* p(x|\alpha,\beta) = \frac{1}{\beta\Gamma(\alpha)}
- * (x/\beta)^{\alpha - 1} e^{-x/\beta}
+ * (x/\beta)^{\alpha - 1} e^{-x/\beta}
* @f]
*
* @headerfile random
* @f[
* p(x|m,n) = \frac{\Gamma((m+n)/2)}{\Gamma(m/2)\Gamma(n/2)}
* (\frac{m}{n})^{m/2} x^{(m/2)-1}
- * (1 + \frac{mx}{n})^{-(m+n)/2}
+ * (1 + \frac{mx}{n})^{-(m+n)/2}
* @f]
*
* @headerfile random
* The formula for the normal probability mass function is:
* @f[
* p(x|n) = \frac{1}{\sqrt(n\pi)} \frac{\Gamma((n+1)/2)}{\Gamma(n/2)}
- * (1 + \frac{x^2}{n}) ^{-(n+1)/2}
+ * (1 + \frac{x^2}{n}) ^{-(n+1)/2}
* @f]
*
* @headerfile random
{
typedef typename std::gamma_distribution<result_type>::param_type
param_type;
-
+
const result_type __g = _M_gd(__urng, param_type(__p.n() / 2, 2));
return _M_nd(__urng) * std::sqrt(__p.n() / __g);
}
* The formula for the normal probability density function is:
* @f[
* p(x|\alpha,\beta) = \frac{\alpha}{\beta} (\frac{x}{\beta})^{\alpha-1}
- * \exp{(-(\frac{x}{\beta})^\alpha)}
+ * \exp{(-(\frac{x}{\beta})^\alpha)}
* @f]
*
* @headerfile random
* The formula for the normal probability mass function is
* @f[
* p(x|a,b) = \frac{1}{b}
- * \exp( \frac{a-x}{b} - \exp(\frac{a-x}{b}))
+ * \exp( \frac{a-x}{b} - \exp(\frac{a-x}{b}))
* @f]
*
* @headerfile random
#if __cpp_impl_three_way_comparison < 201907L
/**
- * @brief Return true if two piecewise constant distributions have
+ * @brief Return true if two piecewise constant distributions have
* different parameters.
*/
template<typename _RealType>
/// @endcond
{
typedef iterator_traits<_BiIter> __iter_traits;
-
+
public:
typedef typename __iter_traits::value_type value_type;
typedef typename __iter_traits::difference_type difference_type;
// _GLIBCXX_RESOLVE_LIB_DEFECTS
// 543. valarray slice default constructor
inline
- slice::slice()
+ slice::slice()
: _M_off(0), _M_sz(0), _M_st(0) {}
inline
* @f]
* is generally found to be within 10<sup>-15</sup> for 64-bit double on
* linux-x86_64 systems over most of the ranges of validity.
- *
+ *
* @todo Provide accuracy comparisons on a per-function basis for a small
* number of targets.
*
__tailSize -= __remainder;
// __first here is always pointing to one past the last element of
// next possible match.
- _RandomAccessIter __backTrack = __first;
+ _RandomAccessIter __backTrack = __first;
while (__unary_pred(--__backTrack))
{
if (--__remainder == 0)
__glibcxx_function_requires(_UnaryPredicateConcept<_Predicate,
typename iterator_traits<_InputIterator>::value_type>)
__glibcxx_requires_valid_range(__first, __last);
-
+
for (; __first != __last; ++__first)
if (__pred(*__first))
{
* @param __last A forward iterator.
* @return first + (last - middle).
*
- * Rotates the elements of the range @p [__first,__last) by
+ * Rotates the elements of the range @p [__first,__last) by
* @p (__middle - __first) positions so that the element at @p __middle
* is moved to @p __first, the element at @p __middle+1 is moved to
* @p __first+1 and so on for each element in the range
* @return An iterator designating the end of the resulting sequence.
*
* Copies the elements of the range @p [__first,__last) to the
- * range beginning at @result, rotating the copied elements by
+ * range beginning at @result, rotating the copied elements by
* @p (__middle-__first) positions so that the element at @p __middle
* is moved to @p __result, the element at @p __middle+1 is moved
* to @p __result+1 and so on for each element in the range @p
* [__first,__last).
*
- * Performs
+ * Performs
* @p *(__result+(n+(__last-__middle))%(__last-__first))=*(__first+n)
* for each @p n in the range @p [0,__last-__first).
*/
++__result_real_last;
++__first;
}
-
+
std::__make_heap(__result_first, __result_real_last, __comp);
while (__first != __last)
{
* @ingroup sorting_algorithms
* @param __first Start of range.
* @param __last End of range.
- * @return make_pair(m, M), where m is the first iterator i in
+ * @return make_pair(m, M), where m is the first iterator i in
* [__first, __last) such that no other element in the range is
* smaller, and where M is the last iterator i in [__first, __last)
* such that no other element in the range is larger.
* @param __first Start of range.
* @param __last End of range.
* @param __comp Comparison functor.
- * @return make_pair(m, M), where m is the first iterator i in
+ * @return make_pair(m, M), where m is the first iterator i in
* [__first, __last) such that no other element in the range is
* smaller, and where M is the last iterator i in [__first, __last)
* such that no other element in the range is larger.
typename iterator_traits<_InputIterator2>::value_type>)
__glibcxx_function_requires(_LessThanOpConcept<
typename iterator_traits<_InputIterator2>::value_type,
- typename iterator_traits<_InputIterator1>::value_type>)
+ typename iterator_traits<_InputIterator1>::value_type>)
__glibcxx_requires_sorted_set(__first1, __last1, __first2);
__glibcxx_requires_sorted_set(__first2, __last2, __first1);
__glibcxx_requires_irreflexive2(__first1, __last1);
typename iterator_traits<_InputIterator2>::value_type>)
__glibcxx_function_requires(_LessThanOpConcept<
typename iterator_traits<_InputIterator2>::value_type,
- typename iterator_traits<_InputIterator1>::value_type>)
+ typename iterator_traits<_InputIterator1>::value_type>)
__glibcxx_requires_sorted_set(__first1, __last1, __first2);
__glibcxx_requires_sorted_set(__first2, __last2, __first1);
__glibcxx_requires_irreflexive2(__first1, __last1);
++__first1;
++__first2;
}
- return std::copy(__first2, __last2,
+ return std::copy(__first2, __last2,
std::copy(__first1, __last1, __result));
}
typename iterator_traits<_InputIterator2>::value_type>)
__glibcxx_function_requires(_LessThanOpConcept<
typename iterator_traits<_InputIterator2>::value_type,
- typename iterator_traits<_InputIterator1>::value_type>)
+ typename iterator_traits<_InputIterator1>::value_type>)
__glibcxx_requires_sorted_set(__first1, __last1, __first2);
__glibcxx_requires_sorted_set(__first2, __last2, __first1);
__glibcxx_requires_irreflexive2(__first1, __last1);
struct unary_function
{
/// @c argument_type is the type of the argument
- typedef _Arg argument_type;
+ typedef _Arg argument_type;
/// @c result_type is the return type
- typedef _Result result_type;
+ typedef _Result result_type;
} _GLIBCXX11_DEPRECATED;
/**
struct binary_function
{
/// @c first_argument_type is the type of the first argument
- typedef _Arg1 first_argument_type;
+ typedef _Arg1 first_argument_type;
/// @c second_argument_type is the type of the second argument
typedef _Arg2 second_argument_type;
__parent--;
}
}
-
+
/**
* @brief Construct a heap over a range.
* @param __first Start of heap.
__glibcxx_requires_irreflexive(__first, __last);
__gnu_cxx::__ops::_Iter_less_iter __comp;
- return __first +
+ return __first +
std::__is_heap_until(__first, std::distance(__first, __last), __comp);
}
_GLIBCXX17_CONSTEXPR move_iterator
operator-(difference_type __n) const
{ return move_iterator(_M_current - __n); }
-
+
_GLIBCXX17_CONSTEXPR move_iterator&
operator-=(difference_type __n)
- {
+ {
_M_current -= __n;
return *this;
}
_GLIBCXX_NODISCARD [[__gnu__::__always_inline__]]
inline _GLIBCXX17_CONSTEXPR _BidirectionalIterator
prev(_BidirectionalIterator __x, typename
- iterator_traits<_BidirectionalIterator>::difference_type __n = 1)
+ iterator_traits<_BidirectionalIterator>::difference_type __n = 1)
{
// concept requirements
__glibcxx_function_requires(_BidirectionalIteratorConcept<
_GLIBCXX_END_NAMESPACE_CXX11
/// A string of @c char
- typedef basic_string<char> string;
+ typedef basic_string<char> string;
/// A string of @c wchar_t
- typedef basic_string<wchar_t> wstring;
+ typedef basic_string<wchar_t> wstring;
#ifdef _GLIBCXX_USE_CHAR8_T
/// A string of @c char8_t
#if __cplusplus >= 201103L
/// A string of @c char16_t
- typedef basic_string<char16_t> u16string;
+ typedef basic_string<char16_t> u16string;
/// A string of @c char32_t
- typedef basic_string<char32_t> u32string;
+ typedef basic_string<char32_t> u32string;
#endif
/** @} */
* @tparam _Hash Hashing function object type, defaults to hash<_Value>.
* @tparam _Pred Predicate function object type, defaults
* to equal_to<_Value>.
- * @tparam _Alloc Allocator type, defaults to
+ * @tparam _Alloc Allocator type, defaults to
* std::allocator<std::pair<const _Key, _Tp>>.
*
* Meets the requirements of a <a href="tables.html#65">container</a>, and
*
* @param __k Key to use for finding a possibly existing pair in
* the unordered_map.
- * @param __args Arguments used to generate the .second for a
+ * @param __args Arguments used to generate the .second for a
* new pair instance.
*
* @return A pair, of which the first element is an iterator that points
* should be inserted.
* @param __k Key to use for finding a possibly existing pair in
* the unordered_map.
- * @param __args Arguments used to generate the .second for a
+ * @param __args Arguments used to generate the .second for a
* new pair instance.
* @return An iterator that points to the element with key of the
* std::pair built from @a __args (may or may not be that
* @param __x Pair to be inserted (see std::make_pair for easy
* creation of pairs).
*
- * @return A pair, of which the first element is an iterator that
- * points to the possibly inserted pair, and the second is
+ * @return A pair, of which the first element is an iterator that
+ * points to the possibly inserted pair, and the second is
* a bool that is true if the pair was actually inserted.
*
* This function attempts to insert a (key, value) %pair into the
* @brief Attempts to insert a std::pair into the %unordered_map.
* @param __k Key to use for finding a possibly existing pair in
* the map.
- * @param __obj Argument used to generate the .second for a pair
+ * @param __obj Argument used to generate the .second for a pair
* instance.
*
- * @return A pair, of which the first element is an iterator that
- * points to the possibly inserted pair, and the second is
+ * @return A pair, of which the first element is an iterator that
+ * points to the possibly inserted pair, and the second is
* a bool that is true if the pair was actually inserted.
*
* This function attempts to insert a (key, value) %pair into the
* %unordered_map. An %unordered_map relies on unique keys and thus a
* %pair is only inserted if its first element (the key) is not already
* present in the %unordered_map.
- * If the %pair was already in the %unordered_map, the .second of
+ * If the %pair was already in the %unordered_map, the .second of
* the %pair is assigned from __obj.
*
* Insertion requires amortized constant time.
* pair should be inserted.
* @param __k Key to use for finding a possibly existing pair in
* the unordered_map.
- * @param __obj Argument used to generate the .second for a pair
+ * @param __obj Argument used to generate the .second for a pair
* instance.
* @return An iterator that points to the element with key of
* @a __x (may or may not be the %pair passed in).
*
* This function is not concerned about whether the insertion took place,
* and thus does not return a boolean like the single-argument insert()
- * does.
+ * does.
* If the %pair was already in the %unordered map, the .second of
* the %pair is assigned from __obj.
* Note that the first parameter is only a hint and can
size_type
bucket(const key_type& __key) const
{ return _M_h.bucket(__key); }
-
+
/**
* @brief Returns a read/write iterator pointing to the first bucket
* element.
size_type
bucket(const key_type& __key) const
{ return _M_h.bucket(__key); }
-
+
/**
* @brief Returns a read/write iterator pointing to the first bucket
* element.
{
public:
typedef typename _Dom::value_type value_type;
-
+
_GBase (const _Dom& __e, const valarray<size_t>& __i)
: _M_expr (__e), _M_index(__i) {}
-
+
value_type
operator[] (size_t __i) const
{ return _M_expr[_M_index[__i]]; }
-
+
size_t
size () const
{ return _M_index.size(); }
{
public:
typedef _Tp value_type;
-
+
_GBase (_Array<_Tp> __a, const valarray<size_t>& __i)
: _M_array (__a), _M_index(__i) {}
-
+
value_type
operator[] (size_t __i) const
{ return _M_array._M_data[_M_index[__i]]; }
-
+
size_t
size () const
{ return _M_index.size(); }
{
typedef _GBase<_Dom> _Base;
typedef typename _Base::value_type value_type;
-
+
_GClos (const _Dom& __e, const valarray<size_t>& __i)
: _Base (__e, __i) {}
};
{
typedef _GBase<_Array<_Tp> > _Base;
typedef typename _Base::value_type value_type;
-
+
_GClos (_Array<_Tp> __a, const valarray<size_t>& __i)
: _Base (__a, __i) {}
};
_IBase (const _Dom& __e, const valarray<size_t>& __i)
: _M_expr (__e), _M_index (__i) {}
-
+
value_type
operator[] (size_t __i) const
{ return _M_expr[_M_index[__i]]; }
-
+
size_t
size() const
{ return _M_index.size(); }
{
typedef _IBase<_Dom> _Base;
typedef typename _Base::value_type value_type;
-
+
_IClos (const _Dom& __e, const valarray<size_t>& __i)
: _Base (__e, __i) {}
};
{
typedef _IBase<valarray<_Tp> > _Base;
typedef _Tp value_type;
-
+
_IClos (const valarray<_Tp>& __a, const valarray<size_t>& __i)
: _Base (__a, __i) {}
};
} // namespace __detail
-
+
//
// class _Expr
//
while (__n--)
*__a++ = __t;
}
-
+
// fill strided array __a[<__n-1 : __s>] with __t
template<typename _Tp>
inline void
__valarray_fill(_Tp* __restrict__ __a, size_t __n,
size_t __s, const _Tp& __t)
- {
+ {
for (size_t __i = 0; __i < __n; ++__i, __a += __s)
*__a = __t;
}
for (size_t __j = 0; __j < __n; ++__j, ++__i)
__a[*__i] = __t;
}
-
+
// copy plain array __a[<__n>] in __b[<__n>]
// For non-fundamental types, it is wrong to say 'memcpy()'
template<typename _Tp, bool>
explicit _Array(_Tp* const __restrict__);
explicit _Array(const valarray<_Tp>&);
_Array(const _Tp* __restrict__, size_t);
-
+
_Tp* begin() const;
-
+
_Tp* const __restrict__ _M_data;
};
{ return _Oper()(_M_expr[__i]); }
size_t size() const { return _M_expr.size(); }
-
+
private:
typename _ValArrayRef<_Arg>::__type _M_expr;
};
{
public:
typedef typename _Dom::value_type value_type;
-
+
_SBase (const _Dom& __e, const slice& __s)
: _M_expr (__e), _M_slice (__s) {}
-
+
value_type
operator[] (size_t __i) const
{ return _M_expr[_M_slice.start () + __i * _M_slice.stride ()]; }
-
+
size_t
size() const
{ return _M_slice.size (); }
{
public:
typedef _Tp value_type;
-
+
_SBase (_Array<_Tp> __a, const slice& __s)
: _M_array (__a._M_data+__s.start()), _M_size (__s.size()),
_M_stride (__s.stride()) {}
-
+
value_type
operator[] (size_t __i) const
{ return _M_array._M_data[__i * _M_stride]; }
-
+
size_t
size() const
{ return _M_size; }
{
typedef _SBase<_Dom> _Base;
typedef typename _Base::value_type value_type;
-
+
_SClos (const _Dom& __e, const slice& __s) : _Base (__e, __s) {}
};
{
typedef _SBase<_Array<_Tp> > _Base;
typedef _Tp value_type;
-
+
_SClos (_Array<_Tp> __a, const slice& __s) : _Base (__a, __s) {}
};
} // namespace __detail
using ::intmax_t;
using ::intptr_t;
-
+
using ::uint8_t;
using ::uint16_t;
using ::uint32_t;
#endif //_GLIBCXX_USE_WCHAR_T
-#endif
+#endif
#endif
* @brief GNU debug code, replaces standard behavior with debug behavior.
*/
namespace std
-{
- namespace __debug { }
+{
+ namespace __debug { }
}
/** @namespace __gnu_debug
#define _GLIBCXX_WRITE_MEM_BARRIER __atomic_thread_fence (__ATOMIC_RELEASE)
#endif
-#endif
+#endif
*
* It is to be used only for built-in types or PODs. Notable
* differences are:
- *
+ *
* 1. Not all accessor functions are present.
* 2. Used ONLY for PODs.
* 3. No Allocator template argument. Uses ::operator new() to get
typedef std::ptrdiff_t difference_type;
};
- enum
- {
+ enum
+ {
bits_per_byte = 8,
bits_per_block = sizeof(std::size_t) * std::size_t(bits_per_byte)
};
// _Tp should be a pointer type.
template<typename _Tp>
- class _Inclusive_between
+ class _Inclusive_between
{
typedef _Tp pointer;
pointer _M_ptr_value;
typedef typename std::pair<_Tp, _Tp> _Block_pair;
-
+
public:
- _Inclusive_between(pointer __ptr) : _M_ptr_value(__ptr)
+ _Inclusive_between(pointer __ptr) : _M_ptr_value(__ptr)
{ }
-
- bool
+
+ bool
operator()(_Block_pair __bp) const throw()
{
- if (std::less_equal<pointer>()(_M_ptr_value, __bp.second)
+ if (std::less_equal<pointer>()(_M_ptr_value, __bp.second)
&& std::greater_equal<pointer>()(_M_ptr_value, __bp.first))
return true;
else
return false;
}
};
-
+
// Used to pass a Functor to functions by reference.
template<typename _Functor>
- class _Functor_Ref
+ class _Functor_Ref
{
_Functor& _M_fref;
-
+
public:
typedef typename _Functor::argument_type argument_type;
typedef typename _Functor::result_type result_type;
- _Functor_Ref(_Functor& __fref) : _M_fref(__fref)
+ _Functor_Ref(_Functor& __fref) : _M_fref(__fref)
{ }
- result_type
- operator()(argument_type __arg)
+ result_type
+ operator()(argument_type __arg)
{ return _M_fref(__arg); }
};
// _Tp should be a pointer type, and _Alloc is the Allocator for
// the vector.
template<typename _Tp>
- class _Ffit_finder
+ class _Ffit_finder
{
typedef std::pair<_Tp, _Tp> _Block_pair;
typedef __detail::__mini_vector<_Block_pair> _BPVector;
_Ffit_finder() : _M_pbitmap(0), _M_data_offset(0)
{ }
- bool
+ bool
operator()(_Block_pair __bp) throw()
{
using std::size_t;
}
return false;
}
-
+
std::size_t*
_M_get() const throw()
{ return _M_pbitmap; }
std::size_t* _M_curr_bmap;
std::size_t* _M_last_bmap_in_block;
_Index_type _M_curr_index;
-
+
public:
// Use the 2nd parameter with care. Make sure that such an
// entry exists in the vector before passing that particular
// index to this ctor.
_Bitmap_counter(_BPVector& Rvbp, long __index = -1) : _M_vbp(Rvbp)
{ this->_M_reset(__index); }
-
- void
+
+ void
_M_reset(long __index = -1) throw()
{
if (__index == -1)
_M_curr_index = __index;
_M_curr_bmap = reinterpret_cast<std::size_t*>
(_M_vbp[_M_curr_index].first) - 1;
-
+
_GLIBCXX_DEBUG_ASSERT(__index <= (long)_M_vbp.size() - 1);
-
+
_M_last_bmap_in_block = _M_curr_bmap
- - ((_M_vbp[_M_curr_index].second
- - _M_vbp[_M_curr_index].first + 1)
+ - ((_M_vbp[_M_curr_index].second
+ - _M_vbp[_M_curr_index].first + 1)
/ std::size_t(bits_per_block) - 1);
}
-
+
// Dangerous Function! Use with extreme care. Pass to this
// function ONLY those values that are known to be correct,
// otherwise this will mess up big time.
void
_M_set_internal_bitmap(std::size_t* __new_internal_marker) throw()
{ _M_curr_bmap = __new_internal_marker; }
-
+
bool
_M_finished() const throw()
{ return(_M_curr_bmap == 0); }
-
+
_Bitmap_counter&
operator++() throw()
{
--_M_curr_bmap;
return *this;
}
-
+
std::size_t*
_M_get() const throw()
{ return _M_curr_bmap; }
-
- pointer
+
+ pointer
_M_base() const throw()
{ return _M_vbp[_M_curr_index].first; }
* ((reinterpret_cast<std::size_t*>(this->_M_base())
- _M_curr_bmap) - 1);
}
-
+
_Index_type
_M_where() const throw()
{ return _M_curr_index; }
/** @brief Mark a memory address as allocated by re-setting the
* corresponding bit in the bit-map.
*/
- inline void
+ inline void
__bit_allocate(std::size_t* __pbmap, std::size_t __pos) throw()
{
std::size_t __mask = 1 << __pos;
__mask = ~__mask;
*__pbmap &= __mask;
}
-
+
/** @brief Mark a memory address as free by setting the
* corresponding bit in the bit-map.
*/
- inline void
+ inline void
__bit_free(std::size_t* __pbmap, std::size_t __pos) throw()
{
std::size_t __mask = 1 << __pos;
__free_list.pop_back();
}
}
-
+
// Just add the block to the list of free lists unconditionally.
iterator __temp = __detail::__lower_bound
- (__free_list.begin(), __free_list.end(),
+ (__free_list.begin(), __free_list.end(),
*__addr, _LT_pointer_compare());
// We may insert the new free list before _temp;
* @return true if the wastage incurred is acceptable, else returns
* false.
*/
- bool
+ bool
_M_should_i_give(std::size_t __block_size,
std::size_t __required_size) throw()
{
const std::size_t __max_wastage_percentage = 36;
- if (__block_size >= __required_size &&
+ if (__block_size >= __required_size &&
(((__block_size - __required_size) * 100 / __block_size)
< __max_wastage_percentage))
return true;
* @param __addr The pointer to the memory block that was given
* by a call to the _M_get function.
*/
- inline void
+ inline void
_M_insert(std::size_t* __addr) throw()
{
#if defined __GTHREADS
this->_M_validate(reinterpret_cast<std::size_t*>(__addr) - 1);
// See discussion as to why this is 1!
}
-
+
/** @brief This function gets a block of memory of the specified
* size from the free list.
*
/** @brief This function just clears the internal Free List, and
* gives back all the memory to the OS.
*/
- void
+ void
_M_clear();
};
// Forward declare the class.
- template<typename _Tp>
+ template<typename _Tp>
class bitmap_allocator;
// Specialize for void:
struct aligned_size
{
enum
- {
+ {
modulus = _BSize % _AlignSize,
value = _BSize + (modulus ? _AlignSize - (modulus) : 0)
};
#if defined _GLIBCXX_DEBUG
// Complexity: O(lg(N)). Where, N is the number of block of size
// sizeof(value_type).
- void
+ void
_S_check_for_free_blocks() throw()
{
typedef typename __detail::_Ffit_finder<_Alloc_block*> _FFF;
* is the number of blocks of size sizeof(value_type) within
* the newly acquired block. Having a tight bound.
*/
- void
+ void
_S_refill_pool() _GLIBCXX_THROW(std::bad_alloc)
{
using std::size_t;
const size_t __num_bitmaps = (_S_block_size
/ size_t(__detail::bits_per_block));
- const size_t __size_to_allocate = sizeof(size_t)
- + _S_block_size * sizeof(_Alloc_block)
+ const size_t __size_to_allocate = sizeof(size_t)
+ + _S_block_size * sizeof(_Alloc_block)
+ __num_bitmaps * sizeof(size_t);
size_t* __temp =
++__temp;
// The Header information goes at the Beginning of the Block.
- _Block_pair __bp =
+ _Block_pair __bp =
std::make_pair(reinterpret_cast<_Alloc_block*>
- (__temp + __num_bitmaps),
+ (__temp + __num_bitmaps),
reinterpret_cast<_Alloc_block*>
- (__temp + __num_bitmaps)
+ (__temp + __num_bitmaps)
+ _S_block_size - 1);
-
+
// Fill the Vector with this information.
_S_mem_blocks.push_back(__bp);
* function to have a complexity referred to commonly as:
* Amortized Constant time.
*/
- pointer
+ pointer
_M_allocate_single_object() _GLIBCXX_THROW(std::bad_alloc)
{
using std::size_t;
// Now, get the address of the bit we marked as allocated.
pointer __ret = reinterpret_cast<pointer>
(__bpi->first + __fff._M_offset() + __nz_bit);
- size_t* __puse_count =
+ size_t* __puse_count =
reinterpret_cast<size_t*>
(__bpi->first) - (__detail::__num_bitmaps(*__bpi) + 1);
-
+
++(*__puse_count);
return __ret;
}
* close to each other and this case is handled in O(1) time by
* the deallocate function.
*/
- void
+ void
_M_deallocate_single_object(pointer __p) throw()
{
using std::size_t;
// Get the position of the iterator that has been found.
const size_t __rotate = (__displacement
% size_t(__detail::bits_per_block));
- size_t* __bitmapC =
+ size_t* __bitmapC =
reinterpret_cast<size_t*>
(_S_mem_blocks[__diff].first) - 1;
__bitmapC -= (__displacement / size_t(__detail::bits_per_block));
-
+
__detail::__bit_free(__bitmapC, __rotate);
size_t* __puse_count = reinterpret_cast<size_t*>
(_S_mem_blocks[__diff].first)
- (__detail::__num_bitmaps(_S_mem_blocks[__diff]) + 1);
-
+
_GLIBCXX_DEBUG_ASSERT(*__puse_count != 0);
--(*__puse_count);
if (__builtin_expect(*__puse_count == 0, false))
{
_S_block_size /= 2;
-
+
// We can safely remove this block.
// _Block_pair __bp = _S_mem_blocks[__diff];
this->_M_insert(__puse_count);
// free list, and hence had been erased from the vector,
// so the size of the vector gets reduced by 1.
if ((_Difference_type)_S_last_request._M_where() >= __diff--)
- _S_last_request._M_reset(__diff);
+ _S_last_request._M_reset(__diff);
// If the Index into the vector of the region of memory
// that might hold the next address that will be passed to
~bitmap_allocator() _GLIBCXX_USE_NOEXCEPT
{ }
- _GLIBCXX_NODISCARD pointer
+ _GLIBCXX_NODISCARD pointer
allocate(size_type __n)
{
if (__n > this->max_size())
if (__builtin_expect(__n == 1, true))
return this->_M_allocate_single_object();
else
- {
+ {
const size_type __b = __n * sizeof(value_type);
return reinterpret_cast<pointer>(::operator new(__b));
}
}
- _GLIBCXX_NODISCARD pointer
+ _GLIBCXX_NODISCARD pointer
allocate(size_type __n, typename bitmap_allocator<void>::const_pointer)
{ return allocate(__n); }
- void
+ void
deallocate(pointer __p, size_type __n) throw()
{
if (__builtin_expect(__p != 0, true))
}
}
- pointer
+ pointer
address(reference __r) const _GLIBCXX_NOEXCEPT
{ return std::__addressof(__r); }
- const_pointer
+ const_pointer
address(const_reference __r) const _GLIBCXX_NOEXCEPT
{ return std::__addressof(__r); }
- size_type
+ size_type
max_size() const _GLIBCXX_USE_NOEXCEPT
{ return size_type(-1) / sizeof(value_type); }
{ ::new((void *)__p) _Up(std::forward<_Args>(__args)...); }
template<typename _Up>
- void
+ void
destroy(_Up* __p)
{ __p->~_Up(); }
#else
- void
+ void
construct(pointer __p, const_reference __data)
{ ::new((void *)__p) value_type(__data); }
- void
+ void
destroy(pointer __p)
{ __p->~value_type(); }
#endif
};
template<typename _Tp1, typename _Tp2>
- bool
- operator==(const bitmap_allocator<_Tp1>&,
+ bool
+ operator==(const bitmap_allocator<_Tp1>&,
const bitmap_allocator<_Tp2>&) throw()
{ return true; }
-
+
#if __cpp_impl_three_way_comparison < 201907L
template<typename _Tp1, typename _Tp2>
- bool
- operator!=(const bitmap_allocator<_Tp1>&,
- const bitmap_allocator<_Tp2>&) throw()
+ bool
+ operator!=(const bitmap_allocator<_Tp1>&,
+ const bitmap_allocator<_Tp2>&) throw()
{ return false; }
#endif
= 2 * std::size_t(__detail::bits_per_block);
template<typename _Tp>
- typename bitmap_allocator<_Tp>::_BPVector::size_type
+ typename bitmap_allocator<_Tp>::_BPVector::size_type
bitmap_allocator<_Tp>::_S_last_dealloc_index = 0;
template<typename _Tp>
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace __gnu_cxx
-#endif
+#endif
class encoding_state
{
public:
- // Types:
+ // Types:
// NB: A conversion descriptor subsumes and enhances the
// functionality of a simple state type such as mbstate_t.
typedef iconv_t descriptor_type;
-
+
protected:
// Name of internal character set encoding.
std::string _M_int_enc;
int _M_bytes;
public:
- explicit
- encoding_state()
+ explicit
+ encoding_state()
: _M_in_desc(0), _M_out_desc(0), _M_ext_bom(0), _M_int_bom(0), _M_bytes(0)
{ }
- explicit
- encoding_state(const char* __int, const char* __ext,
+ explicit
+ encoding_state(const char* __int, const char* __ext,
int __ibom = 0, int __ebom = 0, int __bytes = 1)
- : _M_int_enc(__int), _M_ext_enc(__ext), _M_in_desc(0), _M_out_desc(0),
+ : _M_int_enc(__int), _M_ext_enc(__ext), _M_in_desc(0), _M_out_desc(0),
_M_ext_bom(__ebom), _M_int_bom(__ibom), _M_bytes(__bytes)
{ init(); }
}
~encoding_state()
- { destroy(); }
+ { destroy(); }
bool
good() const throw()
- {
+ {
const descriptor_type __err = (iconv_t)(-1);
- bool __test = _M_in_desc && _M_in_desc != __err;
+ bool __test = _M_in_desc && _M_in_desc != __err;
__test &= _M_out_desc && _M_out_desc != __err;
return __test;
}
-
+
int
character_ratio() const
{ return _M_bytes; }
internal_encoding() const
{ return _M_int_enc; }
- int
+ int
internal_bom() const
{ return _M_int_bom; }
external_encoding() const
{ return _M_ext_enc; }
- int
+ int
external_bom() const
{ return _M_ext_bom; }
destroy() throw()
{
const descriptor_type __err = (iconv_t)(-1);
- if (_M_in_desc && _M_in_desc != __err)
+ if (_M_in_desc && _M_in_desc != __err)
{
iconv_close(_M_in_desc);
_M_in_desc = 0;
}
- if (_M_out_desc && _M_out_desc != __err)
+ if (_M_out_desc && _M_out_desc != __err)
{
iconv_close(_M_out_desc);
_M_out_desc = 0;
class codecvt<_InternT, _ExternT, encoding_state>
: public __codecvt_abstract_base<_InternT, _ExternT, encoding_state>
{
- public:
+ public:
// Types:
typedef codecvt_base::result result;
typedef _InternT intern_type;
// Data Members:
static locale::id id;
- explicit
+ explicit
codecvt(size_t __refs = 0)
: __codecvt_abstract_base<intern_type, extern_type, state_type>(__refs)
{ }
- explicit
+ explicit
codecvt(state_type& __enc, size_t __refs = 0)
: __codecvt_abstract_base<intern_type, extern_type, state_type>(__refs)
{ }
protected:
- virtual
+ virtual
~codecvt() { }
virtual result
- do_out(state_type& __state, const intern_type* __from,
+ do_out(state_type& __state, const intern_type* __from,
const intern_type* __from_end, const intern_type*& __from_next,
extern_type* __to, extern_type* __to_end,
extern_type*& __to_next) const;
virtual result
- do_unshift(state_type& __state, extern_type* __to,
+ do_unshift(state_type& __state, extern_type* __to,
extern_type* __to_end, extern_type*& __to_next) const;
virtual result
- do_in(state_type& __state, const extern_type* __from,
+ do_in(state_type& __state, const extern_type* __from,
const extern_type* __from_end, const extern_type*& __from_next,
- intern_type* __to, intern_type* __to_end,
+ intern_type* __to, intern_type* __to_end,
intern_type*& __to_next) const;
- virtual int
+ virtual int
do_encoding() const throw();
- virtual bool
+ virtual bool
do_always_noconv() const throw();
- virtual int
- do_length(state_type&, const extern_type* __from,
+ virtual int
+ do_length(state_type&, const extern_type* __from,
const extern_type* __end, size_t __max) const;
- virtual int
+ virtual int
do_max_length() const throw();
};
template<typename _InternT, typename _ExternT>
- locale::id
+ locale::id
codecvt<_InternT, _ExternT, encoding_state>::id;
// This adaptor works around the signature problems of the second
template<typename _InternT, typename _ExternT>
codecvt_base::result
codecvt<_InternT, _ExternT, encoding_state>::
- do_out(state_type& __state, const intern_type* __from,
+ do_out(state_type& __state, const intern_type* __from,
const intern_type* __from_end, const intern_type*& __from_next,
extern_type* __to, extern_type* __to_end,
extern_type*& __to_next) const
const size_t __fmultiple = sizeof(intern_type);
size_t __fbytes = __fmultiple * (__from_end - __from);
const size_t __tmultiple = sizeof(extern_type);
- size_t __tbytes = __tmultiple * (__to_end - __to);
-
+ size_t __tbytes = __tmultiple * (__to_end - __to);
+
// Argument list for iconv specifies a byte sequence. Thus,
// all to/from arrays must be brutally casted to char*.
char* __cto = reinterpret_cast<char*>(__to);
// merry way.
int __int_bom = __state.internal_bom();
if (__int_bom)
- {
+ {
size_t __size = __from_end - __from;
intern_type* __cfixed = static_cast<intern_type*>
(__builtin_alloca(sizeof(intern_type) * (__size + 1)));
char_traits<intern_type>::copy(__cfixed + 1, __from, __size);
__cfrom = reinterpret_cast<char*>(__cfixed);
__conv = __iconv_adaptor(iconv, __desc, &__cfrom,
- &__fbytes, &__cto, &__tbytes);
+ &__fbytes, &__cto, &__tbytes);
}
else
{
intern_type* __cfixed = const_cast<intern_type*>(__from);
__cfrom = reinterpret_cast<char*>(__cfixed);
- __conv = __iconv_adaptor(iconv, __desc, &__cfrom, &__fbytes,
- &__cto, &__tbytes);
+ __conv = __iconv_adaptor(iconv, __desc, &__cfrom, &__fbytes,
+ &__cto, &__tbytes);
}
if (__conv != size_t(-1))
__to_next = reinterpret_cast<extern_type*>(__cto);
__ret = codecvt_base::ok;
}
- else
+ else
{
if (__fbytes < __fmultiple * (__from_end - __from))
{
__ret = codecvt_base::error;
}
}
- return __ret;
+ return __ret;
}
template<typename _InternT, typename _ExternT>
codecvt_base::result
codecvt<_InternT, _ExternT, encoding_state>::
- do_unshift(state_type& __state, extern_type* __to,
+ do_unshift(state_type& __state, extern_type* __to,
extern_type* __to_end, extern_type*& __to_next) const
{
result __ret = codecvt_base::error;
{
const descriptor_type& __desc = __state.in_descriptor();
const size_t __tmultiple = sizeof(intern_type);
- size_t __tlen = __tmultiple * (__to_end - __to);
-
+ size_t __tlen = __tmultiple * (__to_end - __to);
+
// Argument list for iconv specifies a byte sequence. Thus,
// all to/from arrays must be brutally casted to char*.
char* __cto = reinterpret_cast<char*>(__to);
size_t __conv = __iconv_adaptor(iconv,__desc, 0, 0,
- &__cto, &__tlen);
-
+ &__cto, &__tlen);
+
if (__conv != size_t(-1))
{
__to_next = reinterpret_cast<extern_type*>(__cto);
else
__ret = codecvt_base::partial;
}
- else
+ else
__ret = codecvt_base::error;
}
- return __ret;
+ return __ret;
}
-
+
template<typename _InternT, typename _ExternT>
codecvt_base::result
codecvt<_InternT, _ExternT, encoding_state>::
- do_in(state_type& __state, const extern_type* __from,
+ do_in(state_type& __state, const extern_type* __from,
const extern_type* __from_end, const extern_type*& __from_next,
- intern_type* __to, intern_type* __to_end,
+ intern_type* __to, intern_type* __to_end,
intern_type*& __to_next) const
- {
+ {
result __ret = codecvt_base::error;
if (__state.good())
{
const size_t __fmultiple = sizeof(extern_type);
size_t __flen = __fmultiple * (__from_end - __from);
const size_t __tmultiple = sizeof(intern_type);
- size_t __tlen = __tmultiple * (__to_end - __to);
-
+ size_t __tlen = __tmultiple * (__to_end - __to);
+
// Argument list for iconv specifies a byte sequence. Thus,
// all to/from arrays must be brutally casted to char*.
char* __cto = reinterpret_cast<char*>(__to);
// merry way.
int __ext_bom = __state.external_bom();
if (__ext_bom)
- {
+ {
size_t __size = __from_end - __from;
extern_type* __cfixed = static_cast<extern_type*>
(__builtin_alloca(sizeof(extern_type) * (__size + 1)));
char_traits<extern_type>::copy(__cfixed + 1, __from, __size);
__cfrom = reinterpret_cast<char*>(__cfixed);
__conv = __iconv_adaptor(iconv, __desc, &__cfrom,
- &__flen, &__cto, &__tlen);
+ &__flen, &__cto, &__tlen);
}
else
{
extern_type* __cfixed = const_cast<extern_type*>(__from);
__cfrom = reinterpret_cast<char*>(__cfixed);
__conv = __iconv_adaptor(iconv, __desc, &__cfrom,
- &__flen, &__cto, &__tlen);
+ &__flen, &__cto, &__tlen);
}
-
+
if (__conv != size_t(-1))
{
__from_next = reinterpret_cast<const extern_type*>(__cfrom);
__to_next = reinterpret_cast<intern_type*>(__cto);
__ret = codecvt_base::ok;
}
- else
+ else
{
if (__flen < static_cast<size_t>(__from_end - __from))
{
__ret = codecvt_base::error;
}
}
- return __ret;
+ return __ret;
}
-
+
template<typename _InternT, typename _ExternT>
- int
+ int
codecvt<_InternT, _ExternT, encoding_state>::
do_encoding() const throw()
{
int __ret = 0;
if (sizeof(_ExternT) <= sizeof(_InternT))
__ret = sizeof(_InternT) / sizeof(_ExternT);
- return __ret;
+ return __ret;
}
-
+
template<typename _InternT, typename _ExternT>
- bool
+ bool
codecvt<_InternT, _ExternT, encoding_state>::
do_always_noconv() const throw()
{ return false; }
-
+
template<typename _InternT, typename _ExternT>
- int
+ int
codecvt<_InternT, _ExternT, encoding_state>::
- do_length(state_type&, const extern_type* __from,
+ do_length(state_type&, const extern_type* __from,
const extern_type* __end, size_t __max) const
{ return std::min(__max, static_cast<size_t>(__end - __from)); }
// _GLIBCXX_RESOLVE_LIB_DEFECTS
// 74. Garbled text for codecvt::do_max_length
template<typename _InternT, typename _ExternT>
- int
+ int
codecvt<_InternT, _ExternT, encoding_state>::
do_max_length() const throw()
{ return 1; }
#endif
#include <exception>
-#include <bits/gthr.h>
+#include <bits/gthr.h>
#include <bits/functexcept.h>
#include <bits/cpp_type_traits.h>
#include <ext/type_traits.h>
// _S_mutex multi-threaded code that requires additional support
// from gthr.h or abstraction layers in concurrence.h.
// _S_atomic multi-threaded code using atomic operations.
- enum _Lock_policy { _S_single, _S_mutex, _S_atomic };
+ enum _Lock_policy { _S_single, _S_mutex, _S_atomic };
// Compile time constant that indicates prefered locking policy in
// the current configuration.
__throw_concurrence_wait_error()
{ _GLIBCXX_THROW_OR_ABORT(__concurrence_wait_error()); }
#endif
-
- class __mutex
+
+ class __mutex
{
private:
#if __GTHREADS && defined __GTHREAD_MUTEX_INIT
__mutex& operator=(const __mutex&);
public:
- __mutex()
- {
+ __mutex()
+ {
#if __GTHREADS && ! defined __GTHREAD_MUTEX_INIT
if (__gthread_active_p())
__GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
}
#if __GTHREADS && ! defined __GTHREAD_MUTEX_INIT
- ~__mutex()
- {
+ ~__mutex()
+ {
if (__gthread_active_p())
- __gthread_mutex_destroy(&_M_mutex);
+ __gthread_mutex_destroy(&_M_mutex);
}
-#endif
+#endif
void lock()
{
}
#endif
}
-
+
void unlock()
{
#if __GTHREADS
{ return &_M_mutex; }
};
- class __recursive_mutex
+ class __recursive_mutex
{
private:
#if __GTHREADS && defined __GTHREAD_RECURSIVE_MUTEX_INIT
__recursive_mutex& operator=(const __recursive_mutex&);
public:
- __recursive_mutex()
- {
+ __recursive_mutex()
+ {
#if __GTHREADS && ! defined __GTHREAD_RECURSIVE_MUTEX_INIT
if (__gthread_active_p())
__GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
#endif
void lock()
- {
+ {
#if __GTHREADS
if (__gthread_active_p())
{
}
#endif
}
-
+
void unlock()
- {
+ {
#if __GTHREADS
if (__gthread_active_p())
{
__cond& operator=(const __cond&);
public:
- __cond()
- {
+ __cond()
+ {
#if __GTHREADS && ! defined __GTHREAD_COND_INIT
if (__gthread_active_p())
__GTHREAD_COND_INIT_FUNCTION(&_M_cond);
}
#if __GTHREADS && ! defined __GTHREAD_COND_INIT
- ~__cond()
- {
+ ~__cond()
+ {
if (__gthread_active_p())
- __gthread_cond_destroy(&_M_cond);
+ __gthread_cond_destroy(&_M_cond);
}
-#endif
+#endif
void broadcast()
{
// _M_extra is the number of objects that correspond to the
// extra space where debug information is stored.
size_type _M_extra;
-
+
_Alloc _M_allocator;
template<typename _Alloc2,
size_type _S_extra()
{
const std::size_t __obj_size = sizeof(value_type);
- return (sizeof(size_type) + __obj_size - 1) / __obj_size;
+ return (sizeof(size_type) + __obj_size - 1) / __obj_size;
}
public:
_GLIBCXX_NODISCARD pointer
allocate(size_type __n)
{
- pointer __res = _M_allocator.allocate(__n + _M_extra);
+ pointer __res = _M_allocator.allocate(__n + _M_extra);
size_type* __ps = reinterpret_cast<size_type*>(__res);
*__ps = __n;
return __res + _M_extra;
// Note the non-standard pointer types.
typedef _Pointer_adapter<_Relative_pointer_impl<_Tp> > pointer;
- typedef _Pointer_adapter<_Relative_pointer_impl<const _Tp> >
+ typedef _Pointer_adapter<_Relative_pointer_impl<const _Tp> >
const_pointer;
typedef _Tp& reference;
struct rebind
{ typedef _ExtPtr_allocator<_Up> other; };
- _ExtPtr_allocator() _GLIBCXX_USE_NOEXCEPT
+ _ExtPtr_allocator() _GLIBCXX_USE_NOEXCEPT
: _M_real_alloc() { }
_ExtPtr_allocator(const _ExtPtr_allocator& __rarg) _GLIBCXX_USE_NOEXCEPT
{ construct(__p.get(), std::forward<_Args>(__args)...); }
template<typename _Up>
- void
+ void
destroy(_Up* __p)
{ __p->~_Up(); }
* @brief An allocator that uses malloc.
* @ingroup allocators
*
- * This is precisely the allocator defined in the C++ Standard.
+ * This is precisely the allocator defined in the C++ Standard.
* - all allocation calls malloc
* - all deallocation calls free
*/
#if __cplusplus <= 201703L
size_type
- max_size() const _GLIBCXX_USE_NOEXCEPT
+ max_size() const _GLIBCXX_USE_NOEXCEPT
{ return _M_max_size(); }
#if __cplusplus >= 201103L
{ ::new((void *)__p) _Up(std::forward<_Args>(__args)...); }
template<typename _Up>
- void
+ void
destroy(_Up* __p)
noexcept(std::is_nothrow_destructible<_Up>::value)
{ __p->~_Up(); }
// NB: In any case must be >= sizeof(_Block_record), that
// is 4 on 32 bit machines and 8 on 64 bit machines.
size_t _M_align;
-
+
// Allocation requests (after round-up to power of 2) below
// this value will be handled by the allocator. A raw new/
// call will be used for requests larger than this value.
// NB: Must be much smaller than _M_chunk_size and in any
// case <= 32768.
- size_t _M_max_bytes;
+ size_t _M_max_bytes;
// Size in bytes of the smallest bin.
// NB: Must be a power of 2 and >= _M_align (and of course
// value. Based on previous discussions on the libstdc++
// mailing list we have chosen the value below.
// See http://gcc.gnu.org/ml/libstdc++/2001-07/msg00077.html
- // NB: At least one order of magnitude > _M_max_bytes.
+ // NB: At least one order of magnitude > _M_max_bytes.
size_t _M_chunk_size;
// The maximum number of supported threads. For
// _M_freelist_headroom % of the freelist, we move these
// records back to the global pool.
size_t _M_freelist_headroom;
-
+
// Set to true forces all allocations to use new().
- bool _M_force_new;
-
+ bool _M_force_new;
+
explicit
_Tune()
: _M_align(_S_align), _M_max_bytes(_S_max_bytes), _M_min_bin(_S_min_bin),
- _M_chunk_size(_S_chunk_size), _M_max_threads(_S_max_threads),
- _M_freelist_headroom(_S_freelist_headroom),
+ _M_chunk_size(_S_chunk_size), _M_max_threads(_S_max_threads),
+ _M_freelist_headroom(_S_freelist_headroom),
_M_force_new(std::getenv("GLIBCXX_FORCE_NEW") ? true : false)
{ }
explicit
- _Tune(size_t __align, size_t __maxb, size_t __minbin, size_t __chunk,
- size_t __maxthreads, size_t __headroom, bool __force)
+ _Tune(size_t __align, size_t __maxb, size_t __minbin, size_t __chunk,
+ size_t __maxthreads, size_t __headroom, bool __force)
: _M_align(__align), _M_max_bytes(__maxb), _M_min_bin(__minbin),
_M_chunk_size(__chunk), _M_max_threads(__maxthreads),
_M_freelist_headroom(__headroom), _M_force_new(__force)
{ }
};
-
+
struct _Block_address
{
void* _M_initial;
_Block_address* _M_next;
};
-
+
const _Tune&
_M_get_options() const
{ return _M_options; }
void
_M_set_options(_Tune __t)
- {
+ {
if (!_M_init)
_M_options = __t;
}
_M_get_align()
{ return _M_options._M_align; }
- explicit
- __pool_base()
+ explicit
+ __pool_base()
: _M_options(_Tune()), _M_binmap(0), _M_init(false) { }
- explicit
+ explicit
__pool_base(const _Tune& __options)
: _M_options(__options), _M_binmap(0), _M_init(false) { }
private:
- explicit
+ explicit
__pool_base(const __pool_base&);
__pool_base&
protected:
// Configuration options.
_Tune _M_options;
-
+
_Binmap_type* _M_binmap;
// Configuration of the pool object via _M_options can happen
// A list of the initial addresses of all allocated blocks.
_Block_address* _M_address;
};
-
+
void
_M_initialize_once()
{
void
_M_destroy() throw();
- char*
+ char*
_M_reserve_block(size_t __bytes, const size_t __thread_id);
-
+
void
_M_reclaim_block(char* __p, size_t __bytes) throw ();
-
- size_t
+
+ size_t
_M_get_thread_id() { return 0; }
-
+
const _Bin_record&
_M_get_bin(size_t __which)
{ return _M_bin[__which]; }
-
+
void
_M_adjust_freelist(const _Bin_record&, _Block_record*, size_t)
{ }
- explicit __pool()
+ explicit __pool()
: _M_bin(0), _M_bin_size(1) { }
- explicit __pool(const __pool_base::_Tune& __tune)
+ explicit __pool(const __pool_base::_Tune& __tune)
: __pool_base(__tune), _M_bin(0), _M_bin_size(1) { }
private:
// power of 2 size. Memory to this "array" is allocated in
// _M_initialize().
_Bin_record* _M_bin;
-
+
// Actual value calculated in _M_initialize().
- size_t _M_bin_size;
+ size_t _M_bin_size;
void
_M_initialize();
};
-
+
#ifdef __GTHREADS
/// Specialization for thread enabled, via gthreads.h.
template<>
{
// Points to next free thread id record. NULL if last record in list.
_Thread_record* _M_next;
-
+
// Thread id ranging from 1 to _S_max_threads.
size_t _M_id;
};
-
+
union _Block_record
{
// Points to the block_record of the next free block.
_Block_record* _M_next;
-
+
// The thread id of the thread which has requested this block.
size_t _M_thread_id;
};
-
+
struct _Bin_record
{
// An "array" of pointers to the first free block for each
// thread id. Memory to this "array" is allocated in
// _S_initialize() for _S_max_threads + global pool 0.
_Block_record** _M_first;
-
+
// A list of the initial addresses of all allocated blocks.
_Block_address* _M_address;
// for _S_max_threads + global pool 0.
size_t* _M_free;
size_t* _M_used;
-
+
// Each bin has its own mutex which is used to ensure data
// integrity while changing "ownership" on a block. The mutex
// is initialized in _S_initialize().
__gthread_mutex_t* _M_mutex;
};
-
+
// XXX GLIBCXX_ABI Deprecated
void
_M_initialize(__destroy_handler);
void
_M_destroy() throw();
- char*
+ char*
_M_reserve_block(size_t __bytes, const size_t __thread_id);
-
+
void
_M_reclaim_block(char* __p, size_t __bytes) throw ();
-
+
const _Bin_record&
_M_get_bin(size_t __which)
{ return _M_bin[__which]; }
-
+
void
- _M_adjust_freelist(const _Bin_record& __bin, _Block_record* __block,
+ _M_adjust_freelist(const _Bin_record& __bin, _Block_record* __block,
size_t __thread_id)
{
if (__gthread_active_p())
void
_M_destroy_thread_key(void*) throw ();
- size_t
+ size_t
_M_get_thread_id();
- explicit __pool()
- : _M_bin(0), _M_bin_size(1), _M_thread_freelist(0)
+ explicit __pool()
+ : _M_bin(0), _M_bin_size(1), _M_thread_freelist(0)
{ }
- explicit __pool(const __pool_base::_Tune& __tune)
- : __pool_base(__tune), _M_bin(0), _M_bin_size(1),
- _M_thread_freelist(0)
+ explicit __pool(const __pool_base::_Tune& __tune)
+ : __pool_base(__tune), _M_bin(0), _M_bin_size(1),
+ _M_thread_freelist(0)
{ }
private:
struct __common_pool
{
typedef _PoolTp<_Thread> pool_type;
-
+
static pool_type&
_S_get_pool()
- {
+ {
static pool_type _S_pool;
return _S_pool;
}
struct __common_pool_base;
template<template <bool> class _PoolTp>
- struct __common_pool_base<_PoolTp, false>
+ struct __common_pool_base<_PoolTp, false>
: public __common_pool<_PoolTp, false>
{
using __common_pool<_PoolTp, false>::_S_get_pool;
static bool __init;
if (__builtin_expect(__init == false, false))
{
- _S_get_pool()._M_initialize_once();
+ _S_get_pool()._M_initialize_once();
__init = true;
}
}
: public __common_pool<_PoolTp, true>
{
using __common_pool<_PoolTp, true>::_S_get_pool;
-
+
static void
- _S_initialize()
+ _S_initialize()
{ _S_get_pool()._M_initialize_once(); }
static void
_S_initialize_once()
- {
+ {
static bool __init;
if (__builtin_expect(__init == false, false))
{
// Double check initialization. May be necessary on some
// systems for proper construction when not compiling with
// thread flags.
- _S_get_pool()._M_initialize_once();
+ _S_get_pool()._M_initialize_once();
__init = true;
}
}
template<template <bool> class _PoolTp, bool _Thread>
struct __common_pool_policy : public __common_pool_base<_PoolTp, _Thread>
{
- template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
+ template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
bool _Thread1 = _Thread>
struct _M_rebind
{ typedef __common_pool_policy<_PoolTp1, _Thread1> other; };
using __common_pool_base<_PoolTp, _Thread>::_S_get_pool;
using __common_pool_base<_PoolTp, _Thread>::_S_initialize_once;
};
-
+
template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
struct __per_type_pool
{
typedef _Tp value_type;
typedef _PoolTp<_Thread> pool_type;
-
+
static pool_type&
_S_get_pool()
{
struct __per_type_pool_base;
template<typename _Tp, template <bool> class _PoolTp>
- struct __per_type_pool_base<_Tp, _PoolTp, false>
- : public __per_type_pool<_Tp, _PoolTp, false>
+ struct __per_type_pool_base<_Tp, _PoolTp, false>
+ : public __per_type_pool<_Tp, _PoolTp, false>
{
using __per_type_pool<_Tp, _PoolTp, false>::_S_get_pool;
static bool __init;
if (__builtin_expect(__init == false, false))
{
- _S_get_pool()._M_initialize_once();
+ _S_get_pool()._M_initialize_once();
__init = true;
}
}
#ifdef __GTHREADS
template<typename _Tp, template <bool> class _PoolTp>
- struct __per_type_pool_base<_Tp, _PoolTp, true>
- : public __per_type_pool<_Tp, _PoolTp, true>
+ struct __per_type_pool_base<_Tp, _PoolTp, true>
+ : public __per_type_pool<_Tp, _PoolTp, true>
{
using __per_type_pool<_Tp, _PoolTp, true>::_S_get_pool;
static void
- _S_initialize()
+ _S_initialize()
{ _S_get_pool()._M_initialize_once(); }
static void
_S_initialize_once()
- {
+ {
static bool __init;
if (__builtin_expect(__init == false, false))
{
// Double check initialization. May be necessary on some
// systems for proper construction when not compiling with
// thread flags.
- _S_get_pool()._M_initialize_once();
+ _S_get_pool()._M_initialize_once();
__init = true;
}
}
/// Policy for individual __pool objects.
template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
- struct __per_type_pool_policy
+ struct __per_type_pool_policy
: public __per_type_pool_base<_Tp, _PoolTp, _Thread>
{
- template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
+ template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
bool _Thread1 = _Thread>
struct _M_rebind
{ typedef __per_type_pool_policy<_Tp1, _PoolTp1, _Thread1> other; };
/// Base class for _Tp dependent member functions.
template<typename _Tp>
- class __mt_alloc_base
+ class __mt_alloc_base
{
public:
typedef std::size_t size_type;
{ return std::__addressof(__x); }
size_type
- max_size() const _GLIBCXX_USE_NOEXCEPT
+ max_size() const _GLIBCXX_USE_NOEXCEPT
{ return size_type(-1) / sizeof(_Tp); }
#if __cplusplus >= 201103L
{ ::new((void *)__p) _Up(std::forward<_Args>(__args)...); }
template<typename _Up>
- void
+ void
destroy(_Up* __p) { __p->~_Up(); }
#else
// _GLIBCXX_RESOLVE_LIB_DEFECTS
// 402. wrong new expression in [some_] allocator::construct
- void
- construct(pointer __p, const _Tp& __val)
+ void
+ construct(pointer __p, const _Tp& __val)
{ ::new((void *)__p) _Tp(__val); }
- void
+ void
destroy(pointer __p) { __p->~_Tp(); }
#endif
};
* Further details:
* https://gcc.gnu.org/onlinedocs/libstdc++/manual/mt_allocator.html
*/
- template<typename _Tp,
+ template<typename _Tp,
typename _Poolp = __common_pool_policy<__pool, __thread_default> >
class __mt_alloc : public __mt_alloc_base<_Tp>
{
template<typename _Tp1, typename _Poolp1 = _Poolp>
struct rebind
- {
+ {
typedef typename _Poolp1::template _M_rebind<_Tp1>::other pol_type;
typedef __mt_alloc<_Tp1, pol_type> other;
};
const __pool_base::_Tune
_M_get_options()
- {
+ {
// Return a copy, not a reference, for external consumption.
return __policy_type::_S_get_pool()._M_get_options();
}
-
+
void
_M_set_options(__pool_base::_Tune __t)
{ __policy_type::_S_get_pool()._M_set_options(__t); }
void* __ret = ::operator new(__bytes);
return static_cast<_Tp*>(__ret);
}
-
+
// Round up to power of 2 and figure out which bin to use.
const size_type __which = __pool._M_get_binmap(__bytes);
const size_type __thread_id = __pool._M_get_thread_id();
-
+
// Find out if we have blocks on our freelist. If so, go ahead
// and use them directly without having to lock anything.
char* __c;
typedef typename __pool_type::_Block_record _Block_record;
_Block_record* __block = __bin._M_first[__thread_id];
__bin._M_first[__thread_id] = __block->_M_next;
-
+
__pool._M_adjust_freelist(__bin, __block, __thread_id);
__c = reinterpret_cast<char*>(__block) + __pool._M_get_align();
}
}
return static_cast<_Tp*>(static_cast<void*>(__c));
}
-
+
template<typename _Tp, typename _Poolp>
void
__mt_alloc<_Tp, _Poolp>::
__pool._M_reclaim_block(reinterpret_cast<char*>(__p), __bytes);
}
}
-
+
template<typename _Tp, typename _Poolp>
inline bool
operator==(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
{ return true; }
-
+
#if __cpp_impl_three_way_comparison < 201907L
template<typename _Tp, typename _Poolp>
inline bool
static char_type*
move(char_type* __s1, const char_type* __s2, size_t __n)
- {
+ {
if (__n == 0)
return __s1;
return static_cast<char_type*>
{ return __c1 == __c2; }
static int_type
- eof()
+ eof()
{
int_type __r = { static_cast<typename __gnu_cxx::__conditional_type
<std::__is_integer<int_type>::__value,
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
- /**
+ /**
* @brief A storage policy for use with _Pointer_adapter<> which yields a
* standard pointer.
- *
+ *
* A _Storage_policy is required to provide 4 things:
* 1) A get() API for returning the stored pointer value.
* 2) An set() API for storing a pointer value.
* 4) An operator<() to support pointer comparison.
* 5) An operator==() to support pointer comparison.
*/
- template<typename _Tp>
- class _Std_pointer_impl
+ template<typename _Tp>
+ class _Std_pointer_impl
{
public:
// the type this pointer points to.
typedef _Tp element_type;
-
+
// A method to fetch the pointer value as a standard T* value;
- inline _Tp*
- get() const
+ inline _Tp*
+ get() const
{ return _M_value; }
-
+
// A method to set the pointer value, from a standard T* value;
- inline void
- set(element_type* __arg)
+ inline void
+ set(element_type* __arg)
{ _M_value = __arg; }
-
+
// Comparison of pointers
inline bool
operator<(const _Std_pointer_impl& __rarg) const
{ return (_M_value < __rarg._M_value); }
-
+
inline bool
operator==(const _Std_pointer_impl& __rarg) const
{ return (_M_value == __rarg._M_value); }
* @brief A storage policy for use with _Pointer_adapter<> which stores
* the pointer's address as an offset value which is relative to
* its own address.
- *
+ *
* This is intended for pointers within shared memory regions which
* might be mapped at different addresses by different processes.
* For null pointers, a value of 1 is used. (0 is legitimate
* there is no reason why any normal pointer would point 1 byte into
* its own pointer address.
*/
- template<typename _Tp>
- class _Relative_pointer_impl
+ template<typename _Tp>
+ class _Relative_pointer_impl
{
public:
typedef _Tp element_type;
-
+
_Tp*
- get() const
+ get() const
{
if (_M_diff == 1)
return 0;
return reinterpret_cast<_Tp*>(reinterpret_cast<uintptr_t>(this)
+ _M_diff);
}
-
- void
+
+ void
set(_Tp* __arg)
{
if (!__arg)
_M_diff = 1;
else
- _M_diff = reinterpret_cast<uintptr_t>(__arg)
+ _M_diff = reinterpret_cast<uintptr_t>(__arg)
- reinterpret_cast<uintptr_t>(this);
}
-
+
// Comparison of pointers
inline bool
operator<(const _Relative_pointer_impl& __rarg) const
typedef __UINTPTR_TYPE__ uintptr_t;
uintptr_t _M_diff;
};
-
+
/**
* Relative_pointer_impl needs a specialization for const T because of
* the casting done during pointer arithmetic.
*/
- template<typename _Tp>
- class _Relative_pointer_impl<const _Tp>
+ template<typename _Tp>
+ class _Relative_pointer_impl<const _Tp>
{
public:
typedef const _Tp element_type;
-
+
const _Tp*
get() const
{
return reinterpret_cast<const _Tp*>
(reinterpret_cast<uintptr_t>(this) + _M_diff);
}
-
- void
+
+ void
set(const _Tp* __arg)
{
if (!__arg)
_M_diff = 1;
else
- _M_diff = reinterpret_cast<uintptr_t>(__arg)
+ _M_diff = reinterpret_cast<uintptr_t>(__arg)
- reinterpret_cast<uintptr_t>(this);
}
-
+
// Comparison of pointers
inline bool
operator<(const _Relative_pointer_impl& __rarg) const
operator==(const _Relative_pointer_impl& __rarg) const
{ return (reinterpret_cast<uintptr_t>(this->get())
== reinterpret_cast<uintptr_t>(__rarg.get())); }
-
+
private:
typedef __UINTPTR_TYPE__ uintptr_t;
uintptr_t _M_diff;
* _Pointer_adapter for cases of void*, const void*, and so on.
*/
struct _Invalid_type { };
-
+
template<typename _Tp>
- struct _Reference_type
+ struct _Reference_type
{ typedef _Tp& reference; };
- template<>
- struct _Reference_type<void>
+ template<>
+ struct _Reference_type<void>
{ typedef _Invalid_type& reference; };
- template<>
- struct _Reference_type<const void>
+ template<>
+ struct _Reference_type<const void>
{ typedef const _Invalid_type& reference; };
- template<>
- struct _Reference_type<volatile void>
+ template<>
+ struct _Reference_type<volatile void>
{ typedef volatile _Invalid_type& reference; };
- template<>
- struct _Reference_type<volatile const void>
+ template<>
+ struct _Reference_type<volatile const void>
{ typedef const volatile _Invalid_type& reference; };
/**
* std::iterator_traits<> is normally specialized for const T*, so
* that value_type is still T.
*/
- template<typename _Tp>
- struct _Unqualified_type
+ template<typename _Tp>
+ struct _Unqualified_type
{ typedef _Tp type; };
-
- template<typename _Tp>
- struct _Unqualified_type<const _Tp>
+
+ template<typename _Tp>
+ struct _Unqualified_type<const _Tp>
{ typedef _Tp type; };
-
+
/**
* The following provides an 'alternative pointer' that works with
* the containers when specified as the pointer typedef of the
* const _Tp* const == const _Pointer_adapter<_Std_pointer_impl<const _Tp> >;
*/
template<typename _Storage_policy>
- class _Pointer_adapter : public _Storage_policy
+ class _Pointer_adapter : public _Storage_policy
{
public:
typedef typename _Storage_policy::element_type element_type;
typedef _Pointer_adapter pointer;
typedef typename _Reference_type<element_type>::reference reference;
- // Reminder: 'const' methods mean that the method is valid when the
- // pointer is immutable, and has nothing to do with whether the
+ // Reminder: 'const' methods mean that the method is valid when the
+ // pointer is immutable, and has nothing to do with whether the
// 'pointee' is const.
// Default Constructor (Convert from element_type*)
{ _Storage_policy::set(__arg); }
// Copy constructor from _Pointer_adapter of same type.
- _Pointer_adapter(const _Pointer_adapter& __arg)
+ _Pointer_adapter(const _Pointer_adapter& __arg)
{ _Storage_policy::set(__arg.get()); }
// Convert from _Up* if conversion to element_type* is valid.
// Destructor
~_Pointer_adapter() { }
-
+
// Assignment operator
_Pointer_adapter&
- operator=(const _Pointer_adapter& __arg)
+ operator=(const _Pointer_adapter& __arg)
{
- _Storage_policy::set(__arg.get());
- return *this;
+ _Storage_policy::set(__arg.get());
+ return *this;
}
template<typename _Up>
_Pointer_adapter&
operator=(const _Pointer_adapter<_Up>& __arg)
{
- _Storage_policy::set(__arg.get());
- return *this;
+ _Storage_policy::set(__arg.get());
+ return *this;
}
template<typename _Up>
_Pointer_adapter&
operator=(_Up* __arg)
{
- _Storage_policy::set(__arg);
- return *this;
+ _Storage_policy::set(__arg);
+ return *this;
}
// Operator*, returns element_type&
- inline reference
- operator*() const
+ inline reference
+ operator*() const
{ return *(_Storage_policy::get()); }
// Operator->, returns element_type*
- inline element_type*
- operator->() const
+ inline element_type*
+ operator->() const
{ return _Storage_policy::get(); }
// Operator[], returns a element_type& to the item at that loc.
public:
operator __unspecified_bool_type() const
{
- return _Storage_policy::get() == 0 ? 0 :
- &_Pointer_adapter::operator->;
+ return _Storage_policy::get() == 0 ? 0 :
+ &_Pointer_adapter::operator->;
}
// ! operator (for: if (!ptr)...)
inline bool
- operator!() const
+ operator!() const
{ return (_Storage_policy::get() == 0); }
#endif
-
+
// Pointer differences
- inline friend std::ptrdiff_t
- operator-(const _Pointer_adapter& __lhs, element_type* __rhs)
+ inline friend std::ptrdiff_t
+ operator-(const _Pointer_adapter& __lhs, element_type* __rhs)
{ return (__lhs.get() - __rhs); }
-
- inline friend std::ptrdiff_t
- operator-(element_type* __lhs, const _Pointer_adapter& __rhs)
+
+ inline friend std::ptrdiff_t
+ operator-(element_type* __lhs, const _Pointer_adapter& __rhs)
{ return (__lhs - __rhs.get()); }
-
+
template<typename _Up>
- inline friend std::ptrdiff_t
- operator-(const _Pointer_adapter& __lhs, _Up* __rhs)
+ inline friend std::ptrdiff_t
+ operator-(const _Pointer_adapter& __lhs, _Up* __rhs)
{ return (__lhs.get() - __rhs); }
-
+
template<typename _Up>
- inline friend std::ptrdiff_t
+ inline friend std::ptrdiff_t
operator-(_Up* __lhs, const _Pointer_adapter& __rhs)
{ return (__lhs - __rhs.get()); }
template<typename _Up>
- inline std::ptrdiff_t
- operator-(const _Pointer_adapter<_Up>& __rhs) const
+ inline std::ptrdiff_t
+ operator-(const _Pointer_adapter<_Up>& __rhs) const
{ return (_Storage_policy::get() - __rhs.get()); }
-
+
// Pointer math
// Note: There is a reason for all this overloading based on different
// integer types. In some libstdc++-v3 test cases, a templated
// operator+ is declared which can match any types. This operator
- // tends to "steal" the recognition of _Pointer_adapter's own operator+
+ // tends to "steal" the recognition of _Pointer_adapter's own operator+
// unless the integer type matches perfectly.
#define _CXX_POINTER_ARITH_OPERATOR_SET(INT_TYPE) \
return *this; \
} \
// END of _CXX_POINTER_ARITH_OPERATOR_SET macro
-
+
// Expand into the various pointer arithmetic operators needed.
_CXX_POINTER_ARITH_OPERATOR_SET(short);
_CXX_POINTER_ARITH_OPERATOR_SET(unsigned short);
#endif
// Mathematical Manipulators
- inline _Pointer_adapter&
+ inline _Pointer_adapter&
operator++()
{
- _Storage_policy::set(_Storage_policy::get() + 1);
+ _Storage_policy::set(_Storage_policy::get() + 1);
return *this;
}
-
- inline _Pointer_adapter
+
+ inline _Pointer_adapter
operator++(int)
{
_Pointer_adapter __tmp(*this);
_Storage_policy::set(_Storage_policy::get() + 1);
return __tmp;
}
-
- inline _Pointer_adapter&
- operator--()
+
+ inline _Pointer_adapter&
+ operator--()
{
- _Storage_policy::set(_Storage_policy::get() - 1);
+ _Storage_policy::set(_Storage_policy::get() - 1);
return *this;
}
-
+
inline _Pointer_adapter
- operator--(int)
+ operator--(int)
{
_Pointer_adapter __tmp(*this);
_Storage_policy::set(_Storage_policy::get() - 1);
{ return __lhs.get() OPERATOR __rhs.get(); } \
\
// End GCC_CXX_POINTER_COMPARISON_OPERATION_SET Macro
-
+
// Expand into the various comparison operators needed.
_GCC_CXX_POINTER_COMPARISON_OPERATION_SET(==)
_GCC_CXX_POINTER_COMPARISON_OPERATION_SET(!=)
template<typename _Tp>
inline bool
operator==(const _Pointer_adapter<_Tp>& __lhs, int __rhs)
- { return __lhs.get() == reinterpret_cast<void*>(__rhs); }
+ { return __lhs.get() == reinterpret_cast<void*>(__rhs); }
template<typename _Tp>
inline bool
operator==(int __lhs, const _Pointer_adapter<_Tp>& __rhs)
- { return __rhs.get() == reinterpret_cast<void*>(__lhs); }
+ { return __rhs.get() == reinterpret_cast<void*>(__lhs); }
template<typename _Tp>
inline bool
operator!=(const _Pointer_adapter<_Tp>& __lhs, int __rhs)
- { return __lhs.get() != reinterpret_cast<void*>(__rhs); }
+ { return __lhs.get() != reinterpret_cast<void*>(__rhs); }
template<typename _Tp>
inline bool
operator!=(int __lhs, const _Pointer_adapter<_Tp>& __rhs)
- { return __rhs.get() != reinterpret_cast<void*>(__lhs); }
+ { return __rhs.get() != reinterpret_cast<void*>(__lhs); }
/**
* Comparison operators for _Pointer_adapter defer to the base class'
*/
template<typename _Tp>
inline bool
- operator==(const _Pointer_adapter<_Tp>& __lhs,
+ operator==(const _Pointer_adapter<_Tp>& __lhs,
const _Pointer_adapter<_Tp>& __rhs)
{ return __lhs._Tp::operator==(__rhs); }
template<typename _Tp>
inline bool
- operator<=(const _Pointer_adapter<_Tp>& __lhs,
+ operator<=(const _Pointer_adapter<_Tp>& __lhs,
const _Pointer_adapter<_Tp>& __rhs)
{ return __lhs._Tp::operator<(__rhs) || __lhs._Tp::operator==(__rhs); }
template<typename _Tp>
inline bool
- operator!=(const _Pointer_adapter<_Tp>& __lhs,
+ operator!=(const _Pointer_adapter<_Tp>& __lhs,
const _Pointer_adapter<_Tp>& __rhs)
{ return !(__lhs._Tp::operator==(__rhs)); }
template<typename _Tp>
inline bool
- operator>(const _Pointer_adapter<_Tp>& __lhs,
+ operator>(const _Pointer_adapter<_Tp>& __lhs,
const _Pointer_adapter<_Tp>& __rhs)
{ return !(__lhs._Tp::operator<(__rhs) || __lhs._Tp::operator==(__rhs)); }
template<typename _Tp>
inline bool
- operator>=(const _Pointer_adapter<_Tp>& __lhs,
+ operator>=(const _Pointer_adapter<_Tp>& __lhs,
const _Pointer_adapter<_Tp>& __rhs)
{ return !(__lhs._Tp::operator<(__rhs)); }
#if _GLIBCXX_HOSTED
template<typename _CharT, typename _Traits, typename _StoreT>
inline std::basic_ostream<_CharT, _Traits>&
- operator<<(std::basic_ostream<_CharT, _Traits>& __os,
+ operator<<(std::basic_ostream<_CharT, _Traits>& __os,
const _Pointer_adapter<_StoreT>& __p)
{ return (__os << __p.get()); }
#endif // HOSTED
enum { _S_align = 8 };
enum { _S_max_bytes = 128 };
enum { _S_free_list_size = (size_t)_S_max_bytes / (size_t)_S_align };
-
+
union _Obj
{
union _Obj* _M_free_list_link;
char _M_client_data[1]; // The client sees this.
};
-
+
static _Obj* volatile _S_free_list[_S_free_list_size];
// Chunk allocation state.
static char* _S_start_free;
static char* _S_end_free;
- static size_t _S_heap_size;
-
+ static size_t _S_heap_size;
+
size_t
_M_round_up(size_t __bytes)
{ return ((__bytes + (size_t)_S_align - 1) & ~((size_t)_S_align - 1)); }
-
+
_GLIBCXX_CONST _Obj* volatile*
_M_get_free_list(size_t __bytes) throw ();
-
+
__mutex&
_M_get_mutex() throw ();
// free list.
void*
_M_refill(size_t __n);
-
+
// Allocates a chunk for nobjs of size size. nobjs may be reduced
// if it is inconvenient to allocate the requested number.
char*
{ return std::__addressof(__x); }
size_type
- max_size() const _GLIBCXX_USE_NOEXCEPT
+ max_size() const _GLIBCXX_USE_NOEXCEPT
{ return std::size_t(-1) / sizeof(_Tp); }
#if __cplusplus >= 201103L
{ ::new((void *)__p) _Up(std::forward<_Args>(__args)...); }
template<typename _Up>
- void
+ void
destroy(_Up* __p) { __p->~_Up(); }
#else
// _GLIBCXX_RESOLVE_LIB_DEFECTS
// 402. wrong new expression in [some_] allocator::construct
- void
- construct(pointer __p, const _Tp& __val)
+ void
+ construct(pointer __p, const _Tp& __val)
{ ::new((void *)__p) _Tp(__val); }
- void
+ void
destroy(pointer __p) { __p->~_Tp(); }
#endif
allocate(size_type __n, const void* = 0);
void
- deallocate(pointer __p, size_type __n);
+ deallocate(pointer __p, size_type __n);
};
template<typename _Tp>
else
{
_Obj* volatile* __free_list = _M_get_free_list(__bytes);
-
+
__scoped_lock sentry(_M_get_mutex());
_Obj* __restrict__ __result = *__free_list;
if (__builtin_expect(__result == 0, 0))
_S_char_ptr_len(const _CharT* __s)
{
const _CharT* __p = __s;
-
+
while (!_S_is0(*__p))
++__p;
return (__p - __s);
{
if (!_S_is_basic_char_type((_CharT*)0))
std::_Destroy(__s, __s + __n, __a);
-
+
// This has to be a static member, so this gets a bit messy
__a.deallocate(__s,
_Rope_RopeLeaf<_CharT, _Alloc>::_S_rounded_up_size(__n));
__left->
_M_get_allocator());
size_t __depth = __result->_M_depth;
-
+
if (__depth > 20
&& (__result->_M_size < 1000
|| __depth > size_t(__detail::_S_max_rope_depth)))
if (__orig_size + __slen <= size_t(_S_copy_max)
&& __detail::_S_leaf == __r->_M_tag)
{
- __result = _S_destr_leaf_concat_char_iter((_RopeLeaf*)__r, __s,
+ __result = _S_destr_leaf_concat_char_iter((_RopeLeaf*)__r, __s,
__slen);
return __result;
}
return __result;
}
#endif /* !__GC */
-
+
template <class _CharT, class _Alloc>
typename rope<_CharT, _Alloc>::_RopeRep*
rope<_CharT, _Alloc>::
size_t __len = __base->_M_size;
size_t __adj_endp1;
const size_t __lazy_threshold = 128;
-
+
if (__endp1 >= __len)
{
if (0 == __start)
}
else
__adj_endp1 = __len;
-
+
}
else
__adj_endp1 = __endp1;
_RopeRep* __right = __c->_M_right;
size_t __left_len = __left->_M_size;
_RopeRep* __result;
-
+
if (__adj_endp1 <= __left_len)
return _S_substring(__left, __start, __endp1);
else if (__start >= __left_len)
__start,
__left_len));
_Self_destruct_ptr __right_result(_S_substring(__right, 0,
- __endp1
+ __endp1
- __left_len));
__result = _S_concat(__left_result, __right_result);
return __result;
__adj_endp1 - __start,
__base->_M_get_allocator());
return __result;
-
+
} // *** else fall through: ***
}
case __detail::_S_function:
if (__start >= __adj_endp1)
return 0;
__result_len = __adj_endp1 - __start;
-
+
if (__result_len > __lazy_threshold)
goto lazy;
__section = (_CharT*)
private:
_CharT* _M_buf_ptr;
public:
-
+
_Rope_flatten_char_consumer(_CharT* __buffer)
{ _M_buf_ptr = __buffer; }
~_Rope_flatten_char_consumer() {}
-
+
bool
operator()(const _CharT* __leaf, std::size_t __n)
{
_CharT _M_pattern;
public:
std::size_t _M_count; // Number of nonmatching characters
-
+
_Rope_find_char_char_consumer(_CharT __p)
: _M_pattern(__p), _M_count(0) {}
-
+
~_Rope_find_char_char_consumer() {}
-
+
bool
operator()(const _CharT* __leaf, std::size_t __n)
{
{
char __f = __o.fill();
std::size_t __i;
-
+
for (__i = 0; __i < __n; __i++)
__o.put(__f);
}
size_t __rope_len = __r.size();
_Rope_insert_char_consumer<_CharT, _Traits> __c(__o);
bool __is_simple = _Rope_is_simple((_CharT*)0);
-
+
if (__rope_len < __w)
__pad_len = __w - __rope_len;
else
_RopeConcatenation* __c = (_RopeConcatenation*)__r;
_RopeRep* __left = __c->_M_left;
_RopeRep* __right = __c->_M_right;
-
+
#ifdef __GC
printf("Concatenation %p (depth = %d, len = %ld, %s balanced)\n",
__r, __r->_M_depth, __r->_M_size,
else
{
const char* __kind;
-
+
switch (__r->_M_tag)
{
case __detail::_S_leaf:
_Self_destruct_ptr __prefix(_S_substring(__r, 0, __max_len));
_CharT __buffer[__max_len + 1];
bool __too_big = __r->_M_size > __prefix->_M_size;
-
+
_S_flatten(__prefix, __buffer);
__buffer[__prefix->_M_size] = _S_eos((_CharT*)0);
printf("%s%s\n", (char*)__buffer,
// __forest[__i]._M_size >= _S_min_len[__i]
// __forest[__i]._M_depth = __i
// References from forest are included in refcount.
-
+
for (__i = 0; __i <= int(__detail::_S_max_rope_depth); ++__i)
__forest[__i] = 0;
__try
_S_unref(__forest[__i]);
__throw_exception_again;
}
-
+
if (__result->_M_depth > int(__detail::_S_max_rope_depth))
std::__throw_length_error(__N("rope::_S_balance"));
return(__result);
{
_RopeConcatenation* __c = (_RopeConcatenation*)__r;
-
+
_S_add_to_forest(__c->_M_left, __forest);
_S_add_to_forest(__c->_M_right, __forest);
}
_RopeRep* __too_tiny = 0; // included in refcount
int __i; // forest[0..__i-1] is empty
std::size_t __s = __r->_M_size;
-
+
for (__i = 0; __s >= _S_min_len[__i+1]/* not this bucket */; ++__i)
{
if (0 != __forest[__i])
_S_fetch(_RopeRep* __r, size_type __i)
{
__GC_CONST _CharT* __cstr = __r->_M_c_string;
-
+
if (0 != __cstr)
return __cstr[__i];
for(;;)
_RopeConcatenation* __c = (_RopeConcatenation*)__r;
_RopeRep* __left = __c->_M_left;
std::size_t __left_len = __left->_M_size;
-
+
if (__i >= __left_len)
{
__i -= __left_len;
__r = __c->_M_right;
- }
+ }
else
__r = __left;
}
{
_RopeFunction* __f = (_RopeFunction*)__r;
_CharT __result;
-
+
(*(__f->_M_fn))(__i, 1, &__result);
return __result;
}
}
}
}
-
+
#ifndef __GC
// Return a uniquely referenced character slot for the given
// position, or 0 if that's not possible.
{
_RopeRep* __clrstack[__detail::_S_max_rope_depth];
std::size_t __csptr = 0;
-
+
for(;;)
{
if (__r->_M_ref_count > 1)
_RopeConcatenation* __c = (_RopeConcatenation*)__r;
_RopeRep* __left = __c->_M_left;
std::size_t __left_len = __left->_M_size;
-
+
if (__c->_M_c_string != 0)
__clrstack[__csptr++] = __c;
if (__i >= __left_len)
{
__i -= __left_len;
__r = __c->_M_right;
- }
+ }
else
__r = __left;
}
{
std::size_t __left_len;
std::size_t __right_len;
-
+
if (0 == __right)
return 0 != __left;
if (0 == __left)
else
__result = power(__base_rope, __exponent,
_Rope_Concat_fn<_CharT, _Alloc>());
-
+
if (0 != __remainder)
__result += __remainder_rope;
}
else
__result = __remainder_rope;
-
+
this->_M_tree_ptr = __result._M_tree_ptr;
this->_M_tree_ptr->_M_ref_nonnil();
}
-
+
template<class _CharT, class _Alloc>
_CharT
rope<_CharT, _Alloc>::_S_empty_c_str[1];
-
+
template<class _CharT, class _Alloc>
const _CharT*
rope<_CharT, _Alloc>::
__gthread_mutex_unlock (&this->_M_tree_ptr->_M_c_string_lock);
return(__result);
}
-
+
template<class _CharT, class _Alloc>
const _CharT* rope<_CharT, _Alloc>::
replace_with_c_str()
}
// Algorithm specializations. More should be added.
-
+
template<class _Rope_iterator> // was templated on CharT and Alloc
void // VC++ workaround
_Rope_rotate(_Rope_iterator __first,
{
typedef typename _Rope_iterator::value_type _CharT;
typedef typename _Rope_iterator::_allocator_type _Alloc;
-
+
rope<_CharT, _Alloc>& __r(__first.container());
rope<_CharT, _Alloc> __prefix = __r.substr(0, __first.index());
rope<_CharT, _Alloc> __suffix =
typedef __vstring_utility<_CharT, _Traits, _Alloc> _Util_Base;
typedef typename _Util_Base::_CharT_alloc_type _CharT_alloc_type;
typedef typename _CharT_alloc_type::size_type size_type;
-
+
private:
// Data Members:
typename _Util_Base::template _Alloc_hider<_CharT_alloc_type>
size_type _M_string_length;
enum { _S_local_capacity = 15 };
-
+
union
{
_CharT _M_local_data[_S_local_capacity + 1];
// Create & Destroy
_CharT*
_M_create(size_type&, size_type);
-
+
void
_M_dispose()
{
// requires special behaviour if _InIterator is an integral type
template<typename _InIterator>
void
- _M_construct_aux(_InIterator __beg, _InIterator __end,
+ _M_construct_aux(_InIterator __beg, _InIterator __end,
std::__false_type)
{
typedef typename std::iterator_traits<_InIterator>::iterator_category
void
_M_construct(_InIterator __beg, _InIterator __end,
std::input_iterator_tag);
-
+
// For forward_iterators up to random_access_iterators, used for
// string::iterator, _CharT*, etc.
template<typename _FwdIterator>
_M_capacity() const
{
return _M_is_local() ? size_type(_S_local_capacity)
- : _M_allocated_capacity;
+ : _M_allocated_capacity;
}
bool
_M_data()[__len++] = *__beg;
++__beg;
}
-
+
__try
{
while (__beg != __end)
size_type __len2)
{
const size_type __how_much = _M_length() - __pos - __len1;
-
+
size_type __new_capacity = _M_length() + __len2 - __len1;
_CharT* __r = _M_create(__new_capacity, _M_capacity());
if (__how_much)
this->_S_copy(__r + __pos + __len2,
_M_data() + __pos + __len1, __how_much);
-
+
_M_dispose();
_M_data(__r);
_M_capacity(__new_capacity);
auto beg = __b.map_construct().begin();
auto end = __b.map_construct().end();
for (; beg != end; ++beg)
- __b.log_to_string(error, *beg);
+ __b.log_to_string(error, *beg);
}
#endif
return os << error;
}
template<typename _Up>
- void
+ void
destroy(_Up* __p)
{
erase_construct(__p);
// Define a nested type if some predicate holds.
template<bool, typename>
- struct __enable_if
+ struct __enable_if
{ };
template<typename _Tp>
// Given an integral builtin type, return the corresponding unsigned type.
template<typename _Tp>
struct __add_unsigned
- {
+ {
private:
typedef __enable_if<std::__is_integer<_Tp>::__value, _Tp> __if_type;
-
+
public:
- typedef typename __if_type::__type __type;
+ typedef typename __if_type::__type __type;
};
template<>
// Given an integral builtin type, return the corresponding signed type.
template<typename _Tp>
struct __remove_unsigned
- {
+ {
private:
typedef __enable_if<std::__is_integer<_Tp>::__value, _Tp> __if_type;
-
+
public:
- typedef typename __if_type::__type __type;
+ typedef typename __if_type::__type __type;
};
template<>
#pragma GCC diagnostic pop
-#endif
+#endif
/**
* @class __versa_string vstring.h
- * @brief Template class __versa_string.
+ * @brief Template class __versa_string.
* @ingroup extensions
*
* Data structure managing sequences of characters and
- * character-like objects.
+ * character-like objects.
*/
template<typename _CharT, typename _Traits, typename _Alloc,
template <typename, typename, typename> class _Base>
class __versa_string
: private _Base<_CharT, _Traits, _Alloc>
{
- typedef _Base<_CharT, _Traits, _Alloc> __vstring_base;
+ typedef _Base<_CharT, _Traits, _Alloc> __vstring_base;
typedef typename __vstring_base::_CharT_alloc_type _CharT_alloc_type;
typedef __alloc_traits<_CharT_alloc_type> _CharT_alloc_traits;
/**
* @brief Destroy the string instance.
*/
- ~__versa_string() _GLIBCXX_NOEXCEPT { }
+ ~__versa_string() _GLIBCXX_NOEXCEPT { }
/**
* @brief Assign the value of @a str to this string.
* @param __str Source string.
*/
__versa_string&
- operator=(const __versa_string& __str)
+ operator=(const __versa_string& __str)
{ return this->assign(__str); }
#if __cplusplus >= 201103L
* @param __s Source null-terminated string.
*/
__versa_string&
- operator=(const _CharT* __s)
+ operator=(const _CharT* __s)
{ return this->assign(__s); }
/**
* (*this)[0] == @a __c.
*/
__versa_string&
- operator=(_CharT __c)
- {
- this->assign(1, __c);
+ operator=(_CharT __c)
+ {
+ this->assign(1, __c);
return *this;
}
{ this->_M_clear(); }
/**
- * Returns true if the %string is empty. Equivalent to
+ * Returns true if the %string is empty. Equivalent to
* <code>*this == ""</code>.
*/
_GLIBCXX_NODISCARD bool
*/
__versa_string&
operator+=(_CharT __c)
- {
+ {
this->push_back(__c);
return *this;
}
*/
void
push_back(_CharT __c)
- {
+ {
const size_type __size = this->size();
if (__size + 1 > this->capacity() || this->_M_is_shared())
this->_M_mutate(__size, size_type(0), 0, size_type(1));
_GLIBCXX_DEBUG_PEDASSERT(__p >= _M_ibegin() && __p <= _M_iend());
const size_type __pos = __p - _M_ibegin();
this->replace(__p, __p, __n, __c);
- return iterator(this->_M_data() + __pos);
+ return iterator(this->_M_data() + __pos);
}
#else
/**
#if __cplusplus >= 201103L
insert(const_iterator __p, _CharT __c)
#else
- insert(iterator __p, _CharT __c)
+ insert(iterator __p, _CharT __c)
#endif
{
_GLIBCXX_DEBUG_PEDASSERT(__p >= _M_ibegin() && __p <= _M_iend());
*/
__versa_string&
erase(size_type __pos = 0, size_type __n = npos)
- {
+ {
this->_M_erase(_M_check(__pos, "__versa_string::erase"),
_M_limit(__pos, __n));
return *this;
#if __cplusplus >= 201103L
erase(const_iterator __position)
#else
- erase(iterator __position)
+ erase(iterator __position)
#endif
{
_GLIBCXX_DEBUG_PEDASSERT(__position >= _M_ibegin()
* Removes the characters in the range [pos,pos + n1) from this
* string. In place, the characters of @a __s are inserted. If
* @a pos is beyond end of string, out_of_range is thrown. If
- * the length of result exceeds max_size(), length_error is thrown.
+ * the length of result exceeds max_size(), length_error is thrown.
* The value of the string doesn't change if an error is thrown.
*/
__versa_string&
#if __cplusplus >= 201103L
replace(const_iterator __i1, const_iterator __i2, const _CharT* __s)
#else
- replace(iterator __i1, iterator __i2, const _CharT* __s)
+ replace(iterator __i1, iterator __i2, const _CharT* __s)
#endif
{
__glibcxx_requires_string(__s);
return this->replace(__i1 - _M_ibegin(), __i2 - __i1,
__k1.base(), __k2 - __k1);
}
-
+
#if __cplusplus >= 201103L
/**
* @brief Replace range of characters with initializer_list.
inline basic_istream<_CharT, _Traits>&
getline(basic_istream<_CharT, _Traits>& __is,
__gnu_cxx::__versa_string<_CharT, _Traits, _Alloc, _Base>& __str)
- { return getline(__is, __str, __is.widen('\n')); }
+ { return getline(__is, __str, __is.widen('\n')); }
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace
#endif // _GLIBCXX_USE_C99_STDIO
#if defined(_GLIBCXX_USE_WCHAR_T) && _GLIBCXX_USE_C99_WCHAR
- inline int
+ inline int
stoi(const __wvstring& __str, std::size_t* __idx = 0, int __base = 10)
{ return __gnu_cxx::__stoa<long, int>(&std::wcstol, "stoi", __str.c_str(),
__idx, __base); }
- inline long
+ inline long
stol(const __wvstring& __str, std::size_t* __idx = 0, int __base = 10)
{ return __gnu_cxx::__stoa(&std::wcstol, "stol", __str.c_str(),
__idx, __base); }
typedef __versa_string<char> __vstring;
typedef __vstring __sso_string;
- typedef
+ typedef
__versa_string<char, std::char_traits<char>,
std::allocator<char>, __rc_string_base> __rc_string;
#if __cplusplus >= 201103L
typedef __versa_string<char16_t> __u16vstring;
typedef __u16vstring __u16sso_string;
- typedef
+ typedef
__versa_string<char16_t, std::char_traits<char16_t>,
std::allocator<char16_t>, __rc_string_base> __u16rc_string;
typedef __versa_string<char32_t> __u32vstring;
typedef __u32vstring __u32sso_string;
- typedef
+ typedef
__versa_string<char32_t, std::char_traits<char32_t>,
std::allocator<char32_t>, __rc_string_base> __u32rc_string;
#endif // C++11
template<typename _RAIter1, typename _RAIter2, typename _Predicate>
pair<_RAIter1, _RAIter2>
__mismatch_switch(_RAIter1 __begin1, _RAIter1 __end1,
- _RAIter2 __begin2, _Predicate __pred,
+ _RAIter2 __begin2, _Predicate __pred,
random_access_iterator_tag, random_access_iterator_tag)
{
if (_GLIBCXX_PARALLEL_CONDITION(true))
template<typename _RAIter1, typename _RAIter2, typename _Predicate>
pair<_RAIter1, _RAIter2>
__mismatch_switch(_RAIter1 __begin1, _RAIter1 __end1,
- _RAIter2 __begin2, _RAIter2 __end2, _Predicate __pred,
+ _RAIter2 __begin2, _RAIter2 __end2, _Predicate __pred,
random_access_iterator_tag, random_access_iterator_tag)
{
if (_GLIBCXX_PARALLEL_CONDITION(true))
// Sequential fallback
template<typename _IIter1, typename _IIter2>
inline bool
- equal(_IIter1 __begin1, _IIter1 __end1, _IIter2 __begin2,
+ equal(_IIter1 __begin1, _IIter1 __end1, _IIter2 __begin2,
__gnu_parallel::sequential_tag)
{ return _GLIBCXX_STD_A::equal(__begin1, __end1, __begin2); }
// Sequential fallback
template<typename _IIter1, typename _IIter2, typename _Predicate>
inline bool
- equal(_IIter1 __begin1, _IIter1 __end1, _IIter2 __begin2,
+ equal(_IIter1 __begin1, _IIter1 __end1, _IIter2 __begin2,
_Predicate __pred, __gnu_parallel::sequential_tag)
{ return _GLIBCXX_STD_A::equal(__begin1, __end1, __begin2, __pred); }
template<typename _IIter1, typename _IIter2, typename _Predicate>
_GLIBCXX20_CONSTEXPR
inline bool
- equal(_IIter1 __begin1, _IIter1 __end1, _IIter2 __begin2,
+ equal(_IIter1 __begin1, _IIter1 __end1, _IIter2 __begin2,
_Predicate __pred)
{
#if __cplusplus > 201703L
template<typename _RAIter1, typename _RAIter2, typename _Predicate>
inline bool
__equal_switch(_RAIter1 __begin1, _RAIter1 __end1,
- _RAIter2 __begin2, _RAIter2 __end2, _Predicate __pred,
+ _RAIter2 __begin2, _RAIter2 __end2, _Predicate __pred,
random_access_iterator_tag, random_access_iterator_tag)
{
if (_GLIBCXX_PARALLEL_CONDITION(true))
// Sequential fallback
template<typename _IIter1, typename _IIter2>
inline bool
- lexicographical_compare(_IIter1 __begin1, _IIter1 __end1,
- _IIter2 __begin2, _IIter2 __end2,
+ lexicographical_compare(_IIter1 __begin1, _IIter1 __end1,
+ _IIter2 __begin2, _IIter2 __end2,
__gnu_parallel::sequential_tag)
{ return _GLIBCXX_STD_A::lexicographical_compare(__begin1, __end1,
__begin2, __end2); }
// Sequential fallback
template<typename _IIter1, typename _IIter2, typename _Predicate>
inline bool
- lexicographical_compare(_IIter1 __begin1, _IIter1 __end1,
- _IIter2 __begin2, _IIter2 __end2,
+ lexicographical_compare(_IIter1 __begin1, _IIter1 __end1,
+ _IIter2 __begin2, _IIter2 __end2,
_Predicate __pred, __gnu_parallel::sequential_tag)
{ return _GLIBCXX_STD_A::lexicographical_compare(
__begin1, __end1, __begin2, __end2, __pred); }
typename _Predicate, typename _IteratorTag1, typename _IteratorTag2>
inline bool
__lexicographical_compare_switch(_IIter1 __begin1, _IIter1 __end1,
- _IIter2 __begin2, _IIter2 __end2,
+ _IIter2 __begin2, _IIter2 __end2,
_Predicate __pred,
_IteratorTag1, _IteratorTag2)
{ return _GLIBCXX_STD_A::lexicographical_compare(
__lexicographical_compare_switch(_RAIter1 __begin1, _RAIter1 __end1,
_RAIter2 __begin2, _RAIter2 __end2,
_Predicate __pred,
- random_access_iterator_tag,
+ random_access_iterator_tag,
random_access_iterator_tag)
{
if (_GLIBCXX_PARALLEL_CONDITION(true))
if ((__end1 - __begin1) < (__end2 - __begin2))
{
typedef pair<_RAIter1, _RAIter2> _SpotType;
- _SpotType __mm = __mismatch_switch(__begin1, __end1, __begin2,
- _EqualFromLessCompare(__pred),
- random_access_iterator_tag(),
+ _SpotType __mm = __mismatch_switch(__begin1, __end1, __begin2,
+ _EqualFromLessCompare(__pred),
+ random_access_iterator_tag(),
random_access_iterator_tag());
return (__mm.first == __end1)
else
{
typedef pair<_RAIter2, _RAIter1> _SpotType;
- _SpotType __mm = __mismatch_switch(__begin2, __end2, __begin1,
- _EqualFromLessCompare(__pred),
- random_access_iterator_tag(),
+ _SpotType __mm = __mismatch_switch(__begin2, __end2, __begin1,
+ _EqualFromLessCompare(__pred),
+ random_access_iterator_tag(),
random_access_iterator_tag());
return (__mm.first != __end2)
template<typename _RAIter, typename _BiPredicate>
_RAIter
- __adjacent_find_switch(_RAIter, _RAIter, _BiPredicate,
+ __adjacent_find_switch(_RAIter, _RAIter, _BiPredicate,
random_access_iterator_tag);
_FIter
search_n(_FIter, _FIter, _Integer, const _Tp&, _BiPredicate,
__gnu_parallel::sequential_tag);
-
+
template<typename _FIter, typename _Integer, typename _Tp>
_FIter
search_n(_FIter, _FIter, _Integer, const _Tp&);
template<typename _IIter, typename _OIter, typename _UnaryOperation>
_OIter
- transform(_IIter, _IIter, _OIter, _UnaryOperation,
+ transform(_IIter, _IIter, _OIter, _UnaryOperation,
__gnu_parallel::sequential_tag);
template<typename _IIter, typename _OIter, typename _UnaryOperation>
_OIter
- transform(_IIter, _IIter, _OIter, _UnaryOperation,
+ transform(_IIter, _IIter, _OIter, _UnaryOperation,
__gnu_parallel::_Parallelism);
template<typename _IIter, typename _OIter, typename _UnaryOperation,
typename _IterTag1, typename _IterTag2>
_OIter
- __transform1_switch(_IIter, _IIter, _OIter, _UnaryOperation,
+ __transform1_switch(_IIter, _IIter, _OIter, _UnaryOperation,
_IterTag1, _IterTag2);
-
+
template<typename _RAIIter, typename _RAOIter, typename _UnaryOperation>
_RAOIter
- __transform1_switch(_RAIIter, _RAIIter, _RAOIter, _UnaryOperation,
- random_access_iterator_tag, random_access_iterator_tag,
+ __transform1_switch(_RAIIter, _RAIIter, _RAOIter, _UnaryOperation,
+ random_access_iterator_tag, random_access_iterator_tag,
__gnu_parallel::_Parallelism __parallelism
= __gnu_parallel::parallel_balanced);
template<typename _IIter1, typename _IIter2, typename _OIter,
typename _BiOperation>
_OIter
- transform(_IIter1, _IIter1, _IIter2, _OIter, _BiOperation,
+ transform(_IIter1, _IIter1, _IIter2, _OIter, _BiOperation,
__gnu_parallel::sequential_tag);
template<typename _IIter1, typename _IIter2, typename _OIter,
typename _BiOperation>
_OIter
- transform(_IIter1, _IIter1, _IIter2, _OIter, _BiOperation,
+ transform(_IIter1, _IIter1, _IIter2, _OIter, _BiOperation,
__gnu_parallel::_Parallelism);
template<typename _RAIter1, typename _RAIter2, typename _RAIter3,
typename _BiOperation>
_RAIter3
- __transform2_switch(_RAIter1, _RAIter1, _RAIter2, _RAIter3, _BiOperation,
- random_access_iterator_tag, random_access_iterator_tag,
+ __transform2_switch(_RAIter1, _RAIter1, _RAIter2, _RAIter3, _BiOperation,
+ random_access_iterator_tag, random_access_iterator_tag,
random_access_iterator_tag,
__gnu_parallel::_Parallelism __parallelism
= __gnu_parallel::parallel_balanced);
typename _BiOperation, typename _Tag1,
typename _Tag2, typename _Tag3>
_OIter
- __transform2_switch(_IIter1, _IIter1, _IIter2, _OIter, _BiOperation,
+ __transform2_switch(_IIter1, _IIter1, _IIter2, _OIter, _BiOperation,
_Tag1, _Tag2, _Tag3);
template<typename _FIter, typename _Tp>
void
- replace(_FIter, _FIter, const _Tp&, const _Tp&,
+ replace(_FIter, _FIter, const _Tp&, const _Tp&,
__gnu_parallel::sequential_tag);
template<typename _FIter, typename _Tp>
template<typename _RAIter, typename _Tp>
void
- __replace_switch(_RAIter, _RAIter, const _Tp&, const _Tp&,
+ __replace_switch(_RAIter, _RAIter, const _Tp&, const _Tp&,
random_access_iterator_tag, __gnu_parallel::_Parallelism);
typename _IterTag>
void
__replace_if_switch(_FIter, _FIter, _Predicate, const _Tp&, _IterTag);
-
+
template<typename _RAIter, typename _Predicate, typename _Tp>
void
__replace_if_switch(_RAIter, _RAIter, _Predicate, const _Tp&,
template<typename _IIter1, typename _IIter2, typename _OIter>
_OIter
- merge(_IIter1, _IIter1, _IIter2, _IIter2, _OIter,
+ merge(_IIter1, _IIter1, _IIter2, _IIter2, _OIter,
__gnu_parallel::sequential_tag);
template<typename _IIter1, typename _IIter2, typename _OIter,
typename _Compare>
_OIter
- merge(_IIter1, _IIter1, _IIter2, _IIter2, _OIter, _Compare,
+ merge(_IIter1, _IIter1, _IIter2, _IIter2, _OIter, _Compare,
__gnu_parallel::sequential_tag);
template<typename _IIter1, typename _IIter2, typename _OIter,
typename _Compare, typename _IterTag1, typename _IterTag2,
typename _IterTag3>
_OIter
- __merge_switch(_IIter1, _IIter1, _IIter2, _IIter2, _OIter, _Compare,
+ __merge_switch(_IIter1, _IIter1, _IIter2, _IIter2, _OIter, _Compare,
_IterTag1, _IterTag2, _IterTag3);
template<typename _IIter1, typename _IIter2, typename _OIter,
typename _Compare>
_OIter
- __merge_switch(_IIter1, _IIter1, _IIter2, _IIter2, _OIter, _Compare,
- random_access_iterator_tag, random_access_iterator_tag,
+ __merge_switch(_IIter1, _IIter1, _IIter2, _IIter2, _OIter, _Compare,
+ random_access_iterator_tag, random_access_iterator_tag,
random_access_iterator_tag);
template<typename _FIter, typename _Predicate>
_FIter
partition(_FIter, _FIter, _Predicate, __gnu_parallel::sequential_tag);
-
+
template<typename _FIter, typename _Predicate>
_FIter
partition(_FIter, _FIter, _Predicate);
template<typename _FIter, typename _Predicate, typename _IterTag>
_FIter
__partition_switch(_FIter, _FIter, _Predicate, _IterTag);
-
+
template<typename _RAIter, typename _Predicate>
_RAIter
__partition_switch(
template<typename _IIter1, typename _IIter2, typename _OIter,
typename _Predicate>
- _OIter
+ _OIter
set_union(_IIter1, _IIter1, _IIter2, _IIter2, _OIter, _Predicate);
template<typename _IIter1, typename _IIter2, typename _Predicate,
template<typename _IIter1, typename _IIter2, typename _OIter,
typename _Predicate>
- _OIter
+ _OIter
set_intersection(_IIter1, _IIter1, _IIter2, _IIter2, _OIter, _Predicate);
template<typename _IIter1, typename _IIter2, typename _Predicate,
_Predicate, __gnu_parallel::sequential_tag);
template<typename _IIter1, typename _IIter2, typename _OIter>
- _OIter
+ _OIter
set_symmetric_difference(_IIter1, _IIter1, _IIter2, _IIter2, _OIter);
template<typename _IIter1, typename _IIter2, typename _OIter,
typename _Predicate>
- _OIter
+ _OIter
set_symmetric_difference(_IIter1, _IIter1, _IIter2, _IIter2, _OIter,
_Predicate);
* @namespace std::__parallel
* @brief GNU parallel code, replaces standard behavior with parallel behavior.
*/
-namespace std _GLIBCXX_VISIBILITY(default)
-{
- namespace __parallel { }
+namespace std _GLIBCXX_VISIBILITY(default)
+{
+ namespace __parallel { }
}
/**
* @namespace __gnu_sequential
* @brief GNU sequential classes for public use.
*/
-namespace __gnu_sequential
-{
+namespace __gnu_sequential
+{
// Import whatever is the serial version.
#ifdef _GLIBCXX_PARALLEL
using namespace std::_GLIBCXX_STD_A;
#else
using namespace std;
-#endif
+#endif
}
// and active, which imples that the OpenMP runtime is actually
// going to be linked in.
inline _ThreadIndex
- __get_max_threads()
- {
+ __get_max_threads()
+ {
_ThreadIndex __i = omp_get_max_threads();
- return __i > 1 ? __i : 1;
+ return __i > 1 ? __i : 1;
}
- inline bool
+ inline bool
__is_parallel(const _Parallelism __p) { return __p != sequential; }
struct __generic_find_selector
{ };
- /**
+ /**
* @brief Test predicate on a single element, used for std::find()
* and std::find_if ().
*/
*/
template<typename _RAIter1, typename _RAIter2,
typename _Pred>
- bool
+ bool
operator()(_RAIter1 __i1, _RAIter2 __i2, _Pred __pred)
{ return __pred(*__i1); }
*/
template<typename _RAIter1, typename _RAIter2,
typename _Pred>
- std::pair<_RAIter1, _RAIter2>
+ std::pair<_RAIter1, _RAIter2>
_M_sequential_algorithm(_RAIter1 __begin1,
_RAIter1 __end1,
_RAIter2 __begin2, _Pred __pred)
*/
template<typename _RAIter1, typename _RAIter2,
typename _Pred>
- bool
+ bool
operator()(_RAIter1 __i1, _RAIter2 __i2, _Pred __pred)
{
// Passed end iterator is one short.
/** @brief Test inverted predicate on a single element. */
struct __mismatch_selector : public __generic_find_selector
{
- /**
+ /**
* @brief Test on one position.
* @param __i1 _Iterator on first sequence.
* @param __i2 _Iterator on second sequence (unused).
- * @param __pred Find predicate.
+ * @param __pred Find predicate.
*/
template<typename _RAIter1, typename _RAIter2,
typename _Pred>
- bool
+ bool
operator()(_RAIter1 __i1, _RAIter2 __i2, _Pred __pred)
{ return !__pred(*__i1, *__i2); }
- /**
+ /**
* @brief Corresponding sequential algorithm on a sequence.
* @param __begin1 Begin iterator of first sequence.
* @param __end1 End iterator of first sequence.
* @param __begin2 Begin iterator of second sequence.
- * @param __pred Find predicate.
+ * @param __pred Find predicate.
*/
template<typename _RAIter1, typename _RAIter2,
typename _Pred>
* @param __num_parts Number of parts to split the sequence into.
* @param __f Functor to be applied to each element by traversing __it
* @param __oversampling Oversampling factor. If 0, then the
- * partitions will differ in at most
- * \f$\sqrt{\mathrm{end} - \mathrm{begin}}\f$
+ * partitions will differ in at most
+ * \f$\sqrt{\mathrm{end} - \mathrm{begin}}\f$
* elements. Otherwise, the ratio between the
* longest and the shortest part is bounded by
* \f$1/(\mathrm{oversampling} \cdot \mathrm{num\_parts})\f$
};
/** @brief Base class for unguarded _LoserTree implementation.
- *
+ *
* The whole element is copied into the tree structure.
*
* No guarding is done, therefore not a single input sequence must
* multiple sorted sequences. Also serves for splitting such
* sequence sets.
*
- * The algorithm description can be found in
+ * The algorithm description can be found in
*
* P. J. Varman, S. D. Scheufler, B. R. Iyer, and G. R. Ricard.
* Merging Multiple Lists on Hierarchical-Memory Multiprocessors.
#pragma GCC diagnostic pop // -Wdeprecated-declarations
- /**
+ /**
* @brief Splits several sorted sequences at a certain global __rank,
* resulting in a splitting point for each sequence.
* The sequences are passed via a sequence of random-access
* __result will be stored in. Each element of the sequence is an
* iterator that points to the first element on the greater part of
* the respective __sequence.
- * @param __comp The ordering functor, defaults to std::less<_Tp>.
+ * @param __comp The ordering functor, defaults to std::less<_Tp>.
*/
template<typename _RanSeqs, typename _RankType, typename _RankIterator,
typename _Compare>
__a[__sample[__j].second] += __n + 1;
for (; __j < __m; __j++)
__b[__sample[__j].second] -= __n + 1;
-
+
// Further refinement.
while (__n > 0)
{
std::vector<std::pair<_ValueType, _SeqNumber> >,
_LexicographicReverse<_ValueType, _SeqNumber, _Compare> >
__pq(__lrcomp);
-
+
for (_SeqNumber __i = 0; __i < __m; __i++)
if (__b[__i] < __ns[__i])
__pq.push(std::make_pair(__S(__i)[__b[__i]], __i));
}
- /**
+ /**
* @brief Selects the element at a certain global __rank from several
* sorted sequences.
*
* @param __offset The rank of the selected element in the global
* subsequence of elements equal to the selected element. If the
* selected element is unique, this number is 0.
- * @param __comp The ordering functor, defaults to std::less.
+ * @param __comp The ordering functor, defaults to std::less.
*/
template<typename _Tp, typename _RanSeqs, typename _RankType,
typename _Compare>
if (__bi1._M_current == __bi1._M_end) // __bi2 is sup
return false;
return !(__bi1.__comp)(*__bi2, *__bi1); // normal compare
- }
+ }
};
template<typename _RAIter, typename _Compare>
// (Settings::multiway_merge_splitting
// == __gnu_parallel::_Settings::EXACT).
- std::vector<_RAIter1>* __offsets =
+ std::vector<_RAIter1>* __offsets =
new std::vector<_RAIter1>[__num_threads];
std::vector<std::pair<_RAIter1, _RAIter1> > __se(__k);
_DifferenceType* __es = new _DifferenceType[__num_samples + 2];
- __equally_split(__sd->_M_starts[__iam + 1] - __sd->_M_starts[__iam],
+ __equally_split(__sd->_M_starts[__iam + 1] - __sd->_M_starts[__iam],
__num_samples + 1, __es);
for (_DifferenceType __i = 0; __i < __num_samples; ++__i)
}
};
- /** @brief Split by sampling. */
+ /** @brief Split by sampling. */
template<typename _RAIter, typename _Compare,
typename _SortingPlacesIterator>
struct _SplitConsistently<false, _RAIter, _Compare, _SortingPlacesIterator>
}
}
};
-
+
template<bool __stable, typename _RAIter, typename _Compare>
struct __possibly_stable_sort
{ };
{
__sd._M_num_threads = __num_threads;
__sd._M_source = __begin;
-
+
__sd._M_temporary = new _ValueType*[__num_threads];
if (!__exact)
template<typename _IIter, typename _OIter, typename _BinaryOper>
_OIter
- adjacent_difference(_IIter, _IIter, _OIter, _BinaryOper,
+ adjacent_difference(_IIter, _IIter, _OIter, _BinaryOper,
__gnu_parallel::sequential_tag);
template<typename _IIter, typename _OIter>
template<typename _IIter, typename _OIter, typename _BinaryOper>
_OIter
- adjacent_difference(_IIter, _IIter, _OIter, _BinaryOper,
+ adjacent_difference(_IIter, _IIter, _OIter, _BinaryOper,
__gnu_parallel::_Parallelism);
template<typename _IIter, typename _OIter, typename _BinaryOper,
template<typename _IIter, typename _OIter, typename _BinaryOper>
_OIter
- __adjacent_difference_switch(_IIter, _IIter, _OIter, _BinaryOper,
- random_access_iterator_tag,
- random_access_iterator_tag,
+ __adjacent_difference_switch(_IIter, _IIter, _OIter, _BinaryOper,
+ random_access_iterator_tag,
+ random_access_iterator_tag,
__gnu_parallel::_Parallelism __parallelism
= __gnu_parallel::parallel_unbalanced);
typename _BinaryFunction1, typename _BinaryFunction2,
typename _Tag1, typename _Tag2>
_Tp
- __inner_product_switch(_IIter1, _IIter1, _IIter2, _Tp, _BinaryFunction1,
+ __inner_product_switch(_IIter1, _IIter1, _IIter2, _Tp, _BinaryFunction1,
_BinaryFunction2, _Tag1, _Tag2);
# pragma omp single
{
__num_threads = omp_get_num_threads();
-
+
__borders = new _DifferenceType[__num_threads + 2];
if (__s.partial_sum_dilation == 1.0f)
if (__s.partition_chunk_share > 0.0)
__chunk_size = std::max<_DifferenceType>
- (__s.partition_chunk_size, (double)__n
+ (__s.partition_chunk_size, (double)__n
* __s.partition_chunk_share / (double)__num_threads);
else
__chunk_size = __s.partition_chunk_size;
* @param __comp Comparator.
*/
template<typename _RAIter, typename _Compare>
- void
- __parallel_nth_element(_RAIter __begin, _RAIter __nth,
+ void
+ __parallel_nth_element(_RAIter __begin, _RAIter __nth,
_RAIter __end, _Compare __comp)
{
typedef std::iterator_traits<_RAIter> _TraitsType;
// Bit results.
int __bits_left;
-
+
static uint32_t
__scale_down(uint64_t __x,
#if _GLIBCXX_SCALE_DOWN_FPU
/** @brief Random shuffle code executed by each thread.
* @param __pus Array of thread-local data records. */
template<typename _RAIter, typename _RandomNumberGenerator>
- void
+ void
__parallel_random_shuffle_drs_pu(_DRSSorterPU<_RAIter,
_RandomNumberGenerator>* __pus)
{
/** @brief Round up to the next greater power of 2.
* @param __x _Integer to round up */
template<typename _Tp>
- _Tp
+ _Tp
__round_up_to_pow2(_Tp __x)
{
if (__x <= 1)
# pragma omp single
{
__pus = new _DRSSorterPU<_RAIter, _RandomNumber>[__num_threads];
-
+
__sd._M_temporaries = new _ValueType*[__num_threads];
__sd._M_dist = new _DifferenceType*[__num_bins + 1];
__sd._M_bin_proc = new _ThreadIndex[__num_bins];
// Distribute according to oracles.
for (_DifferenceType __i = 0; __i < __n; ++__i)
- ::new(&(__target[(__dist0[__oracles[__i]])++]))
+ ::new(&(__target[(__dist0[__oracles[__i]])++]))
_ValueType(*(__begin + __i));
for (int __b = 0; __b < __num_bins; ++__b)
delete[] __dist0;
delete[] __dist1;
delete[] __oracles;
-
+
for (_DifferenceType __i = 0; __i < __n; ++__i)
__target[__i].~_ValueType();
::operator delete(__target);
* @brief Precalculate __advances for Knuth-Morris-Pratt algorithm.
* @param __elements Begin iterator of sequence to search for.
* @param __length Length of sequence to search for.
- * @param __off Returned __offsets.
+ * @param __off Returned __offsets.
*/
template<typename _RAIter, typename _DifferenceTp>
void
- __calc_borders(_RAIter __elements, _DifferenceTp __length,
+ __calc_borders(_RAIter __elements, _DifferenceTp __length,
_DifferenceTp* __off)
{
typedef _DifferenceTp _DifferenceType;
omp_destroy_lock(&__result_lock);
delete[] __splitters;
-
+
// Return iterator on found element.
return (__begin1 + __result);
}
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wlong-long" // LL literal
-/**
+/**
* @brief Determine at compile(?)-time if the parallel variant of an
* algorithm should be called.
* @param __c A condition that is convertible to bool that is overruled by
struct _Settings
{
_AlgorithmStrategy algorithm_strategy;
-
+
_SortAlgorithm sort_algorithm;
_PartialSumAlgorithm partial_sum_algorithm;
_MultiwayMergeAlgorithm multiway_merge_algorithm;
/// Minimal input size for parallel std::transform.
_SequenceIndex transform_minimal_n;
- /// Minimal input size for unique_copy.
+ /// Minimal input size for unique_copy.
_SequenceIndex unique_copy_minimal_n;
_SequenceIndex workstealing_chunk_size;
static void
set(_Settings&) throw();
- explicit
+ explicit
_Settings() :
algorithm_strategy(heuristic),
sort_algorithm(MWMS),
void
__parallel_sort(_RAIter __begin, _RAIter __end,
_Compare __comp, _Parallelism __parallelism);
-
- /**
+
+ /**
* @brief Choose multiway mergesort, splitting variant at run-time,
* for parallel sorting.
* @param __begin Begin iterator of input sequence.
* @param __end End iterator of input sequence.
* @param __comp Comparator.
* @tparam __stable Sort stable.
- * @callgraph
+ * @callgraph
*/
template<bool __stable, typename _RAIter, typename _Compare>
inline void
(__begin, __end, __comp, __parallelism.__get_num_threads());
}
- /**
+ /**
* @brief Choose multiway mergesort with exact splitting,
* for parallel sorting.
* @param __begin Begin iterator of input sequence.
* @param __end End iterator of input sequence.
* @param __comp Comparator.
* @tparam __stable Sort stable.
- * @callgraph
+ * @callgraph
*/
template<bool __stable, typename _RAIter, typename _Compare>
inline void
(__begin, __end, __comp, __parallelism.__get_num_threads());
}
- /**
+ /**
* @brief Choose multiway mergesort with splitting by sampling,
* for parallel sorting.
* @param __begin Begin iterator of input sequence.
* @param __end End iterator of input sequence.
* @param __comp Comparator.
* @tparam __stable Sort stable.
- * @callgraph
+ * @callgraph
*/
template<bool __stable, typename _RAIter, typename _Compare>
inline void
* @param __end End iterator of input sequence.
* @param __comp Comparator.
* @tparam __stable Sort stable.
- * @callgraph
+ * @callgraph
*/
template<bool __stable, typename _RAIter, typename _Compare>
inline void
* @param __end End iterator of input sequence.
* @param __comp Comparator.
* @tparam __stable Sort stable.
- * @callgraph
+ * @callgraph
*/
template<bool __stable, typename _RAIter, typename _Compare>
inline void
__parallelism.__get_num_threads());
}
- /**
+ /**
* @brief Choose multiway mergesort with exact splitting,
* for parallel sorting.
* @param __begin Begin iterator of input sequence.
* @param __end End iterator of input sequence.
* @param __comp Comparator.
* @tparam __stable Sort stable.
- * @callgraph
+ * @callgraph
*/
template<bool __stable, typename _RAIter, typename _Compare>
inline void
* @param __end End iterator of input sequence.
* @param __comp Comparator.
* @tparam __stable Sort stable.
- * @callgraph
+ * @callgraph
*/
template<bool __stable, typename _RAIter, typename _Compare>
inline void
parallel_taskqueue
};
- /// Strategies for run-time algorithm selection:
+ /// Strategies for run-time algorithm selection:
// force_sequential, force_parallel, heuristic.
enum _AlgorithmStrategy
{
force_parallel
};
- /// Sorting algorithms:
+ /// Sorting algorithms:
// multi-way mergesort, quicksort, load-balanced quicksort.
- enum _SortAlgorithm
- {
- MWMS,
- QS,
- QS_BALANCED
+ enum _SortAlgorithm
+ {
+ MWMS,
+ QS,
+ QS_BALANCED
};
- /// Merging algorithms:
+ /// Merging algorithms:
// bubblesort-alike, loser-tree variants, enum __sentinel.
enum _MultiwayMergeAlgorithm
{
};
/// Partial sum algorithms: recursive, linear.
- enum _PartialSumAlgorithm
- {
- RECURSIVE,
- LINEAR
+ enum _PartialSumAlgorithm
+ {
+ RECURSIVE,
+ LINEAR
};
/// Sorting/merging algorithms: sampling, __exact.
- enum _SplittingAlgorithm
- {
- SAMPLING,
- EXACT
+ enum _SplittingAlgorithm
+ {
+ SAMPLING,
+ EXACT
};
/// Find algorithms:
// growing blocks, equal-sized blocks, equal splitting.
- enum _FindAlgorithm
- {
- GROWING_BLOCKS,
- CONSTANT_SIZE_BLOCKS,
- EQUAL_SPLIT
+ enum _FindAlgorithm
+ {
+ GROWING_BLOCKS,
+ CONSTANT_SIZE_BLOCKS,
+ EQUAL_SPLIT
};
/**
// <http://www.gnu.org/licenses/>.
/** @file tr1/complex.h
- * This is a TR1 C++ Library header.
+ * This is a TR1 C++ Library header.
*/
#ifndef _GLIBCXX_TR1_COMPLEX_H
// <http://www.gnu.org/licenses/>.
/** @file tr1/ctype.h
- * This is a TR1 C++ Library header.
+ * This is a TR1 C++ Library header.
*/
#ifndef _TR1_CTYPE_H
// <http://www.gnu.org/licenses/>.
/** @file tr1/fenv.h
- * This is a TR1 C++ Library header.
+ * This is a TR1 C++ Library header.
*/
#ifndef _TR1_FENV_H
// <http://www.gnu.org/licenses/>.
/** @file tr1/float.h
- * This is a TR1 C++ Library header.
+ * This is a TR1 C++ Library header.
*/
#ifndef _TR1_FLOAT_H
return __result;
}
};
-
+
template<>
struct _Fnv_hash_base<8>
{
/** @file tr1/hashtable_policy.h
* This is an internal header file, included by other library headers.
- * Do not attempt to use it directly.
+ * Do not attempt to use it directly.
* @headername{tr1/unordered_map, tr1/unordered_set}
*/
namespace std _GLIBCXX_VISIBILITY(default)
-{
+{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
namespace tr1
// Auxiliary types used for all instantiations of _Hashtable: nodes
// and iterators.
-
+
// Nodes, used to wrap elements stored in the hash table. A policy
// template parameter of class template _Hashtable controls whether
// nodes also store a hash code. In some cases (e.g. strings) this
{
_Node_iterator_base(_Hash_node<_Value, __cache>* __p)
: _M_cur(__p) { }
-
+
void
_M_incr()
{ _M_cur = _M_cur->_M_next; }
reference
operator*() const
{ return this->_M_cur->_M_v; }
-
+
pointer
operator->() const
{ return std::__addressof(this->_M_cur->_M_v); }
_Node_iterator&
operator++()
- {
+ {
this->_M_incr();
- return *this;
+ return *this;
}
-
+
_Node_iterator
operator++(int)
- {
+ {
_Node_iterator __tmp(*this);
this->_M_incr();
return __tmp;
reference
operator*() const
{ return this->_M_cur->_M_v; }
-
+
pointer
operator->() const
{ return std::__addressof(this->_M_cur->_M_v); }
_Node_const_iterator&
operator++()
- {
+ {
this->_M_incr();
- return *this;
+ return *this;
}
-
+
_Node_const_iterator
operator++(int)
- {
+ {
_Node_const_iterator __tmp(*this);
this->_M_incr();
return __tmp;
reference
operator*() const
{ return this->_M_cur_node->_M_v; }
-
+
pointer
operator->() const
{ return std::__addressof(this->_M_cur_node->_M_v); }
_Hashtable_iterator&
operator++()
- {
+ {
this->_M_incr();
return *this;
}
-
+
_Hashtable_iterator
operator++(int)
- {
+ {
_Hashtable_iterator __tmp(*this);
this->_M_incr();
return __tmp;
reference
operator*() const
{ return this->_M_cur_node->_M_v; }
-
+
pointer
operator->() const
{ return std::__addressof(this->_M_cur_node->_M_v); }
_Hashtable_const_iterator&
operator++()
- {
+ {
this->_M_incr();
return *this;
}
-
+
_Hashtable_const_iterator
operator++(int)
- {
+ {
_Hashtable_const_iterator __tmp(*this);
this->_M_incr();
return __tmp;
float
max_load_factor() const
- { return _M_max_load_factor; }
+ { return _M_max_load_factor; }
// Return a bucket size no smaller than n.
std::size_t
_M_next_bkt(std::size_t __n) const;
-
+
// Return a bucket count appropriate for n elements
std::size_t
_M_bkt_for_elements(std::size_t __n) const;
-
+
// __n_bkt is current bucket count, __n_elt is current element count,
// and __n_ins is number of elements to be inserted. Do we need to
// increase bucket count? If so, return make_pair(true, n), where n
extern const unsigned long __prime_list[];
// XXX This is a hack. There's no good reason for any of
- // _Prime_rehash_policy's member functions to be inline.
+ // _Prime_rehash_policy's member functions to be inline.
// Return a prime no smaller than n.
inline std::size_t
// iterator that can be dereferenced to get the last prime.
const unsigned long* __p
= std::lower_bound(__prime_list, __prime_list + _S_n_primes - 1, __n);
- _M_next_resize =
+ _M_next_resize =
static_cast<std::size_t>(__builtin_ceil(*__p * _M_max_load_factor));
return *__p;
}
// Finds the smallest prime p such that alpha p > __n_elt + __n_ins.
// If p > __n_bkt, return make_pair(true, p); otherwise return
- // make_pair(false, 0). In principle this isn't very different from
+ // make_pair(false, 0). In principle this isn't very different from
// _M_bkt_for_elements.
// The only tricky part is that we're caching the element count at
return std::make_pair(true,
_M_next_bkt(__builtin_ceil(__min_bkts)));
}
- else
+ else
{
_M_next_resize = static_cast<std::size_t>
(__builtin_ceil(__n_bkt * _M_max_load_factor));
// form pair<T1, T2> and a key extraction policy that returns the
// first part of the pair, the hashtable gets a mapped_type typedef.
// If it satisfies those criteria and also has unique keys, then it
- // also gets an operator[].
+ // also gets an operator[].
template<typename _Key, typename _Value, typename _Ex, bool __unique,
typename _Hashtable>
struct _Map_base { };
-
+
template<typename _Key, typename _Pair, typename _Hashtable>
struct _Map_base<_Key, _Pair, std::_Select1st<_Pair>, false, _Hashtable>
{
struct _Map_base<_Key, _Pair, std::_Select1st<_Pair>, true, _Hashtable>
{
typedef typename _Pair::second_type mapped_type;
-
+
mapped_type&
operator[](const _Key& __k);
};
// we have a dummy type as placeholder.
// (2) Whether or not we cache hash codes. Caching hash codes is
// meaningless if we have a ranged hash function.
- // We also put the key extraction and equality comparison function
+ // We also put the key extraction and equality comparison function
// objects here, for convenience.
-
- // Primary template: unused except as a hook for specializations.
+
+ // Primary template: unused except as a hook for specializations.
template<typename _Key, typename _Value,
typename _ExtractKey, typename _Equal,
typename _H1, typename _H2, typename _Hash,
: _M_extract(__ex), _M_eq(__eq), _M_ranged_hash(__h) { }
typedef void* _Hash_code_type;
-
+
_Hash_code_type
_M_hash_code(const _Key& __key) const
{ return 0; }
-
+
std::size_t
_M_bucket_index(const _Key& __k, _Hash_code_type,
std::size_t __n) const
_M_bucket_index(const _Hash_node<_Value, false>* __p,
std::size_t __n) const
{ return _M_ranged_hash(_M_extract(__p->_M_v), __n); }
-
+
bool
_M_compare(const _Key& __k, _Hash_code_type,
_Hash_node<_Value, false>* __n) const
_M_copy_code(_Hash_node<_Value, false>*,
const _Hash_node<_Value, false>*) const
{ }
-
+
void
_M_swap(_Hash_code_base& __x)
{
// No specialization for ranged hash function while caching hash codes.
// That combination is meaningless, and trying to do it is an error.
-
-
+
+
// Specialization: ranged hash function, cache hash codes. This
// combination is meaningless, so we provide only a declaration
- // and no definition.
+ // and no definition.
template<typename _Key, typename _Value,
typename _ExtractKey, typename _Equal,
typename _H1, typename _H2, typename _Hash>
// Specialization: hash function and range-hashing function, no
// caching of hash codes. H is provided but ignored. Provides
- // typedef and accessor required by TR1.
+ // typedef and accessor required by TR1.
template<typename _Key, typename _Value,
typename _ExtractKey, typename _Equal,
typename _H1, typename _H2>
_Hash_code_type
_M_hash_code(const _Key& __k) const
{ return _M_h1(__k); }
-
+
std::size_t
_M_bucket_index(const _Key&, _Hash_code_type __c,
std::size_t __n) const
_H2 _M_h2;
};
- // Specialization: hash function and range-hashing function,
+ // Specialization: hash function and range-hashing function,
// caching hash codes. H is provided but ignored. Provides
// typedef and accessor required by TR1.
template<typename _Key, typename _Value,
_Default_ranged_hash, true>
{
typedef _H1 hasher;
-
+
hasher
hash_function() const
{ return _M_h1; }
: _M_extract(__ex), _M_eq(__eq), _M_h1(__h1), _M_h2(__h2) { }
typedef std::size_t _Hash_code_type;
-
+
_Hash_code_type
_M_hash_code(const _Key& __k) const
{ return _M_h1(__k); }
-
+
std::size_t
_M_bucket_index(const _Key&, _Hash_code_type __c,
std::size_t __n) const
std::swap(_M_h1, __x._M_h1);
std::swap(_M_h2, __x._M_h2);
}
-
+
protected:
_ExtractKey _M_extract;
_Equal _M_eq;
// <http://www.gnu.org/licenses/>.
/** @file tr1/inttypes.h
- * This is a TR1 C++ Library header.
+ * This is a TR1 C++ Library header.
*/
#ifndef _GLIBCXX_TR1_INTTYPES_H
// <http://www.gnu.org/licenses/>.
/** @file tr1/limits.h
- * This is a TR1 C++ Library header.
+ * This is a TR1 C++ Library header.
*/
#ifndef _TR1_LIMITS_H
// <http://www.gnu.org/licenses/>.
/** @file tr1/math.h
- * This is a TR1 C++ Library header.
+ * This is a TR1 C++ Library header.
*/
#ifndef _GLIBCXX_TR1_MATH_H
*/
namespace __detail
{
- template<typename _UIntType, int __w,
+ template<typename _UIntType, int __w,
bool = __w < std::numeric_limits<_UIntType>::digits>
struct _Shift
{ static const _UIntType __value = 0; };
*/
template<typename _Engine, typename _Distribution>
struct _Adaptor
- {
+ {
typedef typename _Engine::result_type _Engine_result_type;
typedef typename _Distribution::input_type result_type;
__return_value = result_type(_M_g() - _M_g.min())
/ result_type(_M_g.max() - _M_g.min() + result_type(1));
else
- __return_value = (((_M_g() - _M_g.min())
+ __return_value = (((_M_g() - _M_g.min())
/ (_M_g.max() - _M_g.min()))
* std::numeric_limits<result_type>::max());
return __return_value;
* requirements. <table border=1 cellpadding=10 cellspacing=0>
* <caption align=top>Random Number Generator Requirements</caption>
* <tr><td>To be documented.</td></tr> </table>
- *
+ *
* @{
*/
/**
* Compares two linear congruential random number generator
* objects of the same type for equality.
- *
+ *
* @param __lhs A linear congruential random number generator object.
* @param __rhs Another linear congruential random number generator obj.
*
/**
* @brief The Marsaglia-Zaman generator.
- *
+ *
* This is a model of a Generalized Fibonacci discrete random number
* generator, sometimes referred to as the SWC generator.
*
public:
/** The type of the generated random value. */
typedef _IntType result_type;
-
+
// parameter values
static const _IntType modulus = __m;
static const int long_lag = __r;
* @var _M_x The state of the generator. This is a ring buffer.
* @var _M_carry The carry.
* @var _M_p Current index of x(i - r).
- * @var _M_npows Precomputed negative powers of 2.
+ * @var _M_npows Precomputed negative powers of 2.
*/
template<typename _RealType, int __w, int __s, int __r>
class subtract_with_carry_01
public:
/** The type of the generated random value. */
typedef _RealType result_type;
-
+
// parameter values
static const int word_size = __w;
static const int long_lag = __r;
subtract_with_carry_01(_Gen& __g)
{
this->seed(__g);
- _M_initialize_npows();
+ _M_initialize_npows();
}
/**
// _GLIBCXX_RESOLVE_LIB_DEFECTS
// 508. Bad parameters for ranlux64_base_01.
- typedef subtract_with_carry_01<double, 48, 5, 12> ranlux64_base_01;
+ typedef subtract_with_carry_01<double, 48, 5, 12> ranlux64_base_01;
/**
// constructors and member function
xor_combine()
- : _M_b1(), _M_b2()
+ : _M_b1(), _M_b2()
{ _M_initialize_max(); }
xor_combine(const base1_type& __rng1, const base2_type& __rng2)
class uniform_int
{
__glibcxx_class_requires(_IntType, _IntegerConcept)
-
+
public:
/** The type of the parameters of the distribution. */
typedef _IntType input_type;
explicit
bernoulli_distribution(double __p = 0.5)
: _M_p(__p)
- {
+ {
_GLIBCXX_DEBUG_ASSERT((_M_p >= 0.0) && (_M_p <= 1.0));
}
/**
* @brief A discrete geometric random number distribution.
*
- * The formula for the geometric probability mass function is
+ * The formula for the geometric probability mass function is
* @f$ p(i) = (1 - p)p^{i-1} @f$ where @f$ p @f$ is the parameter of the
* distribution.
*/
/**
* @brief A discrete binomial random number distribution.
*
- * The formula for the binomial probability mass function is
+ * The formula for the binomial probability mass function is
* @f$ p(i) = \binom{n}{i} p^i (1 - p)^{t - i} @f$ where @f$ t @f$
* and @f$ p @f$ are the parameters of the distribution.
*/
_IntType
t() const
{ return _M_t; }
-
+
/**
* Gets the distribution @p p parameter.
*/
/**
* @brief An exponential continuous distribution for random numbers.
*
- * The formula for the exponential probability mass function is
+ * The formula for the exponential probability mass function is
* @f$ p(x) = \lambda e^{-\lambda x} @f$.
*
* <table border=1 cellpadding=10 cellspacing=0>
explicit
exponential_distribution(const result_type& __lambda = result_type(1))
: _M_lambda(__lambda)
- {
+ {
_GLIBCXX_DEBUG_ASSERT(_M_lambda > 0);
}
/**
* @brief A normal continuous distribution for random numbers.
*
- * The formula for the normal probability mass function is
- * @f$ p(x) = \frac{1}{\sigma \sqrt{2 \pi}}
+ * The formula for the normal probability mass function is
+ * @f$ p(x) = \frac{1}{\sigma \sqrt{2 \pi}}
* e^{- \frac{{x - mean}^ {2}}{2 \sigma ^ {2}} } @f$.
*/
template<typename _RealType = double>
normal_distribution(const result_type& __mean = result_type(0),
const result_type& __sigma = result_type(1))
: _M_mean(__mean), _M_sigma(__sigma), _M_saved_available(false)
- {
+ {
_GLIBCXX_DEBUG_ASSERT(_M_sigma > 0);
}
result_type _M_mean;
result_type _M_sigma;
result_type _M_saved;
- bool _M_saved_available;
+ bool _M_saved_available;
};
/**
* @brief A gamma continuous distribution for random numbers.
*
- * The formula for the gamma probability mass function is
+ * The formula for the gamma probability mass function is
* @f$ p(x) = \frac{1}{\Gamma(\alpha)} x^{\alpha - 1} e^{-x} @f$.
*/
template<typename _RealType = double>
explicit
gamma_distribution(const result_type& __alpha_val = result_type(1))
: _M_alpha(__alpha_val)
- {
+ {
_GLIBCXX_DEBUG_ASSERT(_M_alpha > 0);
_M_initialize();
}
class _Sp_counted_base
: public _Mutex_base<_Lp>
{
- public:
+ public:
_Sp_counted_base()
: _M_use_count(1), _M_weak_count(1) { }
-
+
virtual
- ~_Sp_counted_base() // nothrow
+ ~_Sp_counted_base() // nothrow
{ }
-
+
// Called when _M_use_count drops to zero, to release the resources
// managed by *this.
virtual void
_M_dispose() = 0; // nothrow
-
+
// Called when _M_weak_count drops to zero.
virtual void
_M_destroy() // nothrow
{ delete this; }
-
+
virtual void*
_M_get_deleter(const std::type_info&) = 0;
void
_M_add_ref_copy()
{ __gnu_cxx::__atomic_add_dispatch(&_M_use_count, 1); }
-
+
void
_M_add_ref_lock();
-
+
void
_M_release() // nothrow
{
}
}
}
-
+
void
_M_weak_add_ref() // nothrow
{ __gnu_cxx::__atomic_add_dispatch(&_M_weak_count, 1); }
_M_destroy();
}
}
-
+
long
_M_get_use_count() const // nothrow
{
return const_cast<const volatile _Atomic_word&>(_M_use_count);
}
- private:
+ private:
_Sp_counted_base(_Sp_counted_base const&);
_Sp_counted_base& operator=(_Sp_counted_base const&);
}
}
- template<>
+ template<>
inline void
_Sp_counted_base<_S_atomic>::
_M_add_ref_lock()
if (__count == 0)
__throw_bad_weak_ptr();
// Replace the current counter value with the old value + 1, as
- // long as it's not changed meanwhile.
+ // long as it's not changed meanwhile.
}
while (!__atomic_compare_exchange_n(&_M_use_count, &__count, __count + 1,
- true, __ATOMIC_ACQ_REL,
+ true, __ATOMIC_ACQ_REL,
__ATOMIC_RELAXED));
}
// Precondition: __d(__p) must not throw.
_Sp_counted_base_impl(_Ptr __p, _Deleter __d)
: _M_ptr(__p), _M_del(__d) { }
-
+
virtual void
_M_dispose() // nothrow
{ _M_del(_M_ptr); }
-
+
virtual void*
_M_get_deleter(const std::type_info& __ti)
{
return 0;
#endif
}
-
+
private:
_Sp_counted_base_impl(const _Sp_counted_base_impl&);
_Sp_counted_base_impl& operator=(const _Sp_counted_base_impl&);
-
+
_Ptr _M_ptr; // copy constructor must not throw
_Deleter _M_del; // copy constructor must not throw
};
template<_Lock_policy _Lp = __default_lock_policy>
class __shared_count
{
- public:
+ public:
__shared_count()
: _M_pi(0) // nothrow
{ }
-
+
template<typename _Ptr>
__shared_count(_Ptr __p) : _M_pi(0)
{
// Throw bad_weak_ptr when __r._M_get_use_count() == 0.
explicit
__shared_count(const __weak_count<_Lp>& __r);
-
+
~__shared_count() // nothrow
{
if (_M_pi != 0)
_M_pi->_M_release();
}
-
+
__shared_count(const __shared_count& __r)
: _M_pi(__r._M_pi) // nothrow
{
if (_M_pi != 0)
_M_pi->_M_add_ref_copy();
}
-
+
__shared_count&
operator=(const __shared_count& __r) // nothrow
{
}
return *this;
}
-
+
void
_M_swap(__shared_count& __r) // nothrow
{
__r._M_pi = _M_pi;
_M_pi = __tmp;
}
-
+
long
_M_get_use_count() const // nothrow
{ return _M_pi != 0 ? _M_pi->_M_get_use_count() : 0; }
bool
_M_unique() const // nothrow
{ return this->_M_get_use_count() == 1; }
-
+
friend inline bool
operator==(const __shared_count& __a, const __shared_count& __b)
{ return __a._M_pi == __b._M_pi; }
-
+
friend inline bool
operator<(const __shared_count& __a, const __shared_count& __b)
{ return std::less<_Sp_counted_base<_Lp>*>()(__a._M_pi, __b._M_pi); }
-
+
void*
_M_get_deleter(const std::type_info& __ti) const
{ return _M_pi ? _M_pi->_M_get_deleter(__ti) : 0; }
__weak_count()
: _M_pi(0) // nothrow
{ }
-
+
__weak_count(const __shared_count<_Lp>& __r)
: _M_pi(__r._M_pi) // nothrow
{
if (_M_pi != 0)
_M_pi->_M_weak_add_ref();
}
-
+
__weak_count(const __weak_count<_Lp>& __r)
: _M_pi(__r._M_pi) // nothrow
{
if (_M_pi != 0)
_M_pi->_M_weak_add_ref();
}
-
+
~__weak_count() // nothrow
{
if (_M_pi != 0)
_M_pi->_M_weak_release();
}
-
+
__weak_count<_Lp>&
operator=(const __shared_count<_Lp>& __r) // nothrow
{
__tmp->_M_weak_add_ref();
if (_M_pi != 0)
_M_pi->_M_weak_release();
- _M_pi = __tmp;
+ _M_pi = __tmp;
return *this;
}
-
+
__weak_count<_Lp>&
operator=(const __weak_count<_Lp>& __r) // nothrow
{
__r._M_pi = _M_pi;
_M_pi = __tmp;
}
-
+
long
_M_get_use_count() const // nothrow
{ return _M_pi != 0 ? _M_pi->_M_get_use_count() : 0; }
friend inline bool
operator==(const __weak_count<_Lp>& __a, const __weak_count<_Lp>& __b)
{ return __a._M_pi == __b._M_pi; }
-
+
friend inline bool
operator<(const __weak_count<_Lp>& __a, const __weak_count<_Lp>& __b)
{ return std::less<_Sp_counted_base<_Lp>*>()(__a._M_pi, __b._M_pi); }
// Forward declarations.
template<typename _Tp, _Lock_policy _Lp = __default_lock_policy>
class __shared_ptr;
-
+
template<typename _Tp, _Lock_policy _Lp = __default_lock_policy>
class __weak_ptr;
template<typename _Tp>
class shared_ptr;
-
+
template<typename _Tp>
class weak_ptr;
{
public:
typedef _Tp element_type;
-
+
__shared_ptr()
: _M_ptr(0), _M_refcount() // never throws
{ }
// TODO requires _Deleter CopyConstructible and __d(__p) well-formed
__enable_shared_from_this_helper(_M_refcount, __p, __p);
}
-
+
// generated copy constructor, assignment, destructor are fine.
-
+
template<typename _Tp1>
__shared_ptr(const __shared_ptr<_Tp1, _Lp>& __r)
: _M_ptr(__r._M_ptr), _M_refcount(__r._M_refcount) // never throws
reset(_Tp1* __p) // _Tp1 must be complete.
{
// Catch self-reset errors.
- _GLIBCXX_DEBUG_ASSERT(__p == 0 || __p != _M_ptr);
+ _GLIBCXX_DEBUG_ASSERT(__p == 0 || __p != _M_ptr);
__shared_ptr(__p).swap(*this);
}
_GLIBCXX_DEBUG_ASSERT(_M_ptr != 0);
return _M_ptr;
}
-
+
_Tp*
get() const // never throws
{ return _M_ptr; }
// 2.2.3.7 shared_ptr I/O
template<typename _Ch, typename _Tr, typename _Tp, _Lock_policy _Lp>
std::basic_ostream<_Ch, _Tr>&
- operator<<(std::basic_ostream<_Ch, _Tr>& __os,
+ operator<<(std::basic_ostream<_Ch, _Tr>& __os,
const __shared_ptr<_Tp, _Lp>& __p)
{
__os << __p.get();
{
public:
typedef _Tp element_type;
-
+
__weak_ptr()
: _M_ptr(0), _M_refcount() // never throws
{ }
// Generated copy constructor, assignment, destructor are fine.
-
+
// The "obvious" converting constructor implementation:
//
// template<typename _Tp1>
_M_refcount = __r._M_refcount;
return *this;
}
-
+
template<typename _Tp1>
__weak_ptr&
operator=(const __shared_ptr<_Tp1, _Lp>& __r) // never throws
// use_count test above.
return __shared_ptr<element_type, _Lp>();
}
-
+
#else
// Optimization: avoid try/catch overhead when single threaded.
return expired() ? __shared_ptr<element_type, _Lp>()
bool
expired() const // never throws
{ return _M_refcount._M_get_use_count() == 0; }
-
+
void
reset() // never throws
{ __weak_ptr().swap(*this); }
{
protected:
__enable_shared_from_this() { }
-
+
__enable_shared_from_this(const __enable_shared_from_this&) { }
-
+
__enable_shared_from_this&
operator=(const __enable_shared_from_this&)
{ return *this; }
~__enable_shared_from_this() { }
-
+
public:
__shared_ptr<_Tp, _Lp>
shared_from_this()
public:
weak_ptr()
: __weak_ptr<_Tp>() { }
-
+
template<typename _Tp1>
weak_ptr(const weak_ptr<_Tp1>& __r)
: __weak_ptr<_Tp>(__r) { }
{
protected:
enable_shared_from_this() { }
-
+
enable_shared_from_this(const enable_shared_from_this&) { }
enable_shared_from_this&
// <http://www.gnu.org/licenses/>.
/** @file tr1/stdarg.h
- * This is a TR1 C++ Library header.
+ * This is a TR1 C++ Library header.
*/
#ifndef _TR1_STDARG_H
// <http://www.gnu.org/licenses/>.
/** @file tr1/stdbool.h
- * This is a TR1 C++ Library header.
+ * This is a TR1 C++ Library header.
*/
#ifndef _TR1_STDBOOL_H
// <http://www.gnu.org/licenses/>.
/** @file tr1/stdint.h
- * This is a TR1 C++ Library header.
+ * This is a TR1 C++ Library header.
*/
#ifndef _TR1_STDINT_H
// <http://www.gnu.org/licenses/>.
/** @file tr1/stdio.h
- * This is a TR1 C++ Library header.
+ * This is a TR1 C++ Library header.
*/
#ifndef _TR1_STDIO_H
// <http://www.gnu.org/licenses/>.
/** @file tr1/stdlib.h
- * This is a TR1 C++ Library header.
+ * This is a TR1 C++ Library header.
*/
#ifndef _GLIBCXX_TR1_STDLIB_H
// <http://www.gnu.org/licenses/>.
/** @file tr1/tgmath.h
- * This is a TR1 C++ Library header.
+ * This is a TR1 C++ Library header.
*/
#ifndef _GLIBCXX_TR1_TGMATH_H
// <http://www.gnu.org/licenses/>.
/** @file tr1/wchar.h
- * This is a TR1 C++ Library header.
+ * This is a TR1 C++ Library header.
*/
#ifndef _GLIBCXX_TR1_WCHAR_H
// <http://www.gnu.org/licenses/>.
/** @file tr1/wctype.h
- * This is a TR1 C++ Library header.
+ * This is a TR1 C++ Library header.
*/
#ifndef _GLIBCXX_TR1_WCTYPE_H
// Copyright (C) 2007-2024 Free Software Foundation, Inc.
-//
+//
// This file is part of GCC.
//
// GCC is free software; you can redistribute it and/or modify
namespace __aeabiv1
{
extern "C" int
- __aeabi_atexit (void *object,
+ __aeabi_atexit (void *object,
void (*destructor) (void *),
void *dso_handle) throw ()
{
std::bad_alloc::~bad_alloc() _GLIBCXX_USE_NOEXCEPT { }
-const char*
+const char*
std::bad_alloc::what() const _GLIBCXX_USE_NOEXCEPT
{
return "std::bad_alloc";
#include <new>
-namespace std
+namespace std
{
// From N3639. This was voted in and then back out of C++14, and is now
// just here for backward link compatibility with code built with 4.9.
#include <new>
-namespace std
+namespace std
{
bad_array_new_length::~bad_array_new_length() _GLIBCXX_USE_NOEXCEPT { }
bad_cast::~bad_cast() _GLIBCXX_USE_NOEXCEPT { }
-const char*
+const char*
bad_cast::what() const _GLIBCXX_USE_NOEXCEPT
{
return "std::bad_cast";
bad_typeid::~bad_typeid() _GLIBCXX_USE_NOEXCEPT { }
-const char*
+const char*
bad_typeid::what() const _GLIBCXX_USE_NOEXCEPT
{
return "std::bad_typeid";
void **obj_ptr) const
{
__upcast_result result (__vmi_class_type_info::__flags_unknown_mask);
-
+
__do_upcast (dst_type, *obj_ptr, result);
if (!contained_public_p (result.part2dst))
return false;
__cxa_deleted_virtual(void) __attribute__ ((__noreturn__));
// Exception handling auxiliary.
- void
+ void
__cxa_bad_cast() __attribute__((__noreturn__));
- void
+ void
__cxa_bad_typeid() __attribute__((__noreturn__));
void
__cxa_get_globals_fast() _GLIBCXX_NOTHROW __attribute__ ((__const__));
// Free the space allocated for the primary exception.
- void
+ void
__cxa_free_exception(void*) _GLIBCXX_NOTHROW;
// Throw the exception.
void*
__cxa_begin_catch(void*) _GLIBCXX_NOTHROW;
- void
+ void
__cxa_end_catch();
- void
+ void
__cxa_rethrow() __attribute__((__noreturn__));
// Returns the type_info for the currently handled exception [15.3/8], or
// cxxabi.h subset for cancellation -*- C++ -*-
-
+
// Copyright (C) 2007-2024 Free Software Foundation, Inc.
//
// This file is part of GCC.
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 3, or (at your option)
// any later version.
-//
+//
// GCC is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
-//
+//
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
#ifdef __cplusplus
namespace __cxxabiv1
-{
- /**
+{
+ /**
* @brief Thrown as part of forced unwinding.
* @ingroup exceptions
*
virtual ~__forced_unwind() throw();
// Prevent catch by value.
- virtual void __pure_dummy() = 0;
+ virtual void __pure_dummy() = 0;
};
}
#endif // __cplusplus
#pragma GCC visibility pop
-#endif // __CXXABI_FORCED_H
+#endif // __CXXABI_FORCED_H
// here, so there's no backwards compatibility problem.
thrown_ptr = __get_object_from_ue (ue_header);
}
-
+
__cxa_type_match_result result = ctm_succeeded;
// Pointer types need to adjust the actual pointer, not
// Remember for end_catch and rethrow.
globals->caughtExceptions = header;
- // ??? No sensible value to return; we don't know what the
+ // ??? No sensible value to return; we don't know what the
// object is, much less where it is in relation to the header.
return 0;
}
abi::__foreign_exception::~__foreign_exception() throw() { }
-const char*
+const char*
std::exception::what() const _GLIBCXX_TXN_SAFE_DYN _GLIBCXX_USE_NOEXCEPT
{
// NB: Another elegant option would be returning typeid(*this).name()
return "std::exception";
}
-const char*
+const char*
std::bad_exception::what() const _GLIBCXX_TXN_SAFE_DYN _GLIBCXX_USE_NOEXCEPT
{
return "std::bad_exception";
_uleb128_t tmp;
tmp = *e;
-
+
// Zero signals the end of the list. If we've not found
// a match by now, then we've failed the specification.
if (tmp == 0)
}
install_context:
-
+
// We can't use any of the cxa routines with foreign exceptions,
// because they all expect ue_header to be a struct __cxa_exception.
// So in that case, call terminate or unexpected directly.
#ifdef __ARM_EABI_UNWINDER__
const _Unwind_Word* e;
_Unwind_Word n;
-
+
e = ((const _Unwind_Word*) info.TType) - handler_switch_value - 1;
// Count the number of rtti objects.
n = 0;
xh_terminate_handler = xh->terminateHandler;
info.ttype_base = (_Unwind_Ptr) xh->catchTemp;
- __try
- { __unexpected (xh->unexpectedHandler); }
- __catch(...)
+ __try
+ { __unexpected (xh->unexpectedHandler); }
+ __catch(...)
{
// Get the exception thrown from unexpected.
const std::type_info &bad_exc = typeid (std::bad_exception);
if (check_exception_spec (&info, &bad_exc, 0, xh_switch_value))
throw std::bad_exception();
-#endif
+#endif
// Otherwise, die.
__terminate (xh_terminate_handler);
void
__cxxabiv1::__terminate (std::terminate_handler handler) throw ()
{
- __try
+ __try
{
handler ();
std::abort ();
- }
- __catch(...)
+ }
+ __catch(...)
{ std::abort (); }
}
_Unwind_Resume_or_Rethrow (&header->unwindHeader);
#endif
#endif
-
+
// Some sort of unwinding error. Note that terminate is a handler.
__cxa_begin_catch (&header->unwindHeader);
}
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 3, or (at your option)
// any later version.
-//
+//
// GCC is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
-//
+//
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
exception_ptr(__safe_bool) _GLIBCXX_USE_NOEXCEPT;
#endif
- exception_ptr&
+ exception_ptr&
operator=(const exception_ptr&) _GLIBCXX_USE_NOEXCEPT;
#if __cplusplus >= 201103L
- exception_ptr&
+ exception_ptr&
operator=(exception_ptr&& __o) noexcept
{
exception_ptr(static_cast<exception_ptr&&>(__o)).swap(*this);
~exception_ptr() _GLIBCXX_USE_NOEXCEPT;
- void
+ void
swap(exception_ptr&) _GLIBCXX_USE_NOEXCEPT;
#ifdef _GLIBCXX_EH_PTR_COMPAT
// Copyright (C) 2002-2024 Free Software Foundation, Inc.
-//
+//
// This file is part of GCC.
//
// GCC is free software; you can redistribute it and/or modify
namespace
{
// A single mutex controlling all static initializations.
- static __gnu_cxx::__recursive_mutex* static_mutex;
+ static __gnu_cxx::__recursive_mutex* static_mutex;
typedef char fake_recursive_mutex[sizeof(__gnu_cxx::__recursive_mutex)]
__attribute__ ((aligned(__alignof__(__gnu_cxx::__recursive_mutex))));
namespace
{
// A single condition variable controlling all static initializations.
- static __gnu_cxx::__cond* static_cond;
+ static __gnu_cxx::__cond* static_cond;
// using a fake type to avoid initializing a static class.
typedef char fake_cond_t[sizeof(__gnu_cxx::__cond)]
// | _GLIBCXX_GUARD_WAITING_BIT) and some other threads are waiting until
// it is initialized.
-namespace __cxxabiv1
+namespace __cxxabiv1
{
#ifdef _GLIBCXX_USE_FUTEX
namespace
}
extern "C"
- int __cxa_guard_acquire (__guard *g)
+ int __cxa_guard_acquire (__guard *g)
{
#ifdef __GTHREADS
// If the target can reorder loads, we need to insert a read memory
// This thread should do the initialization.
return 1;
}
-
+
if (expected == guard_bit)
{
// Already initialized.
- return 0;
+ return 0;
}
if (expected == pending_bit)
// Use acquire here.
int newv = expected | waiting_bit;
if (!__atomic_compare_exchange_n(gi, &expected, newv, false,
- __ATOMIC_ACQ_REL,
+ __ATOMIC_ACQ_REL,
__ATOMIC_ACQUIRE))
{
if (expected == guard_bit)
if (expected == 0)
continue;
}
-
+
expected = newv;
}
}
#elif defined(__GTHREAD_HAS_COND)
if (__gthread_active_p())
- {
+ {
mutex_wrapper mw;
set_init_in_progress_flag(g, 0);
// the condition variable.
get_static_cond().broadcast();
return;
- }
+ }
#endif
set_init_in_progress_flag(g, 0);
get_static_cond().broadcast();
return;
- }
+ }
#endif
set_init_in_progress_flag(g, 0);
// Copyright (C) 2011-2024 Free Software Foundation, Inc.
-//
+//
// This file is part of GCC.
//
// GCC is free software; you can redistribute it and/or modify
#include <exception>
-namespace std
+namespace std
{
nested_exception::~nested_exception() noexcept = default;
} // namespace std
if (typeid (*this) != typeid (*thr_type))
return false; // not both same kind of pointers
#endif
-
+
if (!(outer & 1))
// We're not the same and our outer pointers are not all const qualified
// Therefore there must at least be a qualification conversion involved
if (catch_fqual & ~throw_fqual)
/* But not the reverse. */
return false;
-
+
if (tflags & ~__flags)
// We're less qualified.
return false;
-
+
if (!(__flags & __const_mask))
outer &= ~1;
-
+
return __pointer_catch (thrown_type, thr_obj, outer);
}
// thr_type is really a __pointer_to_member_type_info.
const __pointer_to_member_type_info *thrown_type =
static_cast <const __pointer_to_member_type_info *> (thr_type);
-
+
if (*__context != *thrown_type->__context)
return false; // not pointers to member of same class
-
+
return __pbase_type_info::__pointer_catch (thrown_type, thr_obj, outer);
}
return !thrown_type->__pointee->__is_function_p ();
}
#endif
-
+
return __pbase_type_info::__pointer_catch (thrown_type, thr_obj, outer);
}
-// -*- C++ -*-
+// -*- C++ -*-
// Copyright (C) 2000-2024 Free Software Foundation, Inc.
//
// This file is part of GCC.
{
if (__class_type_info::__do_upcast (dst, obj_ptr, result))
return true;
-
+
return __base_type->__do_upcast (dst, obj_ptr, result);
}
// Initial part of a vtable, this structure is used with offsetof, so we don't
// have to keep alignments consistent manually.
-struct vtable_prefix
+struct vtable_prefix
{
// Offset to most derived object.
ptrdiff_t whole_object;
// Additional padding if necessary.
#ifdef _GLIBCXX_VTABLE_PADDING
- ptrdiff_t padding1;
+ ptrdiff_t padding1;
#endif
// Pointer to most derived type_info.
- const __class_type_info *whole_type;
+ const __class_type_info *whole_type;
// Additional padding if necessary.
#ifdef _GLIBCXX_VTABLE_PADDING
- ptrdiff_t padding2;
+ ptrdiff_t padding2;
#endif
// What a class's vptr points to.
- const void *origin;
+ const void *origin;
};
template <typename T>
if (is_virtual)
{
const void *vtable = *static_cast <const void *const *> (addr);
-
+
offset = *adjust_pointer<ptrdiff_t> (vtable, offset);
}
__sub_kind whole2src; // path from most derived object to sub object
__sub_kind dst2src; // path from target to sub object
int whole_details; // details of the whole class hierarchy
-
+
__dyncast_result (int details_ = __vmi_class_type_info::__flags_unknown_mask)
:dst_ptr (NULL), whole2dst (__unknown),
whole2src (__unknown), dst2src (__unknown),
protected:
__dyncast_result(const __dyncast_result&);
-
+
__dyncast_result&
operator=(const __dyncast_result&);
};
// Invokes given handler, dying appropriately if the user handler was
// so inconsiderate as to return.
-extern void __terminate(std::terminate_handler) throw ()
+extern void __terminate(std::terminate_handler) throw ()
__attribute__((__noreturn__));
extern void __unexpected(std::terminate_handler)
__attribute__((__noreturn__));
#else // !__ARM_EABI_UNWINDER__
// This is the primary exception class we report -- "GNUCC++\0".
const _Unwind_Exception_Class __gxx_primary_exception_class
-= ((((((((_Unwind_Exception_Class) 'G'
+= ((((((((_Unwind_Exception_Class) 'G'
<< 8 | (_Unwind_Exception_Class) 'N')
<< 8 | (_Unwind_Exception_Class) 'U')
<< 8 | (_Unwind_Exception_Class) 'C')
// This is the dependent (from std::rethrow_exception) exception class we report
// "GNUCC++\x01"
const _Unwind_Exception_Class __gxx_dependent_exception_class
-= ((((((((_Unwind_Exception_Class) 'G'
+= ((((((((_Unwind_Exception_Class) 'G'
<< 8 | (_Unwind_Exception_Class) 'N')
<< 8 | (_Unwind_Exception_Class) 'U')
<< 8 | (_Unwind_Exception_Class) 'C')
// New abi Support -*- C++ -*-
// Copyright (C) 2000-2024 Free Software Foundation, Inc.
-//
+//
// This file is part of GCC.
//
// GCC is free software; you can redistribute it and/or modify
namespace __cxxabiv1
{
- namespace
+ namespace
{
- struct uncatch_exception
+ struct uncatch_exception
{
uncatch_exception();
~uncatch_exception () { __cxa_begin_catch (&p->unwindHeader); }
-
+
__cxa_exception* p;
private:
}
return base;
}
-
+
extern "C" void *
__cxa_vec_new3(std::size_t element_count,
std::size_t element_size,
char *base = static_cast<char *>(alloc (size));
if (!base)
return base;
-
+
if (padding_size)
{
base += padding_size;
}
return base;
}
-
+
// Construct array.
extern "C" __cxa_vec_ctor_return_type
__cxa_vec_ctor(void *array_address,
{
std::size_t ix = 0;
char *ptr = static_cast<char *>(array_address);
-
+
__try
{
if (constructor)
}
_GLIBCXX_CXA_VEC_CTOR_RETURN (array_address);
}
-
+
// Construct an array by copying.
extern "C" __cxa_vec_ctor_return_type
__cxa_vec_cctor(void *dest_array,
std::size_t ix = 0;
char *dest_ptr = static_cast<char *>(dest_array);
char *src_ptr = static_cast<char *>(src_array);
-
+
__try
{
if (constructor)
- for (; ix != element_count;
+ for (; ix != element_count;
ix++, src_ptr += element_size, dest_ptr += element_size)
constructor(dest_ptr, src_ptr);
}
}
_GLIBCXX_CXA_VEC_CTOR_RETURN (dest_array);
}
-
+
// Destruct array.
extern "C" void
__cxa_vec_dtor(void *array_address,
return;
char* base = static_cast<char *>(array_address);
-
+
if (padding_size)
{
std::size_t element_count = reinterpret_cast<std::size_t *>(base)[-1];
reinterpret_cast<std::size_t *>(array_address)[-2] = element_size;
reinterpret_cast<std::size_t *>(array_address)[-1] = element_count;
return abi::__cxa_vec_ctor (array_address,
- element_count, element_size,
+ element_count, element_size,
constructor, /*destructor=*/NULL);
}
-
+
extern "C" void *
__aeabi_vec_cctor_nocookie_nodtor (void *dest_array,
- void *src_array,
- std::size_t element_size,
+ void *src_array,
+ std::size_t element_size,
std::size_t element_count,
void *(*constructor) (void *, void *))
{
}
extern "C" void *
- __aeabi_vec_new_cookie_noctor (std::size_t element_size,
+ __aeabi_vec_new_cookie_noctor (std::size_t element_size,
std::size_t element_count)
{
- return abi::__cxa_vec_new(element_count, element_size,
+ return abi::__cxa_vec_new(element_count, element_size,
2 * sizeof (std::size_t),
/*constructor=*/NULL, /*destructor=*/NULL);
}
extern "C" void *
- __aeabi_vec_new_nocookie (std::size_t element_size,
+ __aeabi_vec_new_nocookie (std::size_t element_size,
std::size_t element_count,
abi::__cxa_cdtor_type constructor)
{
- return abi::__cxa_vec_new (element_count, element_size, 0, constructor,
+ return abi::__cxa_vec_new (element_count, element_size, 0, constructor,
NULL);
}
extern "C" void *
- __aeabi_vec_new_cookie_nodtor (std::size_t element_size,
+ __aeabi_vec_new_cookie_nodtor (std::size_t element_size,
std::size_t element_count,
abi::__cxa_cdtor_type constructor)
{
- return abi::__cxa_vec_new(element_count, element_size,
+ return abi::__cxa_vec_new(element_count, element_size,
2 * sizeof (std::size_t),
constructor, NULL);
}
extern "C" void *
- __aeabi_vec_new_cookie(std::size_t element_size,
+ __aeabi_vec_new_cookie(std::size_t element_size,
std::size_t element_count,
abi::__cxa_cdtor_type constructor,
abi::__cxa_cdtor_type destructor)
{
- return abi::__cxa_vec_new (element_count, element_size,
+ return abi::__cxa_vec_new (element_count, element_size,
2 * sizeof (std::size_t),
constructor, destructor);
}
-
+
extern "C" void *
- __aeabi_vec_dtor (void *array_address,
+ __aeabi_vec_dtor (void *array_address,
abi::__cxa_cdtor_type destructor,
- std::size_t element_size,
+ std::size_t element_size,
std::size_t element_count)
{
- abi::__cxa_vec_dtor (array_address, element_count, element_size,
+ abi::__cxa_vec_dtor (array_address, element_count, element_size,
destructor);
return reinterpret_cast<std::size_t*> (array_address) - 2;
}
extern "C" void *
- __aeabi_vec_dtor_cookie (void *array_address,
+ __aeabi_vec_dtor_cookie (void *array_address,
abi::__cxa_cdtor_type destructor)
{
if (!array_address)
return NULL;
- abi::__cxa_vec_dtor (array_address,
+ abi::__cxa_vec_dtor (array_address,
reinterpret_cast<std::size_t *>(array_address)[-1],
reinterpret_cast<std::size_t *>(array_address)[-2],
destructor);
return reinterpret_cast<std::size_t*> (array_address) - 2;
}
-
-
+
+
extern "C" void
- __aeabi_vec_delete (void *array_address,
+ __aeabi_vec_delete (void *array_address,
abi::__cxa_cdtor_type destructor)
{
if (!array_address)
}
extern "C" void
- __aeabi_vec_delete3 (void *array_address,
+ __aeabi_vec_delete3 (void *array_address,
abi::__cxa_cdtor_type destructor,
void (*dealloc) (void *, std::size_t))
{
{
if (obj_ptr == src_ptr && *this == *src_type)
return __contained_public;
-
+
for (std::size_t i = __base_count; i--;)
{
if (!__base_info[i].__is_public_p ())
continue; // Not public, can't be here.
-
+
const void *base = obj_ptr;
ptrdiff_t offset = __base_info[i].__offset ();
bool is_virtual = __base_info[i].__is_virtual_p ();
-
+
if (is_virtual)
{
if (src2dst == -3)
continue; // Not a virtual base, so can't be here.
}
base = convert_to_base (base, is_virtual, offset);
-
+
__sub_kind base_kind = __base_info[i].__base_type->__do_find_public_src
(src2dst, base, src_type, src_ptr);
if (contained_p (base_kind))
return base_kind;
}
}
-
+
return __not_contained;
}
{
if (result.whole_details & __flags_unknown_mask)
result.whole_details = __flags;
-
+
if (obj_ptr == src_ptr && *this == *src_type)
{
// The src object we started from. Indicate how we are accessible from
__sub_kind base_access = access_path;
ptrdiff_t offset = __base_info[i].__offset ();
bool is_virtual = __base_info[i].__is_virtual_p ();
-
+
if (is_virtual)
base_access = __sub_kind (base_access | __contained_virtual_mask);
base = convert_to_base (base, is_virtual, offset);
continue;
base_access = __sub_kind (base_access & ~__contained_public_mask);
}
-
+
bool result2_ambig
= __base_info[i].__base_type->__do_dyncast (src2dst, base_access,
dst_type, base,
// which can't be disambiguated
return result2_ambig;
}
-
+
if (!result_ambig && !result.dst_ptr)
{
// Not found anything yet.
// we're ambiguous and fail. If it is in neither, we're ambiguous,
// but don't yet fail as we might later find a third base which does
// contain SRC_PTR.
-
+
__sub_kind new_sub_kind = result2.dst2src;
__sub_kind old_sub_kind = result.dst2src;
-
+
if (contained_p (result.whole2src)
&& (!virtual_p (result.whole2src)
|| !(result.whole_details & __diamond_shaped_mask)))
else
old_sub_kind = dst_type->__find_public_src
(src2dst, result.dst_ptr, src_type, src_ptr);
-
+
if (new_sub_kind >= __not_contained)
;// already calculated
else if (contained_p (old_sub_kind)
new_sub_kind = dst_type->__find_public_src
(src2dst, result2.dst_ptr, src_type, src_ptr);
}
-
+
// Neither sub_kind can be contained_ambig -- we bail out early
// when we find those.
if (contained_p (__sub_kind (new_sub_kind ^ old_sub_kind)))
result_ambig = true;
}
}
-
+
if (result.whole2src == __contained_private)
// We found SRC_PTR as a private non-virtual base, therefore all
// cross casts will fail. We have already found a down cast, if
{
if (__class_type_info::__do_upcast (dst, obj_ptr, result))
return true;
-
+
int src_details = result.src_details;
if (src_details & __flags_unknown_mask)
src_details = __flags;
-
+
for (std::size_t i = __base_count; i--;)
{
__upcast_result result2 (src_details);
ptrdiff_t offset = __base_info[i].__offset ();
bool is_virtual = __base_info[i].__is_virtual_p ();
bool is_public = __base_info[i].__is_public_p ();
-
+
if (!is_public && !(src_details & __non_diamond_repeat_mask))
// original cannot have an ambiguous base, so skip private bases
continue;
if (base)
base = convert_to_base (base, is_virtual, offset);
-
+
if (__base_info[i].__base_type->__do_upcast (dst, base, result2))
{
if (result2.base_type == nonvirtual_base_type && is_virtual)
result2.base_type = __base_info[i].__base_type;
if (contained_p (result2.part2dst) && !is_public)
result2.part2dst = __sub_kind (result2.part2dst & ~__contained_public_mask);
-
+
if (!result.base_type)
{
result = result2;
if (!contained_p (result.part2dst))
return true; // found ambiguously
-
+
if (result.part2dst & __contained_public_mask)
{
if (!(__flags & __non_diamond_repeat_mask))
{
int status = -1;
char *dem = 0;
-
+
dem = __cxa_demangle(name, 0, 0, &status);
fputs("terminate called after throwing an instance of '", stderr);
}
else
fputs("terminate called without an active exception\n", stderr);
-
+
abort();
}