cobol/cdf.o \
cobol/cdf-copy.o \
cobol/cobol1.o \
+ cobol/compare.o \
cobol/convert.o \
cobol/except.o \
cobol/genutil.o \
#define ATTR_TMPURE_NORETURN_NOTHROW_LEAF_COLD_LIST (ECF_TM_PURE|ECF_NORETURN|ECF_NOTHROW|ECF_LEAF|ECF_COLD)
#define ATTR_NORETURN_NOTHROW_LIST (ECF_NORETURN|ECF_NOTHROW)
#define ATTR_NOTHROW_NONNULL_LEAF (ECF_NOTHROW|ECF_LEAF)
+#define ATTR_CONST_NOTHROW_LEAF (ECF_CONST | ECF_NOTHROW | ECF_LEAF)
static void
gfc_define_builtin (const char *name, tree type, enum built_in_function code,
gfc_define_builtin ("__builtin_strcpy", ftype, BUILT_IN_STRCPY,
"strcpy", ATTR_NOTHROW_NONNULL_LEAF);
+
+ ftype = build_function_type_list (uint16_type_node,
+ uint16_type_node,
+ NULL_TREE);
+ gfc_define_builtin ("__builtin_bswap16",
+ ftype,
+ BUILT_IN_BSWAP16,
+ NULL,
+ ATTR_CONST_NOTHROW_LEAF);
+
+ ftype = build_function_type_list (uint32_type_node,
+ uint32_type_node,
+ NULL_TREE);
+ gfc_define_builtin ("__builtin_bswap32",
+ ftype,
+ BUILT_IN_BSWAP32,
+ NULL,
+ ATTR_CONST_NOTHROW_LEAF);
+
+ ftype = build_function_type_list (uint64_type_node,
+ uint64_type_node,
+ NULL_TREE);
+ gfc_define_builtin ("__builtin_bswap64",
+ ftype,
+ BUILT_IN_BSWAP64,
+ NULL,
+ ATTR_CONST_NOTHROW_LEAF);
+
+ ftype = build_function_type_list (unsigned_intTI_type_node,
+ unsigned_intTI_type_node,
+ NULL_TREE);
+ gfc_define_builtin ("__builtin_bswap128",
+ ftype,
+ BUILT_IN_BSWAP128,
+ NULL,
+ ATTR_CONST_NOTHROW_LEAF);
+
+
build_common_builtin_nodes ();
// Make sure this is a supported configuration.
--- /dev/null
+/*
+ * Copyright (c) 2021-2026 Symas Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of the Symas Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "cobol-system.h"
+
+#include "coretypes.h"
+#include "tree.h"
+#include "tree-iterator.h"
+#include "stringpool.h"
+#include "diagnostic-core.h"
+#include "target.h"
+
+#include "../../libgcobol/ec.h"
+#include "../../libgcobol/common-defs.h"
+#include "util.h"
+#include "cbldiag.h"
+#include "symbols.h"
+#include "gengen.h"
+#include "inspect.h"
+#include "../../libgcobol/io.h"
+#include "genapi.h"
+#include "genutil.h"
+#include "genmath.h"
+#include "structs.h"
+#include "../../libgcobol/gcobolio.h"
+#include "../../libgcobol/charmaps.h"
+#include "../../libgcobol/valconv.h"
+#include "show_parse.h"
+#include "fold-const.h"
+#include "realmpfr.h"
+#include "compare.h"
+
+static bool
+comparably_numeric(const cbl_refer_t &refer)
+  {
+  // Returns TRUE when the refer can be treated as a binary integer (CHAR
+  // through INT128) for the purpose of comparisons; floating-point types
+  // are broken out separately.
+
+  if( refer.refmod.from )
+    {
+    // Anything with a refmod is treated like an alphanumeric, not a number.
+    return false;
+    }
+
+  switch(refer.field->type)
+    {
+    case FldNumericBinary:
+    case FldNumericDisplay:
+    case FldPacked:
+    case FldNumericBin5:
+    case FldLiteralN:
+    case FldIndex:
+    case FldPointer:
+      return true;
+
+    case FldAlphanumeric:
+      // An FldAlphanumeric flagged as the ZERO figurative constant is a
+      // number.
+      return (cbl_figconst_t)(refer.field->attr & FIGCONST_MASK)
+              == zero_value_e;
+
+    default:
+      return false;
+    }
+  }
+
+static bool
+comparably_alpha(const cbl_refer_t &refer)
+  {
+  // Returns TRUE when the refer can be treated as an alphanumeric string
+  // for the purpose of comparisons.
+
+  switch(refer.field->type)
+    {
+    case FldGroup:
+    case FldAlphanumeric:
+    case FldLiteralA:
+    case FldNumericEdited:
+    case FldAlphaEdited:
+      return true;
+
+    default:
+      // A refmodded operand is alphanumeric regardless of its underlying
+      // type.
+      return refer.refmod.from ? true : false;
+    }
+  }
+
+enum {MAX_INT128_DIGITS = 38};
+
+static void
+digiter(int &digits, int &rdigits, const cbl_refer_t &refer)
+  {
+  // Establishes the digits/rdigits pair for one refer: synthesizes a digit
+  // count for pure binary types (which declare none) and folds P-scaling
+  // into the pair.
+  digits  = refer.field->data.digits;
+  rdigits = refer.field->data.rdigits;
+
+  if( digits == 0 )
+    {
+    // We are dealing with a pure binary type; derive a decimal digit count
+    // from its storage capacity.
+    switch(refer.field->data.capacity())
+      {
+      case  1: digits =  2; break;
+      case  2: digits =  4; break;
+      case  4: digits =  9; break;
+      case  8: digits = 19; break;
+      case 16: digits = 38; break;
+      default: gcc_unreachable(); break;
+      }
+    }
+
+  if( refer.field->attr & scaled_e )
+    {
+    if( rdigits < 0 )
+      {
+      // This is like 999PPPP with rdigits = -4
+      // So, digits becomes 7, and rdigits becomes 0
+      // Our caller will have to multiply by 10^4 to get the 999 digits into
+      // the right place.
+      digits += rdigits;
+      rdigits = 0;
+      }
+    else
+      {
+      // This is like PPP9999. Digits stays 3, and rdigits becomes 7
+      rdigits += digits;
+      }
+    }
+  }
+
+static int
+total_digits(int &left_rdigits,
+             int &right_rdigits,
+             const cbl_refer_t &left_side,
+             const cbl_refer_t &right_side)
+  {
+  // Compile-time flavor: neither parameter is intermediate_e, so both
+  // operands have known digits/rdigits and the normalized digit count can
+  // be computed here rather than in generated code.
+  int lt_digits = 0;
+  int rt_digits = 0;
+  digiter(lt_digits, left_rdigits,  left_side);
+  digiter(rt_digits, right_rdigits, right_side);
+
+  // A negatively-scaled side (999PPPP) shifts its scaling burden onto the
+  // other side's rdigits:
+  if( (left_side.field->attr & scaled_e) && left_side.field->data.rdigits < 0)
+    {
+    right_rdigits -= left_side.field->data.rdigits;
+    }
+  if( (right_side.field->attr & scaled_e) && right_side.field->data.rdigits < 0)
+    {
+    left_rdigits -= right_side.field->data.rdigits;
+    }
+
+  // Drop the decimal places the two sides have in common; afterward at
+  // least one of the two rdigits values is zero.
+  const int common = std::min(left_rdigits, right_rdigits);
+  left_rdigits  -= common;
+  right_rdigits -= common;
+
+  // Scale each side's digit count up by the other side's remaining
+  // rdigits...
+  lt_digits += right_rdigits;
+  rt_digits += left_rdigits;
+
+  // ...and return the larger of the two.
+  return std::max(lt_digits, rt_digits);
+  }
+
+// Run-time analogue of total_digits().  left_rdigits and right_rdigits
+// become generated INT variables holding the scaling each side needs;
+// after the final IF/ELSE below at least one of them is a run-time zero.
+static void
+total_digits_tree( tree &left_rdigits,
+                   tree &right_rdigits,
+                   const cbl_refer_t &left_side,
+                   const cbl_refer_t &right_side)
+  {
+  // This routine is used when we might have to use the run-time values:
+  left_rdigits = gg_define_int();
+  right_rdigits = gg_define_int();
+
+  // An intermediate_e field's rdigits is known only at run time, so it is
+  // read from the field's run-time structure; otherwise the compile-time
+  // value is baked in as a constant.
+  if( left_side.field->attr & intermediate_e )
+    {
+    gg_assign( left_rdigits,
+               gg_cast(INT,
+                       member(left_side.field->var_decl_node,
+                              "rdigits")));
+    }
+  else
+    {
+    gg_assign(left_rdigits,
+              build_int_cst_type(INT,
+                                 left_side.field->data.rdigits));
+    }
+
+  if( right_side.field->attr & intermediate_e )
+    {
+    gg_assign(right_rdigits,
+              gg_cast(INT,
+                      member(right_side.field->var_decl_node,
+                             "rdigits")));
+    }
+  else
+    {
+    gg_assign(right_rdigits,
+              build_int_cst_type(INT,
+                                 right_side.field->data.rdigits));
+    }
+
+  // NOTE(review): the scaled_e adjustments below consult the compile-time
+  // data.rdigits even when a field is intermediate_e -- presumably scaled
+  // intermediates cannot occur here; confirm.
+  if( (left_side.field->attr & scaled_e) && left_side.field->data.rdigits < 0)
+    {
+    // Left rdigits becomes zero; right_rdigits is augmented to the negative of
+    // left rdigits
+    gg_assign(left_rdigits, integer_zero_node);
+    gg_assign(right_rdigits,
+              gg_add(right_rdigits,
+                     build_int_cst_type(INT,
+                                        -left_side.field->data.rdigits)));
+    }
+  if( (left_side.field->attr & scaled_e) && left_side.field->data.rdigits > 0)
+    {
+    // left_rdigits is augmented by left_digits
+    gg_assign(left_rdigits,
+              gg_add(left_rdigits,
+                     build_int_cst_type(INT,
+                                        left_side.field->data.digits)));
+    }
+
+  if( (right_side.field->attr & scaled_e) && right_side.field->data.rdigits < 0)
+    {
+    // Right rdigits becomes zero; left_rdigits is augmented by the negative of
+    // right_rdigits
+    gg_assign(right_rdigits, integer_zero_node);
+    gg_assign(left_rdigits,
+              gg_add(left_rdigits,
+                     build_int_cst_type(INT,
+                                        -right_side.field->data.rdigits)));
+    }
+  if( (right_side.field->attr & scaled_e) && right_side.field->data.rdigits > 0)
+    {
+    // right_rdigits is augmented by right_digits
+    gg_assign(right_rdigits,
+              gg_add(right_rdigits,
+                     build_int_cst_type(INT,
+                                        right_side.field->data.digits)));
+    }
+
+  // gg_printf("KILROY LEFT %d\n", left_rdigits, NULL_TREE);
+  // gg_printf("KILROY RIGHT %d\n", right_rdigits, NULL_TREE);
+
+  // We can reduce the two rdigits values by the common portion of both. This
+  // will leave one of them at zero
+  IF( left_rdigits, gt_op, right_rdigits )
+    {
+    gg_assign(left_rdigits, gg_subtract(left_rdigits, right_rdigits));
+    gg_assign(right_rdigits, integer_zero_node);
+    }
+  ELSE
+    {
+    gg_assign(right_rdigits, gg_subtract(right_rdigits, left_rdigits));
+    gg_assign(left_rdigits, integer_zero_node);
+    }
+  ENDIF
+  }
+
+static tree
+type_based_on_digits(int digits, bool signable)
+  {
+  // Maps a decimal digit count onto the smallest integer tree type that
+  // can hold it, in the requested signedness.  (A signed two-digit value
+  // maps to SHORT; the SCHAR case is deliberately not used.)
+  if( signable )
+    {
+    if( digits <= 4 )
+      {
+      return SHORT;
+      }
+    if( digits <= 9 )
+      {
+      return INT;
+      }
+    if( digits <= 19 )
+      {
+      return LONG;
+      }
+    return INT128;
+    }
+
+  if( digits <= 2 )
+    {
+    return UCHAR;
+    }
+  if( digits <= 4 )
+    {
+    return USHORT;
+    }
+  if( digits <= 9 )
+    {
+    return UINT;
+    }
+  if( digits <= 19 )
+    {
+    return ULONG;
+    }
+  return UINT128;
+  }
+
+static bool
+numeric_compare(tree &left,
+                tree &right,
+                const cbl_refer_t &left_side,
+                const cbl_refer_t &right_side)
+  {
+  /* This routine handles binary integer values that can fit into one, two,
+     four, eight, or sixteen bytes.
+
+     In order to do that, we have to make sure that the normalized comparable
+     values both fit.
+
+     Nomenclature: for short, I will treat a 9(8)v9(3) as 8.3.
+
+     So consider comparing a 32.0 to a 1.9.  To normalize them, I would have
+     to multiply the 32.0 by 10^9, which would mean I would be comparing a
+     32.9 to a 1.9.  This is mathematically correct; the problem is that an
+     int128 can hold only 38 digits, and thus can't handle the 41 digits of
+     a 32.9.
+
+     So, in this routine I make sure the two values can be normalized into no
+     more than INT128.  Values that are too large fall through -- we return
+     false -- to a library routine that can handle them, albeit in a slower
+     fashion than we aspire to here.
+
+     On success, left and right receive generated values of the same type,
+     scaled so that comparing them directly is correct. */
+
+  bool compared = false;
+
+  if( refer_is_super_clean(left_side) && refer_is_super_clean(right_side) )
+    {
+    int left_rdigits;
+    int right_rdigits;
+    int ntotal_digits = total_digits( left_rdigits,
+                                      right_rdigits,
+                                      left_side,
+                                      right_side);
+    if( ntotal_digits <= MAX_INT128_DIGITS )
+      {
+      // Our interest is comparison, so we need both left and right to be
+      // big enough to hold ntotal_digits, and we need both to be the same
+      // class of signable.  If the signables don't match, we use a signed
+      // type, and below we check the signable side for a negative value,
+      // forcing the comparison result when one is found.  (An earlier
+      // comment here said "unsigned"; the code has always selected signed.)
+      bool mismatched = (left_side.field->attr & signable_e)
+                        != (right_side.field->attr & signable_e) ;
+
+      bool are_signed = mismatched || (left_side.field->attr & signable_e) ;
+      tree type = type_based_on_digits(ntotal_digits, are_signed);
+
+      get_binary_value(left, left_side, type);
+      get_binary_value(right, right_side, type);
+      // We have two good binary values, and they are the same size.
+
+      // If they were mismatched in signable, they were both assigned to
+      // signed types.  We need to check to see if the one that is
+      // signable was negative.  If so, we return 0 and 1 so that the
+      // test comes out right.
+
+      if( (left_side.field->attr & signable_e)
+          && !(right_side.field->attr & signable_e) )
+        {
+        tree signable_type = type_based_on_digits(ntotal_digits, true);
+        IF( gg_cast(signable_type, left),
+            lt_op,
+            gg_cast(signable_type, integer_zero_node) )
+          {
+          // left is negative, hence necessarily below the unsigned right:
+          gg_assign(left, gg_cast(type, integer_zero_node));
+          gg_assign(right, gg_cast(type, integer_one_node));
+          }
+        ELSE
+          {
+          }
+        ENDIF
+        }
+      else if( !(left_side.field->attr & signable_e)
+               && (right_side.field->attr & signable_e) )
+        {
+        tree signable_type = type_based_on_digits(ntotal_digits, true);
+        IF( gg_cast(signable_type, right),
+            lt_op,
+            gg_cast(signable_type, integer_zero_node) )
+          {
+          // right is negative, hence necessarily below the unsigned left:
+          gg_assign(left, gg_cast(type, integer_one_node));
+          gg_assign(right, gg_cast(type, integer_zero_node));
+          }
+        ELSE
+          {
+          }
+        ENDIF
+        }
+      // If left_rdigits and right_rdigits are different, then one of the
+      // values needs to be scaled by the other's rdigits:
+
+      static uint64_t powt[] =
+        {
+        1UL,                  // 00
+        10UL,                 // 01
+        100UL,                // 02
+        1000UL,               // 03
+        10000UL,              // 04
+        100000UL,             // 05
+        1000000UL,            // 06
+        10000000UL,           // 07
+        100000000UL,          // 08
+        1000000000UL,         // 09
+        10000000000UL,        // 10
+        100000000000UL,       // 11
+        1000000000000UL,      // 12
+        10000000000000UL,     // 13
+        100000000000000UL,    // 14
+        1000000000000000UL,   // 15
+        10000000000000000UL,  // 16
+        };
+
+      while(left_rdigits)
+        {
+        // We need to multiply right by 10^left_rdigits, in chunks of at
+        // most 10^16, the largest entry in powt[]:
+        int next = std::min(left_rdigits, 16);
+        left_rdigits -= next;
+        gg_assign(right,
+                  gg_multiply(right,
+                              gg_cast(type,
+                                      build_int_cst_type(ULONG,
+                                                         powt[next]))));
+        }
+      while(right_rdigits)
+        {
+        // Likewise multiply left by 10^right_rdigits, in chunks of at most
+        // 10^16.  (This loop formerly used chunks of 10^9, a leftover that
+        // merely cost extra generated multiplications.)
+        int next = std::min(right_rdigits, 16);
+        right_rdigits -= next;
+        gg_assign(left,
+                  gg_multiply(left,
+                              gg_cast(type,
+                                      build_int_cst_type(ULONG,
+                                                         powt[next]))));
+        }
+      // Only now are left and right valid comparands:
+      compared = true;
+      }
+    // When ntotal_digits exceeds MAX_INT128_DIGITS, 'compared' stays false
+    // so the caller falls through to the library routine.  (Formerly the
+    // flag was set unconditionally here, handing the caller uninitialized
+    // left/right trees in the too-many-digits case.)
+    }
+  else
+    {
+    // One or both are not in working-storage, so we have to use run-time
+    // capacities and rdigits.
+
+    // We will be needing the run-time left_rdigits and right_rdigits
+    tree left_rdigits;
+    tree right_rdigits;
+    total_digits_tree(left_rdigits,
+                      right_rdigits,
+                      left_side,
+                      right_side);
+
+    // Our interest is comparison.
+    tree type = INT128;
+
+    get_binary_value(left, left_side, type);
+    get_binary_value(right, right_side, type);
+
+    // We have two good binary values, and they are the same size.
+
+    // If they were mismatched in signable, they were both assigned to
+    // signed types.  We need to check to see if the one that is
+    // signable was negative.  If so, we return 0 and 1 so that the
+    // test comes out right.
+
+    if( (left_side.field->attr & signable_e)
+        && !(right_side.field->attr & signable_e) )
+      {
+      tree signable_type = INT128;
+      IF( gg_cast(signable_type, left),
+          lt_op,
+          gg_cast(signable_type, integer_zero_node) )
+        {
+        gg_assign(left, gg_cast(type, integer_zero_node));
+        gg_assign(right, gg_cast(type, integer_one_node));
+        }
+      ELSE
+        {
+        }
+      ENDIF
+      }
+    else if( !(left_side.field->attr & signable_e)
+             && (right_side.field->attr & signable_e) )
+      {
+      tree signable_type = INT128;
+      IF( gg_cast(signable_type, right),
+          lt_op,
+          gg_cast(signable_type, integer_zero_node) )
+        {
+        gg_assign(left, gg_cast(type, integer_one_node));
+        gg_assign(right, gg_cast(type, integer_zero_node));
+        }
+      ELSE
+        {
+        }
+      ENDIF
+      }
+
+    // To normalize the positions of decimal points, each number has to be
+    // multiplied by the rdigits of the other
+    gg_assign(right,
+              gg_multiply(right,
+                          gg_call_expr(INT128,
+                                       "__gg__power_of_ten",
+                                       left_rdigits,
+                                       NULL_TREE)));
+    gg_assign(left,
+              gg_multiply(left,
+                          gg_call_expr(INT128,
+                                       "__gg__power_of_ten",
+                                       right_rdigits,
+                                       NULL_TREE)));
+    // The left and right values are ready to be compared
+    compared = true;
+    }
+
+  return compared;
+  }
+
+// Generates a call to __gg__compare_string_all comparing the left side
+// against a figurative constant or an ALL <literal>.  On return, 'left' is
+// a generated int receiving the comparison result and 'right' is a
+// generated zero for the caller to test against.
+static void
+alpha_compare_figconst( tree &left,
+                        tree &right,
+                        const cbl_refer_t &left_side,
+                        const cbl_refer_t &right_side)
+  {
+  // Call this when you know the right side is a figconst or refer.all
+  cbl_figconst_t figconst_right
+    = (cbl_figconst_t)(right_side.field->attr & FIGCONST_MASK);
+
+  tree location_left;
+  tree length_left;
+
+  get_location(location_left, left_side);
+  get_length(length_left, left_side);
+
+  const charmap_t *charmap_left =
+                            __gg__get_charmap(left_side.field->codeset.encoding);
+  charmap_t *charmap_right =
+                           __gg__get_charmap(right_side.field->codeset.encoding);
+  cbl_char_t char_right = charmap_right->figconst_character(figconst_right);
+
+  size_t nbytes;
+  char *converted;
+  // NOTE(review): 'converted' comes from __gg__iconverter and is not freed
+  // here -- presumably the converter owns the buffer; confirm.
+  if( figconst_right )
+    {
+    // Comparing an alphanumeric to a figconst
+    // We need to convert the char_right to the left's encoding:
+    converted = __gg__iconverter(right_side.field->codeset.encoding,
+                                 left_side.field->codeset.encoding,
+                                 &char_right,
+                                 1,
+                                 &nbytes);
+    }
+  else
+    {
+    // Comparing an alphanumeric to ALL <literal>
+    converted = __gg__iconverter(right_side.field->codeset.encoding,
+                                 left_side.field->codeset.encoding,
+                                 right_side.field->data.initial,
+                                 right_side.field->data.capacity(),
+                                 &nbytes);
+    }
+  left = gg_define_int();
+  right = gg_define_int(0);
+  gg_call(VOID,
+          "__gg__compare_string_all",
+          gg_get_address_of(left),
+          location_left,
+          length_left,
+          build_int_cst_type(INT, charmap_left->stride()),
+          build_string_literal(nbytes, converted),
+          build_int_cst_type(SIZE_T, nbytes),
+          NULL_TREE);
+  }
+
+static bool
+alpha_compare(tree &left,
+              tree &right,
+              const cbl_refer_t &left_side,
+              const cbl_refer_t &right_side)
+  {
+  /* This routine handles these cases:
+
+     alpha1        op  alpha2
+     national1     op  display2
+     display1      op  national2    ***
+     alpha         op  literal
+     literal       op  alpha        ***
+     alpha         op  all literal
+     all literal   op  alpha        ***
+     alpha         op  figconst
+     figconst      op  alpha        ***
+     figconst      op  figconst
+
+     The four cases marked *** are handled by this routine being called a
+     second time with the parameters swapped.
+
+     On success, 'left' receives the comparison result and 'right' is set
+     to zero so the caller's relational test of left against right comes
+     out correctly.  Returns false when no case here applied. */
+
+  bool retval = false;
+
+  charmap_t *charmap_left = __gg__get_charmap(left_side.field->codeset.encoding);
+  cbl_figconst_t figconst_left
+    = (cbl_figconst_t)(left_side.field->attr & FIGCONST_MASK);
+  cbl_char_t char_left = charmap_left->figconst_character(figconst_left);
+
+  charmap_t *charmap_right = __gg__get_charmap(right_side.field->codeset.encoding);
+  cbl_figconst_t figconst_right
+    = (cbl_figconst_t)(right_side.field->attr & FIGCONST_MASK);
+  cbl_char_t char_right = charmap_right->figconst_character(figconst_right);
+
+  tree location_left;
+  tree location_right;
+  tree length_left;
+  tree length_right;
+
+  if( figconst_left && figconst_right )
+    {
+    // This is a degenerate case.  Technically it isn't allowed; ISO says
+    // that comparing two literals is a syntax error.  But that's not my
+    // department.
+    if( char_left < char_right )
+      {
+      left = integer_minus_one_node;
+      }
+    else if( char_left > char_right )
+      {
+      left = integer_one_node;
+      }
+    else
+      {
+      left = integer_zero_node;
+      }
+    right = integer_zero_node;
+    retval = true;
+    goto done;
+    }
+  else if( figconst_right || right_side.all )
+    {
+    alpha_compare_figconst(left, right, left_side, right_side);
+    retval = true;
+    goto done;
+    }
+  else if( !figconst_left
+           && !figconst_right
+           && left_side.field->codeset.encoding
+              != right_side.field->codeset.encoding )
+    {
+    /* We have two different encodings.  The logic chain is:
+          If they have different strides, we convert to the larger one, else
+          If one is national, we convert to it, else
+          If one is display, we convert to it, else
+          We pick one at random.
+
+       In this chain, the winner has to be on the left.  If the winner is on
+       the right, we don't do it now.  It will get caught during the second
+       call to this routine, with the parameters flipped left-for-right. */
+    bool do_it = false;
+    if( charmap_left->stride() != charmap_right->stride() )
+      {
+      // The strides are different.  Convert when the bigger one is on the
+      // left.
+      do_it = charmap_left->stride() > charmap_right->stride();
+      }
+    else if( left_side.field->codeset.encoding == __gg__national_encoding
+             || right_side.field->codeset.encoding == __gg__national_encoding )
+      {
+      // One or the other is national.  We convert when the left one is
+      // national.
+      do_it = left_side.field->codeset.encoding == __gg__national_encoding;
+      }
+    else if( left_side.field->codeset.encoding == __gg__display_encoding
+             || right_side.field->codeset.encoding == __gg__display_encoding )
+      {
+      // One or the other is display.  We convert when the left one is
+      // display.
+      do_it = left_side.field->codeset.encoding == __gg__display_encoding;
+      }
+    else
+      {
+      // They are the same size, and neither is national or display.  This
+      // is highly weird.  The only thing I can think causing it would be
+      // two files with different specified encodings.  Just have the left
+      // side win.
+      do_it = true;
+      }
+    if( do_it )
+      {
+      // Call the library routine that converts the right side to the left
+      // encoding.  (The symbol carries the "__gg__" prefix used by every
+      // libgcobol runtime entry point; it was previously misspelled with a
+      // single underscore, "__gg_compare_string_different".)
+      get_location(location_left, left_side);
+      get_length(length_left, left_side);
+      get_location(location_right, right_side);
+      get_length(length_right, right_side);
+      left = gg_define_int();
+      right = gg_define_int(0);
+      gg_call(VOID,
+              "__gg__compare_string_different",
+              gg_get_address_of(left),
+              location_left,
+              length_left,
+              build_int_cst_type(INT, left_side.field->codeset.encoding),
+              location_right,
+              length_right,
+              build_int_cst_type(INT, right_side.field->codeset.encoding),
+              NULL_TREE);
+      retval = true;
+      goto done;
+      }
+    }
+  else if( !figconst_left
+           && !figconst_right
+           && left_side.field->codeset.encoding
+              == right_side.field->codeset.encoding )
+    {
+    /* The two encodings are the same. */
+
+    // When the difference in lengths of the two strings is bigger than the
+    // magic number, we call the library routine.  The library routine uses
+    // a const string of spaces and memcmp to handle the inferred trailing
+    // spaces of the shorter string.  So, the magic number represents the
+    // crossover of the time for a loop here versus the time for a call to
+    // the library.  I haven't made any effort to find the best value.
+    // R.J.Dubner; 2026-05-08
+    static const long MAGIC_NUMBER = 16;
+
+    // We are going to need the space character in this encoding space:
+    cbl_char_t space_char = charmap_left->mapped_character(ascii_space);
+    const char *the_routine;
+    switch( charmap_left->stride() )
+      {
+      case 1:
+        {
+        // We are single-byte-coded.
+        if( current_function->alphabet_in_use )
+          {
+          // Call the routine that uses the collation table.
+          the_routine = "__gg__compare_string_1";
+          }
+        else
+          {
+          size_t length_l = left_side.field->data.capacity();
+          size_t length_r = right_side.field->data.capacity();
+          if( refer_is_super_clean(left_side)
+              && refer_is_super_clean(right_side)
+              && std::abs( static_cast<long>(length_l)
+                           - static_cast<long>(length_r)) <= MAGIC_NUMBER )
+            {
+            // There is no collation table in use, we are single-byte
+            // encoded, and both variables are in working storage at known
+            // locations and with known lengths.  We can build code that is
+            // extremely efficient.
+            get_location(location_left, left_side);
+            get_location(location_right, right_side);
+            left = gg_define_int();
+            right = gg_define_int(0);
+            size_t length = std::min(length_l, length_r);
+            gg_assign(left,
+                      gg_memcmp(location_left,
+                                location_right,
+                                build_int_cst_type(SIZE_T,
+                                                   length)));
+            if( length_l > length_r )
+              {
+              // We have a LEFT excess that needs to be compared to the
+              // space char.
+              IF( left, eq_op, integer_zero_node )
+                {
+                tree count = gg_define_int();
+                gg_assign(count, build_int_cst_type(INT,
+                                                    length));
+                WHILE( count, lt_op, build_int_cst_type(INT,
+                                                        length_l) )
+                  {
+                  IF( gg_indirect(location_left, count),
+                      ne_op,
+                      build_int_cst_type(UCHAR, space_char) )
+                    {
+                    // We have a difference.  We need to calculate +1/-1
+                    IF( gg_indirect(location_left, count),
+                        gt_op,
+                        build_int_cst_type(UCHAR, space_char) )
+                      {
+                      gg_assign(left, integer_one_node);
+                      }
+                    ELSE
+                      {
+                      gg_assign(left, integer_minus_one_node);
+                      }
+                    ENDIF
+                    // Force the end of the loop:
+                    gg_assign(count, build_int_cst_type(INT,
+                                                        length_l));
+                    }
+                  ELSE
+                    {
+                    // The *left is a space; keep going
+                    }
+                  ENDIF
+                  gg_increment(count);
+                  }
+                WEND
+                }
+              ELSE
+                {
+                }
+              ENDIF
+              }
+
+            if( length_l < length_r )
+              {
+              // We have a RIGHT excess that needs to be compared to the
+              // space char.
+              IF( left, eq_op, integer_zero_node )
+                {
+                tree count = gg_define_int();
+                gg_assign(count, build_int_cst_type(INT,
+                                                    length));
+                WHILE( count, lt_op, build_int_cst_type(INT,
+                                                        length_r) )
+                  {
+                  IF( gg_indirect(location_right, count),
+                      ne_op,
+                      build_int_cst_type(UCHAR, space_char) )
+                    {
+                    // We have a difference.  We need to calculate +1/-1
+                    IF( gg_indirect(location_right, count),
+                        lt_op,
+                        build_int_cst_type(UCHAR, space_char) )
+                      {
+                      gg_assign(left, integer_one_node);
+                      }
+                    ELSE
+                      {
+                      gg_assign(left, integer_minus_one_node);
+                      }
+                    ENDIF
+                    // Force the end of the loop:
+                    gg_assign(count, build_int_cst_type(INT,
+                                                        length_r));
+                    }
+                  ELSE
+                    {
+                    // The *right is a space; keep going
+                    }
+                  ENDIF
+                  gg_increment(count);
+                  }
+                WEND
+                }
+              ELSE
+                {
+                }
+              ENDIF
+              }
+
+            retval = true;
+            goto done;
+            }
+          // Call the routine that does straight byte comparison
+          if( charmap_left->is_like_ebcdic() )
+            {
+            the_routine = "__gg__compare_string_1e";
+            }
+          else
+            {
+            the_routine = "__gg__compare_string_1a";
+            }
+          }
+        break;
+        }
+      case 2:
+        {
+        if( current_function->alphabet_in_use )
+          {
+          // Call the routine that uses the collation table.
+          the_routine = "__gg__compare_string_2";
+          }
+        else
+          {
+          // Call the routine that does straight short comparison
+          the_routine = "__gg__compare_string_2a";
+          }
+        break;
+        }
+      case 4:
+        {
+        if( current_function->alphabet_in_use )
+          {
+          // Call the routine that uses the collation table.
+          the_routine = "__gg__compare_string_4";
+          }
+        else
+          {
+          // Call the routine that does straight int comparison
+          the_routine = "__gg__compare_string_4a";
+          }
+        break;
+        }
+      default:
+        {
+        the_routine = nullptr;
+        gcc_unreachable();
+        break;
+        }
+      }
+    get_location(location_left, left_side);
+    get_length(length_left, left_side);
+    get_location(location_right, right_side);
+    get_length(length_right, right_side);
+    left = gg_define_int();
+    right = gg_define_int(0);
+    gg_call(VOID,
+            the_routine,
+            gg_get_address_of(left),
+            location_left,
+            length_left,
+            location_right,
+            length_right,
+            build_int_cst_type(INT, space_char),
+            NULL_TREE);
+    retval = true;
+    goto done;
+    }
+
+  done:
+  return retval;
+  }
+
+static bool
+numeric_alpha_compare(tree &left,
+                      tree &right,
+                      const cbl_refer_t &left_side,
+                      const cbl_refer_t &right_side)
+  {
+  /* The ISO specification says
+
+     The integer operand is treated as though it were moved, according to the
+     rules of the MOVE statement, to an elementary data item of the same length
+     in terms of character positions as the number of digits in the integer and
+     after that it is compared to the string. */
+
+  // The left side is the numeric operand and the right side the string.
+  // On success, 'left' is a generated int holding the comparison result,
+  // 'right' a generated zero, and TRUE is returned.
+
+  bool compared = false;
+
+  const charmap_t *charmap_left =
+                            __gg__get_charmap(left_side.field->codeset.encoding);
+
+  charmap_t *charmap_right =
+                           __gg__get_charmap(right_side.field->codeset.encoding);
+  cbl_figconst_t figconst_right
+    = (cbl_figconst_t)(right_side.field->attr & FIGCONST_MASK);
+  cbl_char_t char_right = charmap_right->figconst_character(figconst_right);
+
+  // A numeric literal on the left is compared textually, using its source
+  // text with any leading sign stripped:
+  if( left_side.field->type == FldLiteralN )
+    {
+    // On the left side, we have data.original.  On the right side, we have
+    // some kind of string.
+
+    const char *left_data = left_side.field->data.original();
+    if( *left_data == ascii_minus || *left_data == ascii_plus )
+      {
+      // The rule for moving a numeric to an alphanumeric is that sign
+      // information is discarded.
+      left_data += 1;
+      }
+
+    tree location_left = gg_string_literal(left_data);
+    tree length_left = build_int_cst_type(
+                                          SIZE_T,
+                                          strlen(left_data));
+
+    if( figconst_right || right_side.all )
+      {
+      size_t nbytes;
+      char *converted;
+      if( figconst_right )
+        {
+        // Comparing an alphanumeric to a figconst
+        // We need to convert the char_right to the left's encoding:
+        converted = __gg__iconverter(right_side.field->codeset.encoding,
+                                     left_side.field->codeset.encoding,
+                                     &char_right,
+                                     1,
+                                     &nbytes);
+        }
+      else
+        {
+        // Comparing an alphanumeric to ALL <literal>
+        converted = __gg__iconverter(right_side.field->codeset.encoding,
+                                     left_side.field->codeset.encoding,
+                                     right_side.field->data.initial,
+                                     right_side.field->data.capacity(),
+                                     &nbytes);
+        }
+      left = gg_define_int();
+      right = gg_define_int(0);
+      gg_call(VOID,
+              "__gg__compare_string_all",
+              gg_get_address_of(left),
+              location_left,
+              length_left,
+              build_int_cst_type(INT, charmap_left->stride()),
+              build_string_literal(nbytes, converted),
+              build_int_cst_type(SIZE_T, nbytes),
+              NULL_TREE);
+      compared = true;
+      }
+    else if( right_side.field->type == FldLiteralA )
+      {
+      // Corner cases.  One grows to dislike them.  Here we are comparing a
+      // FldLiteralN to a FldLiteralA
+      left = gg_define_int();
+      right = gg_define_int(0);
+      tree str = gg_string_literal(right_side.field->data.original());
+      tree length = build_int_cst_type(SIZE_T,
+                                     strlen(right_side.field->data.original()));
+      gg_call( VOID,
+               "__gg__compare_string_1a",
+               gg_get_address_of(left),
+               location_left,
+               length_left,
+               str,
+               length,
+               // NOTE(review): other call sites pass the encoding's space
+               // character here; zero suppresses space-padding semantics
+               // for this literal-vs-literal corner -- confirm intended.
+               integer_zero_node,
+               NULL_TREE);
+      compared = true;
+      }
+    }
+  if( !compared )
+    {
+    // The left side is a fixed-point numeric of some kind.  First, we pick
+    // up the actual numeric value.
+
+    // The type of the variable should be straightforward.  It is not.  There
+    // are various considerations in determining the type.
+    tree type;
+    size_t digits = left_side.field->data.digits;
+    tree value;
+    if( digits == 0 )
+      {
+      // This is a pure binary type.
+      type = tree_type_from_size(left_side.field->data.capacity(),
+                                 left_side.field->attr & signable_e);
+      }
+    else
+      {
+      // There is some specified number of digits in the variable:
+      // But if the variable is scaled, we have to multiply by 10^N, where
+      // N is the number of zeroed P digits:
+      if( left_side.field->data.rdigits < 0 )
+        {
+        digits += -left_side.field->data.rdigits;
+        }
+      type = tree_type_from_digits(digits, left_side.field->attr&signable_e);
+      }
+
+    // We have what we need to get the value:
+    get_binary_value(value, left_side, type);
+
+    // We have corner case to deal with here.  When comparing a numeric to a
+    // alphanumeric that is a figconst zero_value_e, we treat the right side
+    // as a numeric zero.
+
+    if( figconst_right == zero_value_e )
+      {
+      left = gg_define_int();
+      right = gg_define_int(0);
+      gg_assign(left, integer_zero_node);
+      IF( value, lt_op, gg_cast(type, integer_zero_node) )
+        {
+        gg_assign(left, integer_minus_one_node);
+        }
+      ELSE
+        {
+        IF( value, gt_op, gg_cast(type, integer_zero_node) )
+          {
+          gg_assign(left, integer_one_node);
+          }
+        ELSE
+          {
+          }
+        ENDIF
+        }
+      ENDIF
+      compared = true;
+      }
+    else
+      {
+      if( left_side.field->data.rdigits < 0 )
+        {
+        // We need to multiply value by 10^-rdigits
+        gg_assign(value, scale_by_power_of_ten(value,
+                                  build_int_cst_type(INT,
+                                          -left_side.field->data.rdigits)));
+        }
+
+      if( left_side.field->attr&signable_e )
+        {
+        // For numeric-alphabetic comparisons, there are no negative values:
+        gg_assign(value, gg_abs(value));
+        }
+
+      left = gg_define_int();
+      right = gg_define_int(0);
+
+      if( figconst_right || right_side.all )
+        {
+        size_t nbytes;
+        char *converted;
+        if( figconst_right )
+          {
+          converted = __gg__iconverter(right_side.field->codeset.encoding,
+                                       left_side.field->codeset.encoding,
+                                       &char_right,
+                                       1,
+                                       &nbytes);
+          }
+        else
+          {
+          converted = __gg__iconverter(right_side.field->codeset.encoding,
+                                       left_side.field->codeset.encoding,
+                                       right_side.field->data.original(),
+                                       right_side.field->data.capacity(),
+                                       &nbytes);
+          }
+        left = gg_define_int();
+        right = gg_define_int(0);
+        // NOTE(review): 'digits' can be zero here for pure binary fields --
+        // presumably the library derives a width in that case; confirm.
+        gg_call(VOID,
+                "__gg__compare_numeric_all",
+                gg_get_address_of(left),
+                gg_cast(UINT128, value),
+                build_int_cst_type(SIZE_T, digits),
+                build_string_literal(nbytes, converted),
+                build_int_cst_type(SIZE_T, nbytes),
+                build_int_cst_type(INT, left_side.field->codeset.encoding),
+                NULL_TREE);
+        }
+      else
+        {
+        tree location_right;
+        tree length_right;
+        get_location(location_right, right_side);
+        get_length(length_right, right_side);
+
+        gg_call(VOID,
+                "__gg__compare_binary_to_string",
+                gg_get_address_of(left),
+                gg_cast(UINT128, value),
+                build_int_cst_type(SIZE_T, digits),
+                location_right,
+                length_right,
+                build_int_cst_type(INT, right_side.field->codeset.encoding),
+                NULL_TREE);
+        }
+      compared = true;
+      }
+    }
+
+  return compared;
+  }
+
+/* Compare two refers when at least one side is an ADDRESS OF reference.
+
+ Whichever side carries addr_of contributes its data *location*; a side
+ without addr_of contributes its binary numeric value instead. Both
+ operands are then compared as SIZE_T quantities.
+
+ On return, 'left' is a runtime int holding -1/0/+1 and 'right' is a
+ runtime int holding zero, so the caller can feed them to any of the six
+ relational expressions. Always succeeds; returns true. */
+static bool
+addr_of_compare(tree &left,
+ tree &right,
+ const cbl_refer_t &left_side,
+ const cbl_refer_t &right_side)
+ {
+ // One or the other is addr_of
+ tree type = SIZE_T;
+ tree l;
+ tree r;
+ if( left_side.addr_of && !right_side.addr_of )
+ {
+ // ADDRESS OF on the left; right is treated as a SIZE_T numeric value.
+ get_location(l, left_side);
+ get_binary_value(r, right_side, type);
+ }
+ else if( !left_side.addr_of && right_side.addr_of )
+ {
+ // ADDRESS OF on the right; left is treated as a SIZE_T numeric value.
+ get_binary_value(l, left_side, type);
+ get_location(r, right_side);
+ }
+ else
+ {
+ // They are both addr_of.
+ get_location(l, left_side);
+ get_location(r, right_side);
+ }
+ left = gg_define_int();
+ right = gg_define_int(0);
+
+ // Emit runtime code computing signum(l - r) into 'left'.
+ gg_assign(left, integer_zero_node);
+ IF( gg_cast(type, l), lt_op, gg_cast(type, r) )
+ {
+ gg_assign(left, integer_minus_one_node);
+ }
+ ELSE
+ {
+ IF( gg_cast(type, l), gt_op, gg_cast(type, r) )
+ {
+ gg_assign(left, integer_one_node);
+ }
+ ELSE
+ {
+ // Equal: 'left' keeps its initial zero.
+ }
+ ENDIF
+ }
+ ENDIF
+
+ return true;
+ }
+
+/* Compare when the left side is a floating-point operand.
+
+ The caller arranges that if both sides are floats, the left one has the
+ larger (or equal) capacity, so the right side can be widened to the left
+ side's type without losing precision. The right side's value is scaled
+ by the appropriate power of ten so that both trees represent the actual
+ numeric values; 'left' and 'right' are returned as same-typed float
+ trees ready for a relational expression. Always returns true. */
+static bool
+float_compare(tree &left,
+ tree &right,
+ const cbl_refer_t &left_side,
+ const cbl_refer_t &right_side)
+ {
+ // left is a float, and if right is also a float it is smaller than left
+ get_binary_value(left, left_side);
+ tree type = TREE_TYPE(left);
+ tree rightv;
+ get_binary_value(rightv, right_side, type);
+ right = gg_define_variable(type);
+ gg_assign(right, gg_cast(type, rightv));
+
+// gg_printf("KILROY %f %f\n",
+// gg_cast(DOUBLE, left),
+// gg_cast(DOUBLE, right),
+// NULL_TREE);
+
+ if( right_side.field->attr & intermediate_e )
+ {
+ // Intermediate values carry a run-time rdigits in the field record, so
+ // the scale factor must be fetched and applied at run time.
+ tree rdigits = gg_define_variable(INT);
+ gg_assign(rdigits,
+ gg_cast(INT,
+ member(right_side.field->var_decl_node, "rdigits")));
+ gg_assign(right,
+ gg_real_divide(right,
+ gg_cast(type,
+ gg_call_expr(INT128,
+ "__gg__power_of_ten",
+ rdigits,
+ NULL_TREE))));
+ }
+ else
+ {
+ // Non-intermediate: rdigits is known at compile time.
+ int rdigits = right_side.field->data.rdigits;
+ if( right_side.field->attr & scaled_e )
+ {
+ if( rdigits < 0 )
+ {
+ // Scaled with P digits to the right of the number: divide by
+ // 10^|rdigits| via the common path below.
+ rdigits = -rdigits;
+ }
+ else
+ {
+ // Scaled with P digits between the digits and the decimal point:
+ // multiply up by 10^(digits + rdigits) and suppress the division.
+ rdigits += right_side.field->data.digits ;
+ gg_assign(right,
+ gg_multiply(right,
+ gg_cast(type,
+ gg_call_expr(INT128,
+ "__gg__power_of_ten",
+ build_int_cst_type(INT, rdigits),
+ NULL_TREE))));
+ rdigits = 0;
+ }
+ }
+ if( rdigits )
+ {
+ // Convert the integer representation to its true value.
+ gg_assign(right,
+ gg_real_divide(right,
+ gg_cast(type,
+ gg_call_expr(
+ INT128,
+ "__gg__power_of_ten",
+ build_int_cst_type(INT, rdigits),
+ NULL_TREE))));
+ }
+ }
+
+ return true;
+ }
+
+/* Compare when the left side is a FldClass (a class-condition such as
+ NUMERIC, ALPHABETIC, or a user-defined CLASS).
+
+ Delegates to the library routine __gg__compare_field_class, which tests
+ the right side's data (location/length) against the class named by the
+ left side's field. 'left' receives the runtime result and 'right' is
+ zero, so the caller's relational expression reduces to a test of the
+ library routine's return value. Always returns true. */
+static bool
+compare_class( tree &left,
+ tree &right,
+ const cbl_refer_t &left_side,
+ const cbl_refer_t &right_side)
+ {
+ // Left side is FldClass
+ left = gg_define_int();
+ right = gg_define_int(0);
+ tree right_loc;
+ tree right_length;
+ get_location(right_loc, right_side);
+ get_length(right_length, right_side);
+ gg_assign(left,
+ gg_call_expr(INT,
+ "__gg__compare_field_class",
+ gg_get_address_of(right_side.field->var_decl_node),
+ right_loc,
+ right_length,
+ gg_get_address_of(left_side.field->var_decl_node),
+ NULL_TREE));
+ return true;
+ }
+
+/* Top-level dispatcher for COBOL relational comparisons.
+
+ Given two refers, decides which specialized comparison routine applies
+ and produces the 'left' and 'right' trees that the caller feeds into one
+ of the six relational expressions (EQ_EXPR, LT_EXPR, ...). The helpers
+ are tried in a fixed priority order; each returns true once it has
+ established 'left' and 'right'. Helpers called with the operands
+ swapped establish the result with the roles reversed. If no helper
+ claims the pair of operand types, that is a front-end logic error and we
+ hit gcc_unreachable(). */
+void
+cobol_compare_relop(tree &left,
+ tree &right,
+ const cbl_refer_t &left_side,
+ const cbl_refer_t &right_side)
+ {
+ // This routine figures out how to compare left_side to right_side, and
+ // returns the trees 'left' and 'right' in numeric form that can be turned
+ // into a conditional expression.
+
+ bool compared = false;
+
+ // Class conditions first; either operand may be the FldClass.
+ if( left_side.field->type == FldClass )
+ {
+ compared = compare_class(left, right, left_side, right_side);
+ }
+
+ if( !compared && right_side.field->type == FldClass )
+ {
+ compared = compare_class(right, left, right_side, left_side);
+ }
+
+ // ADDRESS OF comparisons take precedence over numeric/alpha handling.
+ if(!compared && (left_side.addr_of || right_side.addr_of) )
+ {
+ compared = addr_of_compare(left, right, left_side, right_side);
+ }
+
+ if( !compared
+ && comparably_numeric(left_side) && comparably_numeric(right_side) )
+ {
+ compared = numeric_compare(left, right, left_side, right_side);
+ }
+
+ if( !compared
+ && comparably_alpha(left_side) && comparably_alpha(right_side) )
+ {
+ // alpha_compare can be order-sensitive; if the first orientation
+ // declines, retry with the operands swapped.
+ compared = alpha_compare(left, right, left_side, right_side);
+ if( !compared )
+ {
+ compared = alpha_compare(right, left, right_side, left_side);
+ }
+ }
+
+ // Mixed numeric/alphanumeric, in either orientation.
+ if( !compared
+ && comparably_numeric(left_side) && comparably_alpha(right_side) )
+ {
+ compared = numeric_alpha_compare(left, right, left_side, right_side);
+ }
+
+ if( !compared
+ && comparably_numeric(right_side) && comparably_alpha(left_side) )
+ {
+ compared = numeric_alpha_compare(right, left, right_side, left_side);
+ }
+
+ // float/float: put the wider operand on the left so the narrower one can
+ // be widened without precision loss (see float_compare).
+ if( !compared
+ && left_side.field->type == FldFloat
+ && right_side.field->type == FldFloat)
+ {
+ if( left_side.field->data.capacity() >= right_side.field->data.capacity() )
+ {
+ compared = float_compare(left, right, left_side, right_side);
+ }
+ else
+ {
+ compared = float_compare(right, left, right_side, left_side);
+ }
+ }
+
+ // float/non-float (the float/float case was consumed above).
+ if( !compared
+ && (left_side.field->type == FldFloat
+ || right_side.field->type == FldFloat) )
+ {
+ if( left_side.field->type == FldFloat )
+ {
+ compared = float_compare(left, right, left_side, right_side);
+ }
+ else
+ {
+ compared = float_compare(right, left, right_side, left_side);
+ }
+ }
+
+ if( !compared )
+ {
+ // Every legal pairing should have been handled above.
+ gcc_unreachable();
+ }
+ }
--- /dev/null
+/*
+ * Copyright (c) 2021-2026 Symas Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of the Symas Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _COMPARE_H_
+#define _COMPARE_H_
+
+/* The following routine looks at the two_refers. It creates and returns the
+ "left" and "right" var_decl trees, which can be fed directly into one of
+ the six relational expressions like EQ_EXPR, LT_EXPR, and so on. */
+
+void cobol_compare_relop( tree &left,
+ tree &right,
+ const cbl_refer_t &left_side,
+ const cbl_refer_t &right_side);
+
+#endif
#include "show_parse.h"
#include "fold-const.h"
#include "realmpfr.h"
+#include "compare.h"
extern int yylineno;
static tree label_list_back_label;
#ifdef ENABLE_HIJACKING
-#pragma message "HIJACKING IS ENABLED - It should be disabled for release"
+//#pragma message "HIJACKING IS ENABLED - It should be disabled for release"
static bool hijacked = false; // Indicates a DUBNER hijacking is in progress.
static void hijack_for_development(const char *funcname);
static void hijacker();
void
treeplet_fill_source(TREEPLET &treeplet, const cbl_refer_t &refer)
{
- treeplet.pfield = gg_get_address_of(refer.field->var_decl_node);
- treeplet.offset = refer_offset(refer);
- treeplet.length = refer_size_source(refer);
+ if( refer_is_clean(refer) )
+ {
+ treeplet.pfield = gg_get_address_of(refer.field->var_decl_node);
+ treeplet.offset = size_t_zero_node;
+ treeplet.length = refer_size_source(refer);
+ }
+ else
+ {
+ treeplet.pfield = gg_get_address_of(refer.field->var_decl_node);
+ treeplet.offset = refer_offset(refer);
+ treeplet.length = refer_size_source(refer);
+ }
}
tree file_static_variable(tree type, const char *v)
get_power_of_ten(rdigits)))));
// And we need to throw away any digits to the left of the leftmost digits:
- // At least, we need to do so in principl. I am deferring this problem until
+ // At least, we need to do so in principle. I am deferring this problem until
// I understand it better.
// We now have a floating point value that has been multiplied by 10**rdigits
return retval;
}
-static void
-normal_normal_compare(bool debugging,
- tree return_int,
- cbl_refer_t *left_side_ref,
- cbl_refer_t *right_side_ref,
- tree left_side,
- tree right_side )
- {
- Analyze();
-
- // If a value is intermediate_e, then the rdigits can vary at run-time, so
- // we can't rely on the compile-time rdigits.
-
- bool left_intermediate = (left_side_ref->field->attr & intermediate_e);
- bool right_intermediate = (right_side_ref->field->attr & intermediate_e);
-
- if( debugging )
- {
- gg_printf("normal_normal_compare(): left_intermediate/right_intermediate %d/%d\n",
- left_intermediate ? integer_one_node : integer_zero_node ,
- right_intermediate ? integer_one_node : integer_zero_node ,
- NULL_TREE);
- }
-
- if( !left_intermediate && !right_intermediate )
- {
- // Yay! Both sides have fixed rdigit values.
-
- int adjust = get_scaled_rdigits(left_side_ref->field)
- - get_scaled_rdigits(right_side_ref->field);
- if( adjust > 0 )
- {
- // We need to make right_side bigger to match the scale of left_side
- scale_by_power_of_ten_N(right_side, adjust);
- }
- else if( adjust < 0 )
- {
- // We need to make left_side bigger to match the scale of right_side
- scale_by_power_of_ten_N(left_side, -adjust);
- }
- }
- else
- {
- // At least one side is right_intermediate
- bool needs_adjusting;
-
- tree adjust;
- if( !left_intermediate && right_intermediate )
- {
- // left is fixed, right is intermediate
- adjust = gg_define_int();
- gg_assign(adjust,
- build_int_cst_type( INT,
- get_scaled_rdigits(left_side_ref->field)));
-
- gg_assign(adjust,
- gg_subtract(adjust,
- gg_cast(INT,
- member(right_side_ref->field->var_decl_node,
- "rdigits"))));
- needs_adjusting = true;
- }
- else if( left_intermediate && !right_intermediate )
- {
- // left is intermediate, right is fixed
- adjust = gg_define_int();
- gg_assign(adjust, gg_cast(INT, member(left_side_ref->field, "rdigits")));
- gg_assign(adjust,
- gg_subtract(adjust,
- build_int_cst_type( INT,
- get_scaled_rdigits(right_side_ref->field))));
- needs_adjusting = true;
- }
- else // if( left_intermediate && right_intermediate )
- {
- // Both sides are intermediate_e
- adjust = gg_define_int();
- gg_assign(adjust, gg_cast(INT, member(left_side_ref->field, "rdigits")));
- gg_assign(adjust,
- gg_subtract(adjust,
- gg_cast(INT,
- member(right_side_ref->field, "rdigits"))));
- needs_adjusting = true;
- }
-
- if( needs_adjusting )
- {
- if( debugging )
- {
- gg_printf("normal_normal_compare(): The value of adjust is %d\n",
- adjust,
- NULL_TREE);
- }
- IF( adjust, gt_op, integer_zero_node )
- {
- // The right side needs to be scaled up
- scale_by_power_of_ten(right_side, adjust);
- }
- ELSE
- {
- IF( adjust, lt_op, integer_zero_node )
- {
- // The left side needs to be scaled up
- scale_by_power_of_ten(left_side, gg_negate(adjust));
- }
- ELSE
- ENDIF
- }
- ENDIF
- }
- }
-
- if( TREE_TYPE(left_side) != TREE_TYPE(right_side) )
- {
- // One is signed, the other isn't:
- if( left_side_ref->field->attr & signable_e )
- {
- // The left side can be negative. If it is, the return value has to be
- // -1 for left < right
- IF( left_side, lt_op, gg_cast(TREE_TYPE(left_side), integer_zero_node) )
- {
- if( debugging )
- {
- gg_printf("normal_normal_compare(): different types returning -1\n",
- NULL_TREE);
- }
- gg_assign( return_int, integer_minusone_node);
- }
- ELSE
- {
- // Both sides are positive, allowing a direct comparison.
- IF( gg_cast(TREE_TYPE(right_side), left_side), lt_op, right_side )
- {
- if( debugging )
- {
- gg_printf("normal_normal_compare(): returning -1\n", NULL_TREE);
- }
- gg_assign( return_int, integer_minusone_node);
- }
- ELSE
- {
- IF( gg_cast(TREE_TYPE(right_side), left_side), gt_op, right_side)
- {
- if( debugging )
- {
- gg_printf("normal_normal_compare(): returning +1\n", NULL_TREE);
- }
- gg_assign( return_int, integer_one_node);
- }
- ELSE
- {
- if( debugging )
- {
- gg_printf("normal_normal_compare(): returning zero\n", NULL_TREE);
- }
- gg_assign( return_int, integer_zero_node);
- }
- ENDIF
- }
- ENDIF
- }
- ENDIF
- }
- else
- {
- // The right side can be negative. If it is, the return value has to be
- // +1 for left > right
- IF( right_side, lt_op, gg_cast(TREE_TYPE(right_side), integer_zero_node) )
- {
- if( debugging )
- {
- gg_printf("normal_normal_compare(): different types returning +1\n", NULL_TREE);
- }
- gg_assign( return_int, integer_one_node);
- }
- ELSE
- {
- // Both sides are positive, allowing a direct comparison.
- IF( left_side, lt_op, gg_cast(TREE_TYPE(left_side), right_side) )
- {
- if( debugging )
- {
- gg_printf("normal_normal_compare(): returning -1\n", NULL_TREE);
- }
- gg_assign( return_int, integer_minusone_node);
- }
- ELSE
- {
- IF( left_side, gt_op, gg_cast(TREE_TYPE(left_side), right_side) )
- {
- if( debugging )
- {
- gg_printf("normal_normal_compare(): returning +1\n", NULL_TREE);
- }
- gg_assign( return_int, integer_one_node);
- }
- ELSE
- {
- if( debugging )
- {
- gg_printf("normal_normal_compare(): returning zero\n", NULL_TREE);
- }
- gg_assign( return_int, integer_zero_node);
- }
- ENDIF
- }
- ENDIF
- }
- ENDIF
- }
- }
- else
- {
- // Both sides are the same type, allowing a direct comparison.
- IF( left_side, lt_op, right_side )
- {
- if( debugging )
- {
- gg_printf("normal_normal_compare(): returning -1\n", NULL_TREE);
- }
- gg_assign( return_int, integer_minusone_node);
- }
- ELSE
- {
- IF( left_side, gt_op, right_side )
- {
- if( debugging )
- {
- gg_printf("normal_normal_compare(): returning +1\n", NULL_TREE);
- }
- gg_assign( return_int, integer_one_node);
- }
- ELSE
- {
- if( debugging )
- {
- gg_printf("normal_normal_compare(): returning zero\n", NULL_TREE);
- }
- gg_assign( return_int, integer_zero_node);
- }
- ENDIF
- }
- ENDIF
- }
- }
-
-static
-tree
+static tree
tree_type_from_field_type(cbl_field_t *field, size_t &nbytes)
{
/* This routine is used to determine what action is taken with type of a
return retval;
}
-static void
-compare_binary_binary(tree return_int,
- cbl_refer_t *left_side_ref,
- cbl_refer_t *right_side_ref )
- {
- Analyze();
- static const bool debugging = false;
-
- // We know the two sides have binary values that can be extracted.
- tree left_side;
- tree right_side;
-
- // Let's check for the simplified case where both left and right sides are
- // little-endian binary values:
-
- if( is_pure_integer(left_side_ref->field)
- && is_pure_integer(right_side_ref->field) )
- {
- size_t left_bytes;
- tree left_type = tree_type_from_field_type(left_side_ref->field,
- left_bytes);
- size_t right_bytes;
- tree right_type = tree_type_from_field_type(right_side_ref->field,
- right_bytes);
- tree larger;
- if( TREE_INT_CST_LOW(TYPE_SIZE(left_type))
- > TREE_INT_CST_LOW(TYPE_SIZE(right_type)) )
- {
- larger = left_type;
- }
- else
- {
- larger = right_type;
- }
- left_side = get_binary_value_tree(larger,
- NULL,
- *left_side_ref);
- right_side = get_binary_value_tree(larger,
- NULL,
- *right_side_ref);
- IF( left_side, eq_op, right_side )
- {
- gg_assign(return_int, integer_zero_node);
- }
- ELSE
- {
- IF( left_side, lt_op, right_side )
- {
- gg_assign(return_int, integer_minusone_node);
- }
- ELSE
- {
- gg_assign(return_int, integer_one_node);
- }
- ENDIF
- }
- ENDIF
- return;
- }
-
- // Use SIZE128 when we need two 64-bit registers to hold the value. All
- // others fit into 64-bit LONG with pretty much the same efficiency.
-
- size_t left_bytes_needed = get_bytes_needed(left_side_ref->field);
- size_t right_bytes_needed = get_bytes_needed(right_side_ref->field);
-
- if( left_bytes_needed >= SIZE128
- || right_bytes_needed >= SIZE128 )
- {
- if( debugging )
- {
- gg_printf("compare_binary_binary(): using int128\n", NULL_TREE);
- }
-
- left_side = gg_define_int128();
- right_side = gg_define_int128();
- }
- else
- {
- if( debugging )
- {
- gg_printf("compare_binary_binary(): using int64\n", NULL_TREE);
- }
- left_side = gg_define_variable( left_side_ref->field->has_attr(signable_e) ? LONG : ULONG );
- right_side = gg_define_variable(right_side_ref->field->has_attr(signable_e) ? LONG : ULONG );
- }
-
- //tree dummy = gg_define_int();
- static tree hilo_left = gg_define_variable(INT, "..cbb_hilo_left", vs_file_static);
- static tree hilo_right = gg_define_variable(INT, "..cbb_hilo_right", vs_file_static);
-
- get_binary_value(left_side,
- NULL,
- left_side_ref->field,
- refer_offset(*left_side_ref),
- hilo_left);
- get_binary_value(right_side,
- NULL,
- right_side_ref->field,
- refer_offset(*right_side_ref),
- hilo_right);
-
- IF( hilo_left, eq_op, integer_one_node )
- {
- // left side is hi-value
- IF( hilo_right, eq_op, integer_one_node )
- {
- if( debugging )
- {
- gg_printf("compare_binary_binary(): left and right are HIGH-VALUE\n", NULL_TREE);
- }
- gg_assign(return_int, integer_zero_node);
- }
- ELSE
- {
- if( debugging )
- {
- gg_printf("compare_binary_binary(): left is HIGH-VALUE\n", NULL_TREE);
- }
- gg_assign(return_int, integer_one_node);
- }
- ENDIF
- }
- ELSE
- {
- // left is not HIGH-VALUE:
- IF( hilo_left, eq_op, integer_minus_one_node )
- {
- // left side is LOW-VALUE
- IF( hilo_right, eq_op, integer_minus_one_node )
- {
- if( debugging )
- {
- gg_printf("compare_binary_binary(): left and right are LOW-VALUE\n", NULL_TREE);
- }
- gg_assign(return_int, integer_zero_node);
- }
- ELSE
- {
- // Right side is not low-value
- if( debugging )
- {
- gg_printf("compare_binary_binary(): left is LOW-VALUE\n", NULL_TREE);
- }
- gg_assign(return_int, integer_one_node);
- }
- ENDIF
- }
- ELSE
- {
- // Left side is normal
- IF( hilo_right, eq_op, integer_one_node )
- {
- if( debugging )
- {
- gg_printf("compare_binary_binary(): right is HIGH-VALUE\n", NULL_TREE);
- }
- gg_assign(return_int, integer_minus_one_node);
- }
- ELSE
- {
- IF( hilo_right, eq_op, integer_minus_one_node )
- {
- if( debugging )
- {
- gg_printf("compare_binary_binary(): right is LOW-VALUE\n", NULL_TREE);
- }
- gg_assign(return_int, integer_one_node);
- }
- ELSE
- {
- if( debugging )
- {
- gg_printf("compare_binary_binary(): left and right are normal\n", NULL_TREE);
- }
- normal_normal_compare(debugging,
- return_int,
- left_side_ref,
- right_side_ref,
- left_side,
- right_side
- );
- }
- ENDIF
- }
- ENDIF
- }
- ENDIF
- }
- ENDIF
- }
-
-#define DEBUG_COMPARE
-
-static void
-cobol_compare( tree return_int,
- cbl_refer_t &left_side_ref,
- cbl_refer_t &right_side_ref )
- {
- Analyze();
-// gg_printf("cobol_compare %s %s \"%s\" \"%s\"\n",
- // gg_string_literal(left_side_ref.field->name),
- // gg_string_literal(right_side_ref.field->name),
- // member(left_side_ref.field, "data"),
- // gg_string_literal(right_side_ref.field->data.original()),
- // NULL_TREE);
-
- CHECK_FIELD(left_side_ref.field);
- CHECK_FIELD(right_side_ref.field);
- // This routine is in support of conditionals in the COBOL program.
- // It takes two arbitrary COBOL variables from the parser and compares them
- // according to a nightmarish set of rules.
-
- // See ISO/IEC 1989:2014(E) section 8.8.4.1.1 (page 153)
-
- // The return_int value is -1 when left_side < right_side
- // 0 left_side == right_side
- // 1 left_side > right_side
-
- bool compared = false;
-
- // In the effort to convert to in-line GIMPLE comparisons, I became flummoxed
- // by comparisons involving REFMODs. This will have to be revisited, but for
- // now I decided to keep using the libgcobol code, which according to NIST
- // works properly.
-
- if( !left_side_ref.refmod.from
- && !left_side_ref.refmod.len
- && !right_side_ref.refmod.from
- && !right_side_ref.refmod.len )
- {
- cbl_refer_t *lefty = &left_side_ref;
- cbl_refer_t *righty = &right_side_ref;
-
- int ntries = 1;
- while( ntries <= 2 )
- {
- switch( lefty->field->type )
- {
- case FldLiteralN:
- {
- switch( righty->field->type )
- {
- case FldLiteralN:
- case FldNumericBinary:
- case FldNumericBin5:
- case FldPacked:
- case FldNumericDisplay:
- case FldIndex:
- compare_binary_binary(return_int, lefty, righty);
- compared = true;
- break;
-
- case FldGroup:
- case FldAlphanumeric:
- {
- // Comparing a FldLiteralN to an alphanumeric. The alphanumeric
- // is encoded in its codeset.encoding, but the FldLiteralN is,
- // in accordance with the rules in cbl_field_t::internalize,
- // encoded in the source-code encoding. The routine we are about
- // to call assumes that the literal string is encoded the same
- // as the alphanumeric, so we have to make it match.
- size_t outlength;
- cbl_encoding_t enc_right = righty->field->codeset.encoding;
- char *converted = __gg__iconverter(
- DEFAULT_SOURCE_ENCODING,
- enc_right,
- lefty->field->data.original(),
- strlen(lefty->field->data.original()),
- &outlength );
- gg_assign( return_int, gg_call_expr(
- INT,
- "__gg__literaln_alpha_compare",
- gg_string_literal(converted),
- gg_get_address_of(righty->field->var_decl_node),
- refer_offset(*righty),
- refer_size_source( *righty),
- build_int_cst_type(INT,
- (righty->all ? REFER_T_MOVE_ALL : 0)),
- NULL_TREE));
- compared = true;
- break;
- }
-
- case FldLiteralA:
- {
- // Comparing a FldLiteralN to an FldLiteralA.
- // lefty->field->data.original() is the numeric string in ASCII.
- // righty->field->data.original() is original alphanumeric
- // string in ASCII.
- int icmp = strcmp(lefty->field->data.original(),
- righty->field->data.original());
- gg_assign(return_int, build_int_cst_type(INT, icmp));
- compared = true;
- break;
- }
-
-
- default:
- break;
- }
- break;
- }
-
- case FldNumericBin5:
- case FldNumericBinary:
- case FldPacked:
- case FldNumericDisplay:
- {
- switch( righty->field->type )
- {
- case FldNumericBin5:
- case FldNumericBinary:
- case FldPacked:
- case FldNumericDisplay:
- {
- compare_binary_binary(return_int, lefty, righty);
- compared = true;
- break;
- }
-
- default:
- break;
- }
- break;
- }
-
- default:
- break;
- }
- if( compared )
- {
- break;
- }
- // We weren't able to compare left/right. Let's see if we understand
- // right/left
- std::swap(lefty, righty);
- ntries += 1;
- }
-
- if( compared && ntries == 2 )
- {
- // We have a successful comparision, but we managed it on the second try,
- // which means our result has the wrong sign. Fix it:
- gg_assign(return_int, gg_negate(return_int));
- }
- }
-
- if( !compared )
- {
- // None of our explicit comparisons up above worked, so we revert to the
- // general case:
- int leftflags = (left_side_ref.all ? REFER_T_MOVE_ALL : 0)
- + (left_side_ref.addr_of ? REFER_T_ADDRESS_OF : 0)
- + (left_side_ref.refmod.from ? REFER_T_REFMOD : 0);
- int rightflags = (right_side_ref.all ? REFER_T_MOVE_ALL : 0)
- + (right_side_ref.addr_of ? REFER_T_ADDRESS_OF : 0)
- + (right_side_ref.refmod.from ? REFER_T_REFMOD : 0);
-
- gg_assign( return_int, gg_call_expr(
- INT,
- "__gg__compare",
- gg_get_address_of(left_side_ref.field->var_decl_node),
- refer_offset(left_side_ref),
- refer_size_source( left_side_ref),
- build_int_cst_type(INT, leftflags),
- gg_get_address_of(right_side_ref.field->var_decl_node),
- refer_offset(right_side_ref),
- refer_size_source( right_side_ref),
- build_int_cst_type(INT, rightflags),
- integer_zero_node,
- NULL_TREE));
- // compared = true; // Commented out to quiet cppcheck
- }
- }
-
static char *
combined_name(const cbl_label_t *label)
{
SET_VAR_DECL(var_decl_call_parameter_lengths , build_array_type(SIZE_T, NULL),
"__gg__call_parameter_lengths");
- SET_VAR_DECL(var_decl_arithmetic_rounds_size , SIZE_T , "__gg__arithmetic_rounds_size");
- SET_VAR_DECL(var_decl_arithmetic_rounds , INT_P , "__gg__arithmetic_rounds");
- SET_VAR_DECL(var_decl_fourplet_flags_size , SIZE_T , "__gg__fourplet_flags_size");
- SET_VAR_DECL(var_decl_fourplet_flags , INT_P , "__gg__fourplet_flags");
-
- SET_VAR_DECL(var_decl_treeplet_1f , cblc_field_pp_type_node , "__gg__treeplet_1f" );
- SET_VAR_DECL(var_decl_treeplet_1o , SIZE_T_P , "__gg__treeplet_1o" );
- SET_VAR_DECL(var_decl_treeplet_1s , SIZE_T_P , "__gg__treeplet_1s" );
- SET_VAR_DECL(var_decl_treeplet_2f , cblc_field_pp_type_node , "__gg__treeplet_2f" );
- SET_VAR_DECL(var_decl_treeplet_2o , SIZE_T_P , "__gg__treeplet_2o" );
- SET_VAR_DECL(var_decl_treeplet_2s , SIZE_T_P , "__gg__treeplet_2s" );
- SET_VAR_DECL(var_decl_treeplet_3f , cblc_field_pp_type_node , "__gg__treeplet_3f" );
- SET_VAR_DECL(var_decl_treeplet_3o , SIZE_T_P , "__gg__treeplet_3o" );
- SET_VAR_DECL(var_decl_treeplet_3s , SIZE_T_P , "__gg__treeplet_3s" );
- SET_VAR_DECL(var_decl_treeplet_4f , cblc_field_pp_type_node , "__gg__treeplet_4f" );
- SET_VAR_DECL(var_decl_treeplet_4o , SIZE_T_P , "__gg__treeplet_4o" );
- SET_VAR_DECL(var_decl_treeplet_4s , SIZE_T_P , "__gg__treeplet_4s" );
SET_VAR_DECL(var_decl_nop , INT , "__gg__nop" );
SET_VAR_DECL(var_decl_main_called , INT , "__gg__main_called" );
SET_VAR_DECL(var_decl_entry_index , SIZE_T , "__gg__entry_index" );
}
#ifdef ENABLE_HIJACKING
- if( strcmp(funcname, "dubner") == 0)
+ if( strcmp(funcname, "dubner_h") == 0)
{
fprintf(stderr, "This is a DUBNER hijacking\n");
hijack_for_development(funcname);
capacity = 1;
}
- value *= sign;
-
- // One last adjustment. The number is signable, so the binary value
- // is going to be treated as twos complement. That means that the highest
- // bit has to be 1 for negative signable numbers, and 0 for positive. If
- // necessary, adjust capacity up by one byte so that the variable fits:
-
- if( capacity < 16 && (attr & signable_e) )
+ value *= sign;
+
+ // One last adjustment. The number is signable, so the binary value
+ // is going to be treated as twos complement. That means that the highest
+ // bit has to be 1 for negative signable numbers, and 0 for positive. If
+ // necessary, adjust capacity up by one byte so that the variable fits:
+
+ if( capacity < 16 && (attr & signable_e) )
+ {
+ FIXED_WIDE_INT(128) mask
+ = wi::set_bit_in_zero<FIXED_WIDE_INT(128)>(capacity * 8 - 1);
+ if( wi::neg_p (value) && (value & mask) == 0 )
+ {
+ capacity *= 2;
+ }
+ else if( !wi::neg_p (value) && (value & mask) != 0 )
+ {
+ capacity *= 2;
+ }
+ }
+
+ return value;
+ }
+
+static tree
+data_decl_type_for(cbl_field_t *field)
+ {
+ // The idea behind this function is to provide a useful data_type for the
+ // the data_decl_node for the field. Where we can get it right, we do, like
+ // for little-endian FldNumericBin3. For other types of 1, 2, 4, 8, or 16
+ // bytes, we alias it to uint8_t, uint16_t, and so on. The idea there is to
+ // be able to create efficient GENERIC for doing moves between identical
+ // types. Otherwise we generate a type for an array of uint8_t.
+ tree retval = NULL_TREE;
+ switch(field->type)
+ {
+ case FldGroup:
+ break;
+ case FldAlphanumeric:
+ break;
+ case FldNumericBinary:
+ break;
+ case FldFloat:
+ if( field->attr & ieeedec_e )
+ {
+ cbl_internal_error("%s: called with the unimplemented %s flag for %s",
+ __func__,
+ "IEEE",
+ field->name);
+ }
+ switch(field->data.capacity())
+ {
+ case 4:
+ retval = FLOAT;
+ break;
+ case 8:
+ retval = DOUBLE;
+ break;
+ case 16:
+ retval = FLOAT128;
+ break;
+ }
+ break;
+ case FldPacked:
+ break;
+ case FldNumericBin5:
+ if( field->attr & signable_e )
+ {
+ // Because it is signable, we handle it here. Unsigned fall through
+ // to the default handler
+ switch(field->data.capacity())
+ {
+ case 1:
+ retval = CHAR;
+ break;
+ case 2:
+ retval = SHORT;
+ break;
+ case 4:
+ retval = INT;
+ break;
+ case 8:
+ retval = LONG;
+ break;
+ case 16:
+ retval = INT128;
+ break;
+ default:
+ cbl_internal_error("%s: called with type %s but strange byte count for %s",
+ __func__,
+ cbl_field_type_str(field->type),
+ field->name);
+ retval = NULL_TREE;
+ break;
+ }
+ }
+ break;
+ case FldNumericDisplay:
+ break;
+ case FldNumericEdited:
+ break;
+ case FldAlphaEdited:
+ break;
+ case FldClass:
+ // Doesn't actually need storage, but we give it one character to avoid
+ // having a NULL data pointer.
+ retval = UCHAR;
+ break;
+ case FldIndex:
+ break;
+ case FldPointer:
+ break;
+ default:
+ cbl_internal_error("%s: called with type %s for %s",
+ __func__,
+ cbl_field_type_str(field->type),
+ field->name);
+ retval = NULL_TREE;
+ break;
+ }
+ // At this point, if there isn't already an assigned type, we specify an
+ // unsigned integer scalar if we can, and otherwise an array of uint8_t.
+
+ if( !retval )
{
- FIXED_WIDE_INT(128) mask
- = wi::set_bit_in_zero<FIXED_WIDE_INT(128)>(capacity * 8 - 1);
- if( wi::neg_p (value) && (value & mask) == 0 )
+ size_t bytes_needed = std::max(field->data.memsize,
+ field->data.capacity());
+ switch(bytes_needed)
{
- capacity *= 2;
- }
- else if( !wi::neg_p (value) && (value & mask) != 0 )
- {
- capacity *= 2;
+ case 1:
+ retval = UCHAR;
+ break;
+ case 2:
+ retval = USHORT;
+ break;
+ case 4:
+ retval = UINT;
+ break;
+ case 8:
+ retval = ULONG;
+ break;
+ case 16:
+ retval = UINT128;
+ break;
+ default:
+ retval = build_array_type_nelts(UCHAR, bytes_needed);
+ break;
}
}
- return value;
+ return retval;
}
+
static void
psa_FldLiteralN(struct cbl_field_t *field )
{
uint64_t alphabet_index = symbol_unique_index(symbol_elem_of(&alphabet));
+ current_function->alphabet_in_use = true;
+
switch(alphabet.encoding)
{
default:
parser_display_internal(file_descriptor, refs[i], DISPLAY_NO_ADVANCE);
}
CHECK_FIELD(refs[n-1].field);
- parser_display_internal(file_descriptor,
- refs[n-1],
- advance ? DISPLAY_ADVANCE : DISPLAY_NO_ADVANCE);
+
+ if( upon
+ && upon->id == SYSPUNCH_e
+ && getenv("ASSEMBLER")
+ && refs[n-1].field
+ && refs[n-1].field->type == FldLiteralA )
+ {
+ // That combination means we want to put the text into the assembly
+ // language. This is a compile-time operation, so the field has to be
+ // a FldLiteralA.
+ gg_insert_into_assemblerf( "%s %s",
+ ASM_COMMENT_START,
+ refs[n-1].field->data.original());
+ }
+ else
+ {
+ parser_display_internal(file_descriptor,
+ refs[n-1],
+ advance ? DISPLAY_ADVANCE : DISPLAY_NO_ADVANCE);
+ }
if( needs_closing )
{
gg_close(file_descriptor);
// We need the just_once state because this routine can be called more than
// once. Usually the parser handles it, but we have a "just-in-case" call
// in parser_end_program() that sometimes is necessary.
- if(just_once && strcmp(current_function->our_name, "hijack") == 0)
+ if(just_once && strcmp(current_function->our_name, "hijack_h") == 0)
{
just_once = false;
fprintf(stderr, "This is a HIJACK BEFORE EXIT scenario.\n");
hijacker();
}
#endif
-
// This is the moral equivalent of a C "return xyz;".
// There cannot be both a non-zero exit status and an exception condition.
// Stash the returning variables for use during parser_return()
current_function->returning = returning;
+ cbl_field_t *return_code = cbl_field_of(symbol_at(return_code_register()));
current_function->var_decl_return =
- gg_indirect(gg_cast(SHORT_P,
- member(cbl_field_of(symbol_at(return_code_register()))->var_decl_node,
- "data")));
+ gg_indirect(gg_cast(SHORT_P,
+ member(return_code->var_decl_node, "data")));
if( gg_trans_unit.function_stack.size() == 1 )
{
{
}
ENDIF
-
current_function->pseudo_return_index =
gg_define_variable(SIZE_T, "_pseudo_return_index", vs_static);
}
}
- switch(logop)
- {
- case and_op:
- case or_op:
- case xor_op:
- case xnor_op:
- CHECK_FIELD(a);
- break;
- default:
- break;
- }
-
// This routine takes two conditionals and a logical operator. From those,
// it creates and returns another conditional:
switch( logop )
{
case and_op:
- gg_assign(tgt->var_decl_node, gg_build_logical_expression(
- a->var_decl_node,
- and_op,
- b->var_decl_node));
+ tgt->var_decl_node = gg_build_logical_expression(
+ a->var_decl_node,
+ and_op,
+ b->var_decl_node);
break;
case or_op:
- gg_assign(tgt->var_decl_node, gg_build_logical_expression(
+ tgt->var_decl_node = gg_build_logical_expression(
a->var_decl_node,
or_op,
- b->var_decl_node));
+ b->var_decl_node);
break;
case not_op:
- gg_assign(tgt->var_decl_node, gg_build_logical_expression(
+ tgt->var_decl_node = gg_build_logical_expression(
NULL,
not_op,
- b->var_decl_node));
+ b->var_decl_node);
break;
case xor_op:
- gg_assign(tgt->var_decl_node, gg_build_logical_expression(
+ tgt->var_decl_node = gg_build_logical_expression(
a->var_decl_node,
xor_op,
- b->var_decl_node));
+ b->var_decl_node);
break;
case xnor_op:
{
- gg_assign( tgt->var_decl_node,
+ tgt->var_decl_node =
gg_build_logical_expression(a->var_decl_node,
xor_op,
- b->var_decl_node));
+ b->var_decl_node);
// I need to negate the result.
- gg_assign(tgt->var_decl_node, gg_build_logical_expression(
+ tgt->var_decl_node = gg_build_logical_expression(
NULL,
not_op,
- tgt->var_decl_node));
+ tgt->var_decl_node);
}
break;
case true_op:
- gg_assign(tgt->var_decl_node, boolean_true_node);
+ tgt->var_decl_node = boolean_true_node;
break;
case false_op:
- gg_assign(tgt->var_decl_node, boolean_false_node);
+ tgt->var_decl_node = boolean_false_node;
break;
}
tgt->name);
}
- static tree comp_res = gg_define_variable(INT, "..pr_comp_res", vs_file_static);
- cobol_compare(comp_res, aref, bref);
-
- // comp_res is negative, zero, position for less-than, equal-to, greater-than
-
- // So, we simply compare the result of the comparison to zero using the relop
- // we were given to turn it into a TRUE/FALSE
- gg_assign( tgt->var_decl_node,
- gg_build_relational_expression( comp_res,
- relop,
- integer_zero_node));
+ tree left;
+ tree right;
+ cobol_compare_relop(left, right, aref, bref);
+ tgt->var_decl_node = gg_build_relational_expression(left,
+ relop,
+ right);
TRACE1
{
TRACE1_INDENT
// This routine builds the relational expression and returns the TREE as
// a conditional:
-
if( tgt->type != FldConditional )
{
cbl_internal_error("%<parser_relop()%> was called with variable %s, "
}
tree tree_a = build_int_cst_type(LONG, avalue);
- static tree tree_b = gg_define_variable(LONG, "..prl_tree_b", vs_file_static);
+ tree tree_b = gg_define_variable(LONG);
get_binary_value( tree_b,
NULL,
bref.field,
refer_offset(bref) );
- static tree comp_res = gg_define_variable(LONG, "..prl_comp_res", vs_file_static);
+ tree comp_res = gg_define_variable(LONG);
gg_assign(comp_res, gg_subtract(tree_a, tree_b));
// comp_res is negative, zero, position for less-than, equal-to, greater-than
gcc_assert(pcbl_index == n_resolveds);
// We have built up an array of integers, and an array of cbl_refer_t.
- build_array_of_treeplets(1, pcbl_index, pcbl_refers.data());
+ tree params = build_array_of_referlets(pcbl_index, pcbl_refers.data());
// Do the actual call:
charmap_t *charmap = __gg__get_charmap(identifier_1.field->codeset.encoding);
"__gg__inspect_format_1_sbc",
backward ? integer_one_node : integer_zero_node,
integers,
+ params,
NULL_TREE);
}
else
"__gg__inspect_format_1",
backward ? integer_one_node : integer_zero_node,
integers,
+ params,
NULL_TREE);
}
}
}
}
- build_array_of_treeplets(1, pcbl_index, pcbl_refers.data());
+ tree params = build_array_of_referlets(pcbl_index, pcbl_refers.data());
// Do the actual call:
gg_call(VOID,
"__gg__inspect_format_2",
backward ? integer_one_node : integer_zero_node,
integers,
+ params,
NULL_TREE);
}
tree control = gg_array_of_bytes(argc, control_bytes);
- build_array_of_treeplets(1, argc, arg1.data());
- build_array_of_treeplets(2, argc, arg2.data());
+ tree ref_arg1 = build_array_of_referlets(argc, arg1.data());
+ tree ref_arg2 = build_array_of_referlets(argc, arg2.data());
gg_call(VOID,
"__gg__substitute",
+ ref_arg1,
+ ref_arg2,
gg_get_address_of(f->var_decl_node),
gg_get_address_of(ref1.field->var_decl_node),
refer_offset(ref1),
store_location_stuff(function_name);
tree ncount = build_int_cst_type(SIZE_T, nrefs);
- build_array_of_fourplets(1, nrefs, refs);
+ tree refers = build_array_of_refers(nrefs, refs);
gg_call(VOID,
function_name,
gg_get_address_of(tgt->var_decl_node),
ncount,
+ refers,
NULL_TREE);
TRACE1
TRACE1_INDENT
TRACE1_REFER("parameter: ", ref1, "")
}
- gg_get_address_of(tgt->var_decl_node);
- gg_get_address_of(ref1.field->var_decl_node);
- refer_offset(ref1);
- refer_size_source(ref1);
gg_call(VOID,
function_name,
gg_assign(bsearch->left, build_int_cst_type(LONG, 1));
depending_on_value(bsearch->right, current);
- // Create the variable that will take the compare result.
- bsearch->compare_result = gg_define_int();
-
// We now jump to the top of the binary testing loop, which comes right
// after the labels where we handle non-equal cases:
gg_append_statement(bsearch->top.go_to);
void
parser_bsearch_when(cbl_label_t* name,
- cbl_refer_t key,
- cbl_refer_t sarg,
+ const cbl_refer_t &key,
+ const cbl_refer_t &sarg,
bool ascending)
{
Analyze();
}
cbl_bsearch_t *bsearch = name->structs.bsearch;
+ tree left;
+ tree right;
if( ascending )
{
- cobol_compare( bsearch->compare_result,
- key,
- sarg );
+ cobol_compare_relop(left, right, key, sarg);
}
else
{
- cobol_compare( bsearch->compare_result,
- sarg,
- key );
+ cobol_compare_relop(left, right, sarg, key);
}
- IF( bsearch->compare_result, lt_op, integer_zero_node )
- // The key is smaller than sarg:
- gg_append_statement(bsearch->too_small.go_to);
- ELSE
- ENDIF
- IF( bsearch->compare_result, gt_op, integer_zero_node )
- // The key is larger than sarg:
- gg_append_statement(bsearch->too_big.go_to);
+ IF( left, lt_op, right )
+ {
+ gg_append_statement(bsearch->too_small.go_to);
+ }
ELSE
+ {
+ IF( left, gt_op, right )
+ {
+ gg_append_statement(bsearch->too_big.go_to);
+ }
+ ELSE
+ {
+ }
+ ENDIF
+ }
ENDIF
// We are at the Goldilocks point. The clause has been satisfied with
tree t_alls = build_string_literal(ndelimited+1, alls);
- build_array_of_treeplets(1, ndelimited, delims.data());
- build_array_of_treeplets(2, noutputs, outputs);
- build_array_of_treeplets(3, noutputs, delimiters);
- build_array_of_treeplets(4, noutputs, counts);
+ tree ref_data = build_array_of_referlets(ndelimited, delims.data());
+ tree ref_outputs = build_array_of_referlets(noutputs, outputs);
+ tree ref_delimiters = build_array_of_referlets(noutputs, delimiters);
+ tree ref_counts = build_array_of_referlets(noutputs, counts);
tree t_overflow = gg_define_int();
gg_assign(t_overflow,
gg_call_expr( INT,
"__gg__unstring",
+ ref_data,
+ ref_outputs,
+ ref_delimiters,
+ ref_counts,
gg_get_address_of(src.field->var_decl_node),
refer_offset(src),
refer_size_source(src),
size_t *integers = static_cast<size_t *>(xmalloc((nsource+1)*sizeof(size_t)));
gcc_assert(integers);
- // Count up how many treeplets we are going to need:
+ // Count up how many referlets we are going to need:
size_t cblc_count = 2; // tgt and pointer
for(size_t i=0; i<nsource; i++)
{
gcc_assert(index_cblc == cblc_count);
tree pintegers = build_array_of_size_t( index_int, integers);
-
- build_array_of_treeplets(1, index_cblc, refers.data());
+ tree referlets = build_array_of_referlets(index_cblc, refers.data());
tree t_overflow = gg_define_int();
gg_assign(t_overflow, gg_call_expr( INT,
"__gg__string",
pintegers,
+ referlets,
NULL_TREE));
gg_free(pintegers);
}
else
{
- // Because no explicit returning value is expected, we call the designated
- // function and assign the return value to our RETURN-CODE
+ // There is no explicit location to assign the returned value.
push_program_state();
- gg_assign(current_function->var_decl_return, gg_cast(SHORT, call_expr));
+ if( dialect_ibm() )
+ {
+ // Because no explicit returning value is expected, we call the
+ // designated function and assign the return value to our RETURN-CODE.
+ gg_assign(current_function->var_decl_return, gg_cast(SHORT, call_expr));
+ }
+ else
+ {
+      // Because it is not IBM, we execute the called function and ignore
+      // any returned value.
+ gg_append_statement(call_expr);
+ }
pop_program_state();
}
}
else
{
- tree mangled_name = gg_define_variable(CHAR_P);
-
- gg_call(VOID,
- "__gg__just_mangle_name",
- (name.field->var_decl_node
- ? gg_get_address_of(name.field->var_decl_node)
- : null_pointer_node),
- gg_get_address_of( mangled_name),
- NULL_TREE);
-
- gg_printf("WARNING: %s:%d \"CALL %s\" not found"
- " with no \"CALL ON EXCEPTION\" phrase.\n"
- "(You might need -rdynamic or --export-dynamic for symbols in the executable.)\n",
- gg_string_literal(current_filename.back().c_str()),
- build_int_cst_type(INT, CURRENT_LINE_NUMBER),
- mangled_name,
+ // When EC-PROGRAM-NOT-FOUND is not enabled, we issue a warning.
+ const cbl_enabled_exceptions_t&
+ enabled_exceptions( cdf_enabled_exceptions() );
+ if( !enabled_exceptions.match(ec_program_not_found_e) )
+ {
+ tree mangled_name = gg_define_variable(CHAR_P);
+
+ gg_call(VOID,
+ "__gg__just_mangle_name",
+ (name.field->var_decl_node
+ ? gg_get_address_of(name.field->var_decl_node)
+ : null_pointer_node),
+ gg_get_address_of( mangled_name),
NULL_TREE);
+
+ gg_printf("WARNING: %s:%d \"CALL %s\" not found"
+ " with no \"CALL ON EXCEPTION\" phrase.\n"
+ "(You might need -rdynamic or --export-dynamic for symbols in the executable.)\n",
+ gg_string_literal(current_filename.back().c_str()),
+ build_int_cst_type(INT, CURRENT_LINE_NUMBER),
+ mangled_name,
+ NULL_TREE);
+ }
}
}
ENDIF
array_type,
pszdata,
vs_scope);
-//// data_decl_node = null_pointer_node;
free(pszdata);
// This is the holy grail. With the initializer set to gg_pointer_to_array,
tree cobfield = gg_define_variable(cblc_field_type_node, psz, vs_stack);
free(psz);
- tree data = null_pointer_node; // UCHAR_P, "data",
+ tree data = data_area; // UCHAR_P, "data",
tree capacity = build_int_cst_type(SIZE_T, 16); // SIZE_T, "capacity",
tree allocated = build_int_cst_type(SIZE_T, 16); // SIZE_T, "allocated",
tree offset = build_int_cst_type(SIZE_T, 0); // SIZE_T, "offset",
tencoding, // INT, "encoding",
alphabet); // INT, "alphabet",
- if( data_decl_node != null_pointer_node )
- {
- gg_call(VOID,
- "__gg__set_data_member",
- gg_get_address_of(cobfield),
- data_area,
- NULL_TREE);
- }
-
return cobfield;
}
#pragma GCC diagnostic pop
funcname,
NULL_TREE);
- parser_display_literal("You have been hijacked by a program named \"dubner\"");
+ parser_display_literal("You have been hijacked by a program named \"dubner_h\"");
gg_insert_into_assemblerf("%s HIJACKED CODE START", ASM_COMMENT_START);
down. The code here is injected just prior to the parser_exit() stuff
in the COBOL source code. */
- parser_display_literal("You have been hijacked by a program named \"hijack\"");
+ parser_display_literal("You have been hijacked by a program named \"hijack_h\"");
gg_insert_into_assemblerf("%s HIJACKED CODE START", ASM_COMMENT_START);
-#if 0
+ tree foo = gg_define_variable(INT);
+ IF( integer_one_node, eq_op, integer_one_node )
+ {
+ gg_printf("1 is indeed equal to 1\n", NULL_TREE);
+ gg_assign(foo, build_int_cst_type(INT, 123));
+ }
+ ELSE
+ {
+ gg_printf("1 is NOT equal to 1!\n", NULL_TREE);
+ gg_abort();
+ gg_assign(foo, build_int_cst_type(INT, 999));
+ }
+ ENDIF
+ gg_printf("\"foo\" is %d\n", foo, NULL_TREE);
+#if 0
+ // Leave this around for reference; it's how you find variables set up
+ // in WORKING-STORAGE when involved in a hijack.
cbl_field_t *faaa = register_find("aaa");
cbl_field_t *fbbb = register_find("bbb");
cbl_field_t *fddd = register_find("ddd");
}
}
+static tree
+get_reference_to_data(cbl_field_t *field)
+  {
+  // Given a field, return a GENERIC tree that references the field's data.
+  // The field's data_decl_node is the starting point; data_decl_type_for()
+  // tells us the type the reference has to end up with.  The combinations of
+  // {scalar, array} x {matching, differing} types each need their own
+  // addressing arithmetic, handled case by case below.
+  tree retval = NULL_TREE;
+  tree field_type = data_decl_type_for(field);
+  tree data_type = TREE_TYPE(field->data_decl_node);
+  bool field_is_array = TREE_CODE(field_type) == ARRAY_TYPE;
+  bool data_is_array = TREE_CODE(data_type) == ARRAY_TYPE;
+
+  int field_code = TREE_CODE(field_type);
+  int data_code = TREE_CODE(data_type);
+  size_t field_size = TREE_INT_CST_LOW(TYPE_SIZE_UNIT(field_type));
+  size_t data_size = TREE_INT_CST_LOW(TYPE_SIZE_UNIT(data_type));
+
+  if( field_code == data_code && field_size == data_size )
+    {
+    if( !field_is_array )
+      {
+      // The two types are the same and are not ARRAY_TYPE
+      if( field->offset == 0 )
+        {
+        // This is an "ah, that feels good" moment.  Getting here means the
+        // field is something like "77 foo pic 9999" and that means the
+        // data_decl_node is exactly what is needed.
+        retval = field->data_decl_node;
+        }
+      else
+        {
+        // We have an offset.
+        if( (field->offset % field_size) == 0 )
+          {
+          // The offset is a whole number of elements from data_decl_node.
+          // The element index is the offset *divided* by the element size;
+          // (a modulo here would always be zero inside this branch).
+          size_t index = field->offset / field_size;
+          retval = gg_indirect(gg_cast(build_pointer_type(data_type),
+                                   gg_get_address_of(field->data_decl_node)),
+                               build_int_cst_type(SIZE_T, index));
+          }
+        else
+          {
+          // The offset is some random number of bytes.  We need to do a
+          // retval = *(field_type *)((char *)&data_decl_node + offset)
+          // (the same dereference-through-a-pointer pattern used in the
+          // differing-type branches below).
+          tree base = gg_get_address_of(field->data_decl_node);
+          base = gg_cast(UCHAR_P, base);
+          base = gg_add(base, build_int_cst_type(SIZE_T, field->offset));
+          retval = gg_indirect(gg_cast(build_pointer_type(field_type), base));
+          }
+        }
+      }
+    else
+      {
+      // The two types are the same ARRAY_TYPE
+      retval = gg_cast(UCHAR_P, gg_pointer_to_array(field->data_decl_node));
+      if( field->offset )
+        {
+        retval = gg_add(retval, build_int_cst_type(SIZE_T, field->offset));
+        }
+      }
+    }
+  else if( field_is_array && data_is_array )
+    {
+    // We have two different array types
+    retval = gg_cast(UCHAR_P, gg_pointer_to_array(field->data_decl_node));
+    if( field->offset )
+      {
+      retval = gg_add(retval, build_int_cst_type(SIZE_T, field->offset));
+      }
+    }
+  else if( !field_is_array && !data_is_array )
+    {
+    // The two data types are different, and neither is an array
+    if( field->offset == 0 )
+      {
+      if( field_size == data_size )
+        {
+        // The offset is zero, and the sizes are the same.
+        // This must be something like REDEFINES or the like:
+        retval = gg_cast(field_type, field->data_decl_node);
+        }
+      else
+        {
+        // The sizes are different:
+        // retval = *(field_type *)((char *)&data_decl_node)
+        tree base = gg_get_address_of(field->data_decl_node);
+        retval = gg_indirect(gg_cast(build_pointer_type(field_type), base));
+        }
+      }
+    else
+      {
+      // There is an offset
+      tree base = gg_get_address_of(field->data_decl_node);
+      base = gg_cast(UCHAR_P, base);
+      base = gg_add(base, build_int_cst_type(SIZE_T, field->offset));
+      retval = gg_indirect(gg_cast(build_pointer_type(field_type), base));
+      }
+    }
+  else if( !field_is_array && data_is_array )
+    {
+    // The return is a scalar, but we start from an array.
+    tree base = gg_pointer_to_array(field->data_decl_node);
+    base = gg_cast(UCHAR_P, base);
+    if( field->offset )
+      {
+      base = gg_add(base, build_int_cst_type(SIZE_T, field->offset));
+      }
+    base = gg_cast(build_pointer_type(field_type), base);
+    retval = gg_indirect(base);
+    }
+  else // field_is_array && !data_is_array
+    {
+    // The return is an array, but we start from a scalar, so the result is
+    // a pointer to the bytes rather than a dereferenced object.
+    tree base = gg_get_address_of(field->data_decl_node);
+    base = gg_cast(UCHAR_P, base);
+    if( field->offset )
+      {
+      base = gg_add(base, build_int_cst_type(SIZE_T, field->offset));
+      }
+    retval = base;
+    }
+
+  return retval;
+  }
+
static bool
mh_identical(const cbl_refer_t &destref,
- const cbl_refer_t &sourceref,
- const TREEPLET &tsource)
+ const cbl_refer_t &sourceref)
{
// Check to see if the two variables are identical types, thus allowing
// for a simple byte-for-byte copy of the data areas:
bool moved = false;
- if( destref.field->type == sourceref.field->type
+ if( destref.field->type == sourceref.field->type
&& destref.field->data.capacity() == sourceref.field->data.capacity()
- && destref.field->data.digits == sourceref.field->data.digits
- && destref.field->data.rdigits == sourceref.field->data.rdigits
+ && destref.field->data.digits == sourceref.field->data.digits
+ && destref.field->data.rdigits == sourceref.field->data.rdigits
&& (destref.field->attr & (signable_e|separate_e|leading_e))
== (sourceref.field->attr & (signable_e|separate_e|leading_e))
&& destref.field->codeset.encoding == sourceref.field->codeset.encoding
- && !destref.field->occurs.depending_on
- && !sourceref.field->occurs.depending_on
- && !destref.refmod.from
- && !sourceref.refmod.len
- && !(destref.field->attr & intermediate_e) // variables with variable
- && !(sourceref.field->attr & intermediate_e) // capacities have to be
- && !(destref.field->attr & any_length_e) // handled elsewhere
- && !(sourceref.field->attr & any_length_e)
)
{
- // The source and destination are identical in type
- if( !symbol_find_odo(sourceref.field) )
+ // The source and destination are identical in type and the
+ // Source doesn't have a depending_on clause
+ SHOW_PARSE1
+ {
+ SHOW_PARSE_INDENT
+ SHOW_PARSE_TEXT("mh_identical()");
+ }
+ if( refer_is_super_clean(destref)
+ && refer_is_super_clean(sourceref) )
{
- Analyze();
- // Source doesn't have a depending_on clause
- SHOW_PARSE1
+ // They are identical, and they have no subscripts
+
+ tree source = get_reference_to_data(sourceref.field);
+ tree dest = get_reference_to_data(destref.field);
+
+ tree type = data_decl_type_for(destref.field);
+ if( TREE_CODE(type) == ARRAY_TYPE )
{
- SHOW_PARSE_INDENT
- SHOW_PARSE_TEXT("mh_identical()");
+ // We are dealing with pointers to UCHAR.
+ // The move has to be done with a copy:
+ gg_memcpy(dest,
+ source,
+ build_int_cst_type(SIZE_T,
+ destref.field->data.capacity()));
+ }
+ else
+ {
+ // We are dealing with scalars
+ gg_assign(dest, source);
}
- gg_memcpy(gg_add(member(destref.field->var_decl_node, "data"),
- refer_offset(destref)),
- gg_add(member(sourceref.field->var_decl_node, "data"),
- tsource.offset),
- build_int_cst_type(SIZE_T, sourceref.field->data.capacity()));
moved = true;
}
}
SHOW_PARSE_TEXT("mh_source_is_literalN: __gg__psz_to_alpha_move")
}
+ // In accordance with the rules of moving a numeric to an alphabetic,
+ // we need to eliminate any leading sign character from the text
+ // string:
+
+ const char *original = sourceref.field->data.original();
+ if( *original == ascii_plus || *original == ascii_minus )
+ {
+ original += 1;
+ }
+
// We need the data sent to __gg__psz_to_alpha_move to be in the
// encoding of the destination. In accordance with the rules of
// cbl_field_t::internalize, the FldLiteralN is in source-code
const char *converted = __gg__iconverter(
DEFAULT_SOURCE_ENCODING,
destref.field->codeset.encoding,
- sourceref.field->data.original(),
- strlen(sourceref.field->data.original()),
+ original,
+ strlen(original),
&charsout);
gg_call(VOID,
"__gg__psz_to_alpha_move",
SHOW_PARSE_TEXT("move_helper()");
}
- bool moved = false;
-
if( size_error )
{
gg_assign(size_error, integer_zero_node);
st_size);
}
- // if( !moved ) // commented out to quiet cppcheck
- {
- moved = mh_source_is_group(destref, sourceref, tsource);
- }
+ bool moved = mh_source_is_group(destref, sourceref, tsource);
if( !moved )
{
- moved = mh_identical(destref, sourceref, tsource);
+ moved = mh_identical(destref, sourceref);
}
if( !moved )
tree immediate_parent,
tree new_var_decl)
{
- // There is a bug in the GCC compiler. For some optimizations and some
- // settings of -fpie, pathological N-squared time in the middle end can
- // happen when a structure on the stack has an initialized member pointing
- // to another memory area on the stack. In those cases, we are going to
- // initialize the pointer to zero, and then call a function to initialize
- // the data member. That hides things from the compiler's optimization
- // phases.
-
- bool read_only = !!TREE_READONLY(new_var_decl);
- if( new_var->type == FldLiteralN )
- {
- // For a FldLiteralN the new_var_decl is a number, not a
- // a cblc_field_t.
- read_only = true;
- }
-
- if( new_var->type == FldAlphanumeric && new_var->attr & intermediate_e )
- {
- // We need not to mess with the intermediate malloc() logic.
- read_only = true;
- }
-
- if( new_var->attr & external_e )
- {
- // We need not to mess with the intermediate malloc() logic.
- read_only = true;
- }
-
// For FldLiteralN we force the encoding to be ASCII.
// See initial_from_initial() for an explanation.
// For FldClass, we force the encoding to be UTF32; see
tree tencoding = build_int_cst_type(INT, encoding);
tree alphabet = build_int_cst_type(INT, new_var->codeset.alphabet);
- if( !read_only )
- {
- data = null_pointer_node;
- }
-
gg_structure_type_constructor(
new_var_decl,
data , // UCHAR_P, "data",
rdigits, // SCHAR, "rdigits",
tencoding, // INT, "encoding",
alphabet); // INT, "alphabet",
-
-
- if( !read_only && data_area != null_pointer_node )
- {
- gg_call(VOID,
- "__gg__set_data_member",
- gg_get_address_of(new_var_decl),
- data_area,
- NULL_TREE);
- }
}
static void
// This has to be static, because we are putting the actual memory
// on the heap. But if we put the cblc_field_t on the stack inside
// of a condition, or in a loop, we just keep recreating the field
- // without getting freeing the memory. Eventually, with perhaps a
+ // without freeing the memory. Eventually, with perhaps a
// two-pass compiler, we'll be able to create the stack cblc_field_t
// once per program-id.
scope = vs_static;
length_of_initial_string = 0;
}
- // GDB needs to know the data hierarchy. We do that by including our_index
- // and parent index in the variable name:
-
- size_t our_index = new_var->our_index;
-
- if( !our_index
- && new_var->type != FldLiteralN
- && !(new_var->attr & intermediate_e))
- {
- if( ! (new_var->type == FldFloat && new_var->has_attr(constant_e)) )
- {
- // N.B. If level is 0 then we're not participating in a hierarchy.
- // During the early stages of implementing cbl_field_t::our_index, there
- // were execution paths in parse.y and parser.cc that resulted in
- // our_index not being set. Those should be gone.
- cbl_errx("%<our_index%> is NULL under unanticipated circumstances");
- }
- }
-
- // When we create the cblc_field_t structure, we need a data pointer
- // for "data". In the case of a variable that has no parent, we
- // have to allocate storage. In the case of a variable that has a parent,
- // we calculate data as the pointer to our parent's data plus our
- // offset.
-
- // Declare and define the structure. This code *must* match
- // the C structure declared in libgcobol.c. Towards that end, the
- // variables are declared in descending order of size in order to
- // make the packing match up.
-
- // This uses a single structure type_decl template for creating each structure
-
char external_record_base[2*sizeof(cbl_name_t)] = "";
if( new_var->parent > 0 )
{
+ // new_var has a parent.
symbol_elem_t *parent = symbol_at(new_var->parent);
gcc_assert(parent);
if( parent->type == SymField )
{
// The parent of new_var is a SymFile with the external_e attribute
// Therefore, we have to establish new_var as an external with a
- // predictable name
+ // predictable name, which we derive from the source file the parent
+ // came from.
strcpy(external_record_base, parent->elem.file.name);
}
}
goto actual_allocate;
}
- if( ancestor && new_var->level != 0 )
+ if( ancestor && new_var->level != 00 )
{
// This variable has an ancestor, so we share its already-allocated data
// area
// A FldClass actually doesn't need any bytes, because the only important
// thing about it is the .initial field. We will allocate a single byte,
// just to keep run-time pointers from being NULL
- if( (new_var->type == FldClass && bytes_to_allocate == 0)
- || (new_var->type == FldLiteralA && bytes_to_allocate == 0) )
+ if( (new_var->type == FldClass && bytes_to_allocate == 0) )
{
bytes_to_allocate = 1;
}
}
}
- if( new_var->attr & intermediate_e
+ if( (new_var->attr & intermediate_e)
&& new_var->type == FldAlphanumeric )
{
// We don't allocate here for intermediates. We instead use
else
{
gg_variable_scope_t vs_scope = (new_var->attr & intermediate_e)
- ? vs_stack : vs_static ;
- tree array_type = build_array_type_nelts(UCHAR, bytes_to_allocate);
- new_var->data_decl_node = gg_define_variable(
- array_type,
- achDataName,
- vs_scope);
- data_area = gg_pointer_to_array(new_var->data_decl_node);
+ ? vs_stack
+ : vs_static ;
+ tree data_decl_type = data_decl_type_for(new_var);
+ new_var->data_decl_node = gg_define_variable( data_decl_type,
+ achDataName,
+ vs_scope);
+ if( TREE_CODE(data_decl_type) == ARRAY_TYPE )
+ {
+ data_area = gg_pointer_to_array(new_var->data_decl_node);
+ }
+ else
+ {
+ data_area = gg_get_address_of(new_var->data_decl_node);
+ }
}
}
}
done:
return;
}
+
void parser_lsearch_conditional(cbl_label_t * name);
void parser_bsearch_conditional(cbl_label_t * name);
-
void parser_lsearch_when( cbl_label_t *name, cbl_field_t *conditional );
-void
-parser_bsearch_when(cbl_label_t *name,
- cbl_refer_t key,
- cbl_refer_t sarg,
- bool ascending);
-
+void parser_bsearch_when( cbl_label_t *name,
+ const cbl_refer_t &key,
+ const cbl_refer_t &sarg,
+ bool ascending);
void parser_lsearch_end( cbl_label_t *name );
void parser_bsearch_end( cbl_label_t *name );
+void parser_bsearch_start( cbl_label_t *name, cbl_field_t *tgt );
-void
-parser_bsearch_start( cbl_label_t *name, cbl_field_t *tgt );
-
-void
-parser_sort(cbl_refer_t table,
- bool duplicates,
- cbl_alphabet_t *alphabet,
- const std::vector<cbl_key_t>& keys );
+void parser_sort( cbl_refer_t table,
+ bool duplicates,
+ cbl_alphabet_t *alphabet,
+ const std::vector<cbl_key_t>& keys );
void
parser_file_sort( cbl_file_t *file,
bool duplicates,
cbl_internal_error("The given type is NULL, and that is just not fair");
}
+ int code = TREE_CODE(type);
+
if( DECL_P(type) )
{
type = TREE_TYPE(type);
}
- if( !TYPE_P(type) )
+ if( !TYPE_P(type) && code != ARRAY_REF)
{
- cbl_internal_error("The given type is not a declaration or a TYPE");
+ cbl_internal_error("%s", "The given type is not a declaration or a TYPE or an ARRAY_REF");
}
static char ach[1100];
static char ach2[1024];
static char ach3[1024];
- switch( TREE_CODE(type) )
+ switch( code )
{
case POINTER_TYPE:
strcpy(ach2, gg_show_type(TREE_TYPE(type)));
sprintf(ach, "POINTER to %s", ach2);
break;
+ case ARRAY_TYPE:
+ strcpy(ach2, gg_show_type(TREE_TYPE(type)));
+ sprintf(ach, "ARRAY");
+ break;
+
+ case ARRAY_REF:
+ sprintf(ach, "ARRAY_REF");
+ break;
+
case VOID_TYPE:
sprintf(ach, "VOID");
break;
strcat(ach, " readonly");
}
+ if( DECL_P(original_type) && TYPE_VOLATILE(original_type) )
+ {
+ strcat(ach, " volatile");
+ }
+
return ach;
}
// it's time to actually define the storage with a decl_expression:
tree stmt = build1_loc (gg_token_location(),
DECL_EXPR,
- TREE_TYPE(var_decl),
+ void_type_node,
var_decl);
gg_append_statement(stmt);
}
return var_decl;
}
+tree
+gg_define_volatile_variable(tree type_decl,
+                            const char *name,
+                            gg_variable_scope_t vs_scope)
+  {
+  // Define a named variable of the given type, qualified as volatile.
+  tree qualified_type = build_qualified_type(type_decl, TYPE_QUAL_VOLATILE);
+
+  bool was_defined = false;
+  tree var_decl = gg_declare_variable(qualified_type,
+                                      name,
+                                      NULL_TREE,
+                                      vs_scope,
+                                      &was_defined);
+
+  /* These decl flags are mostly helpful while debugging the front end.  The
+     volatile-qualified type is what actually matters; the flags should agree
+     with it. */
+  TREE_SIDE_EFFECTS(var_decl) = 1;
+  TREE_THIS_VOLATILE(var_decl) = 1;
+
+  if( !was_defined )
+    {
+    gg_define_from_declaration(var_decl);
+    }
+
+  return var_decl;
+  }
+
tree
gg_define_bool()
{
return build_fold_addr_expr (elem_ref);
}
+tree
+gg_get_address(const tree var_decl)
+  {
+  /* Returns the address of var_decl whether it is a scalar or an
+     ARRAY_TYPE.  Use it with care; there is something to be said for
+     knowing explicitly which of the two you are working with. */
+  if( TREE_CODE(TREE_TYPE(var_decl)) == ARRAY_TYPE )
+    {
+    // An array decays through the &array[0] helper.
+    return gg_pointer_to_array(var_decl);
+    }
+
+  // A scalar: mark the decl addressable and build &var_decl.
+  TREE_USED(var_decl) = 1;
+  TREE_ADDRESSABLE(var_decl) = 1;
+  return build_fold_addr_expr(var_decl);
+  }
+
tree
gg_get_indirect_reference(tree pointer, tree offset)
{
}
else
{
- return build4(ARRAY_REF,
+ tree retval = build4(ARRAY_REF,
element_type,
pointer,
- offset,
+ fold_convert(SIZE_T, offset),
NULL_TREE,
NULL_TREE);
+ return retval;
}
}
return build2( BIT_AND_EXPR, larger_type, gg_cast(larger_type,A), gg_cast(larger_type,B));
}
+tree
+gg_bswap (tree var)
+  {
+  // Returns a tree that evaluates to VAR with its bytes reversed
+  // end-for-end, using the __builtin_bswapNN builtin that matches the byte
+  // size of VAR's type.  A one-byte value is returned unchanged.
+  location_t loc = UNKNOWN_LOCATION;
+  tree type = TREE_TYPE (var);
+  tree size = TYPE_SIZE_UNIT (type);
+
+  gcc_assert (tree_fits_uhwi_p (size));
+
+  unsigned HOST_WIDE_INT size_in_bytes = tree_to_uhwi (size);
+
+  enum built_in_function fncode;
+  tree unsigned_type;
+
+  switch (size_in_bytes)
+    {
+    case 1:
+      // A single byte has nothing to swap.
+      return var;
+
+    case 2:
+      fncode = BUILT_IN_BSWAP16;
+      unsigned_type = uint16_type_node;
+      break;
+
+    case 4:
+      {
+      fncode = BUILT_IN_BSWAP32;
+      unsigned_type = uint32_type_node;
+      break;
+      }
+
+    case 8:
+      fncode = BUILT_IN_BSWAP64;
+      unsigned_type = uint64_type_node;
+      break;
+
+    case 16:
+      fncode = BUILT_IN_BSWAP128;
+      unsigned_type = unsigned_intTI_type_node;
+      break;
+
+    default:
+      // Only sizes of 1, 2, 4, 8, or 16 bytes are supported.
+      gcc_unreachable ();
+    }
+
+  // The builtins take and return unsigned types: convert in, call, and
+  // convert the swapped result back to VAR's own type.
+  tree arg = fold_convert_loc (loc, unsigned_type, var);
+
+  tree swapped =
+    build_call_expr_loc (loc,
+                         builtin_decl_explicit (fncode),
+                         1,
+                         arg);
+
+  return fold_convert_loc (loc, type, swapped);
+  }
+
tree
gg_build_relational_expression(tree operand_a,
enum relop_t op,
gg_append_statement(the_call);
}
+/* Build a CALL_EXPR for memcmp(s1, s2, n) at the current token location.
+   The caller uses the usual three-way comparison result.  */
+tree
+gg_memcmp(const tree s1, const tree s2, tree n)
+ {
+ return build_call_expr_loc(gg_token_location(),
+ builtin_decl_explicit (BUILT_IN_MEMCMP),
+ 3,
+ s1,
+ s2,
+ n);
+ }
+
void
gg_record_statement_list_start()
{
#define BOOL boolean_type_node
#define CHAR char_type_node
#define SCHAR signed_char_type_node
+#define SCHAR_P build_pointer_type(SCHAR)
#define UCHAR unsigned_char_type_node
#define SHORT short_integer_type_node
#define SHORT_P build_pointer_type(short_integer_type_node)
tree entry_switch_goto;
tree entry_switch_label;
std::vector<tree> entry_goto_expressions;
+ bool alphabet_in_use;
};
struct cbl_translation_unit_t
const char *name,
gg_variable_scope_t vs_scope,
tree initial_value);
+extern tree gg_define_volatile_variable(tree type_decl,
+ const char *name,
+ gg_variable_scope_t vs_scope);
// Utility definers:
extern tree gg_define_bool();
extern tree gg_define_char();
extern tree gg_define_uchar_star(const char *variable_name, tree var);
// address_of operator; equivalent of C "&var_decl"
-extern tree gg_get_address_of(const tree var_decl);
+extern tree gg_get_address_of(const tree var_decl); // For scalars
// equivalent of C "&array[0]"
-extern tree gg_pointer_to_array(tree array);
+extern tree gg_pointer_to_array(tree array); // For arrays
+extern tree gg_get_address(const tree var_decl);
// Array creation and access:
extern tree gg_negate(tree var); // Two's complement negation
extern tree gg_bitwise_not(tree var); // Bitwise inversion
extern tree gg_abs(tree var); // Absolute value
+extern tree gg_bswap(tree var); // end-for-end byte swap
// And some binary operations:
extern void gg_write(tree fd, tree buf, tree count);
extern void gg_memset(tree dest, const tree value, tree size);
extern tree gg_memchr(tree s, tree c, tree n);
+extern tree gg_memcmp(const tree dest, const tree src, tree size);
extern void gg_memcpy(tree dest, const tree src, tree size);
extern void gg_memmove(tree dest, const tree src, tree size);
extern tree gg_memdup(tree data, tree length);
extern char *gg_show_type(tree type);
extern void gg_leaving_the_source_code_file();
+
#endif
}
static void
-arithmetic_operation(size_t nC, cbl_num_result_t *C,
+arithmetic_operation( size_t nC, cbl_num_result_t *C,
size_t nA, cbl_refer_t *A,
size_t nB, cbl_refer_t *B,
cbl_arith_format_t format,
std::vector<cbl_refer_t> results(nC + 1);
int ncount = 0;
- if( nC+1 <= MIN_FIELD_BLOCK_SIZE )
- {
- // We know there is room in our existing buffer
- }
- else
- {
- // We might have to allocate more space:
- gg_call(VOID,
- "__gg__resize_int_p",
- gg_get_address_of(var_decl_arithmetic_rounds_size),
- gg_get_address_of(var_decl_arithmetic_rounds),
- build_int_cst_type(SIZE_T, nC+1),
- NULL_TREE);
- }
-
// We have to take into account the possibility the quotient of the division
// can affect the disposition of the remainder. In particular, some of the
// NIST tests have the construction
// list
results[ncount++] = temp_remainder;
}
+ tree array_of_int_type = build_array_type_nelts(INT, nC+1);
+ tree arithmetic_rounds = gg_define_variable(array_of_int_type);
for(size_t i=0; i<nC; i++)
{
results[ncount] = C[i].refer;
- gg_assign( gg_array_value(var_decl_arithmetic_rounds, ncount),
+ gg_assign( gg_array_value(arithmetic_rounds, ncount),
build_int_cst_type(INT, C[i].rounded));
ncount += 1;
}
// Having done all that work, we now need to break out the various different
// arithmetic routines that implement the various possibilities,
- build_array_of_treeplets(1, nA, A);
- build_array_of_treeplets(2, nB, B);
- build_array_of_treeplets(3, ncount, results.data());
-
+ tree referlets_A = build_array_of_referlets(nA, A);
+ tree referlets_B = build_array_of_referlets(nB, B);
+ tree referlets_C = build_array_of_referlets(ncount, results.data());
gg_call(VOID,
operation,
build_int_cst_type(INT, format),
build_int_cst_type(SIZE_T, nA),
+ referlets_A,
build_int_cst_type(SIZE_T, nB),
+ referlets_B,
build_int_cst_type(SIZE_T, ncount),
- var_decl_arithmetic_rounds,
+ referlets_C,
+ gg_pointer_to_array(arithmetic_rounds),
build_int_cst_type(INT, call_flags),
compute_error,
NULL_TREE);
tree term_type = largest_binary_term(nA, A);
if( term_type )
{
- tree dest_type = tree_type_from_size(
- C[0].refer.field->data.capacity(),
- 0);
+ tree dest_type = tree_type_from_size(C[0].refer.field->data.capacity(),
+ 0);
+// tree dest_type2 = TREE_TYPE(C[0].refer.field->data_decl_node);
+// gcc_assert(dest_type2 == dest_type);
+
// All the numbers are integers without rdigits
if( nC == 1
&& nA == 1
}
if( refer_is_clean(C[0].refer) )
{
- tree dest_addr = member(C[0].refer.field->var_decl_node,
- "data");
- tree ptr = gg_cast(build_pointer_type(dest_type), dest_addr);
// We are accumulating into memory
- gg_assign( gg_indirect(ptr),
- gg_add( gg_indirect(ptr),
- A_value));
+
+ if(false && refer_is_working_storage(C[0].refer)
+ && C[0].refer.field->offset == 0 )
+ {
+ gg_assign( C[0].refer.field->data_decl_node,
+ gg_cast(TREE_TYPE(C[0].refer.field->data_decl_node), gg_add( C[0].refer.field->data_decl_node, A_value)));
+ }
+ else
+ {
+ tree dest_addr = member(C[0].refer.field->var_decl_node,
+ "data");
+ tree ptr = gg_cast(build_pointer_type(dest_type), dest_addr);
+ gg_assign( gg_indirect(ptr),
+ gg_add( gg_indirect(ptr),
+ A_value));
+ }
}
else
{
// Do phase 2, which puts the subtotal into each target location in turn
for(size_t i=0; i<nC; i++)
{
- arithmetic_operation(1, &C[i],
+ arithmetic_operation( 1, &C[i],
0, NULL,
0, NULL,
format,
#include "cobol-system.h"
#include "coretypes.h"
#include "tree.h"
+#include "langhooks.h"
+#include "tree-iterator.h"
+#include "stringpool.h"
+#include "cgraph.h"
+#include "stor-layout.h"
+#include "toplev.h"
+#include "function.h"
+#include "fold-const.h"
#include "../../libgcobol/ec.h"
#include "../../libgcobol/common-defs.h"
#include "util.h"
#include "show_parse.h"
#include "../../libgcobol/exceptl.h"
#include "exceptg.h"
+#include "dumpfile.h"
+
bool exception_location_active = true;
bool skip_exception_processing = true;
tree var_decl_call_parameter_count; // int __gg__call_parameter_count
tree var_decl_call_parameter_lengths; // size_t *__gg__call_parameter_count
-tree var_decl_arithmetic_rounds_size; // size_t __gg__arithmetic_rounds_size;
-tree var_decl_arithmetic_rounds; // int* __gg__arithmetic_rounds;
-tree var_decl_fourplet_flags_size; // size_t __gg__fourplet_flags_size;
-tree var_decl_fourplet_flags; // int* __gg__fourplet_flags;
-
-tree var_decl_treeplet_1f; // cblc_field_pp_type_node , "__gg__treeplet_1f"
-tree var_decl_treeplet_1o; // SIZE_T_P , "__gg__treeplet_1o"
-tree var_decl_treeplet_1s; // SIZE_T_P , "__gg__treeplet_1s"
-tree var_decl_treeplet_2f; // cblc_field_pp_type_node , "__gg__treeplet_2f"
-tree var_decl_treeplet_2o; // SIZE_T_P , "__gg__treeplet_2o"
-tree var_decl_treeplet_2s; // SIZE_T_P , "__gg__treeplet_2s"
-tree var_decl_treeplet_3f; // cblc_field_pp_type_node , "__gg__treeplet_3f"
-tree var_decl_treeplet_3o; // SIZE_T_P , "__gg__treeplet_3o"
-tree var_decl_treeplet_3s; // SIZE_T_P , "__gg__treeplet_3s"
-tree var_decl_treeplet_4f; // cblc_field_pp_type_node , "__gg__treeplet_4f"
-tree var_decl_treeplet_4o; // SIZE_T_P , "__gg__treeplet_4o"
-tree var_decl_treeplet_4s; // SIZE_T_P , "__gg__treeplet_4s"
-
// There are times when I need to insert a NOP into the code, mainly to force
// a .loc directive into the assembly language so that the GDB-COBOL debugger
// can show the COBOL source code. This is true, for example, the CONTINUE
}
tree
-tree_type_from_digits(size_t digits, int signable)
+tree_type_from_digits(size_t digits, uint64_t signable)
{
tree retval = NULL_TREE;
static
tree
-get_data_offset(const cbl_refer_t &refer,
- int *pflags = NULL)
+get_data_offset(const cbl_refer_t &refer, int *pflags = NULL)
{
// This routine returns a tree which is the size_t offset to the data in the
// refer/field
be handled efficiently. */
const cbl_enabled_exceptions_t &enabled_exceptions(cdf_enabled_exceptions());
- if( !enabled_exceptions.match(ec_bound_subscript_e)
+ if( !enabled_exceptions.match(ec_bound_subscript_e)
&& !enabled_exceptions.match(ec_bound_odo_e)
&& !enabled_exceptions.match(ec_bound_ref_mod_e) )
{
ELSE
{
IF( subscript,
- ge_op,
+ gt_op,
build_int_cst_type( TREE_TYPE(subscript),
parent->occurs.ntimes()) )
{
return retval;
}
-//static tree tree_type_from_field(const cbl_field_t *field);
-
tree
get_binary_value_tree(tree return_type,
tree rdigits,
return retval;
}
- static tree pointer = gg_define_variable( UCHAR_P,
- "..gbv_pointer",
- vs_file_static);
+ tree pointer = gg_define_variable(UCHAR_P);
switch(field->type)
{
case FldLiteralN:
gg_assign(rdigits, build_int_cst_type(TREE_TYPE(rdigits),
field->data.rdigits));
}
- // tree source_type = tree_type_from_field(field);
- // retval = gg_cast(return_type,
- // gg_indirect( gg_cast(build_pointer_type(source_type),
- // gg_get_address_of(field->data_decl_node))));
retval = gg_cast(return_type, field->data_decl_node);
}
break;
build_int_cst_type(INT, field->codeset.encoding),
NULL_TREE));
// Assign the value we got from the string to our "return" value:
-
+
// Note that cppcheck can't understand the run-time IF()
// cppcheck-suppress redundantAssignment
retval = gg_cast(return_type, val128);
hilo ));
}
-#if 0
-static tree
+tree
tree_type_from_field(const cbl_field_t *field)
  {
- gcc_assert(field);
- return tree_type_from_size(field->data.capacity(), field->attr & signable_e);
+ /* This routine is used to determine what action is taken with type of a
+ CALL ... USING <var> and the matching PROCEDURE DIVISION USING <var> of
+ a PROGRAM-ID or FUNCTION-ID
+ */
+ tree retval;
+
+ switch(field->type)
+ {
+ // Character data (groups, alphanumerics, edited and literal strings)
+ // is passed as a pointer to its bytes.
+ case FldGroup:
+ case FldAlphanumeric:
+ case FldAlphaEdited:
+ case FldNumericEdited:
+ case FldLiteralA:
+ {
+ retval = CHAR_P;
+ break;
+ }
+
+ // Zoned and packed decimals: pick the narrowest integer type that can
+ // hold data.digits decimal digits.
+ case FldNumericDisplay:
+ case FldPacked:
+ {
+ if( field->attr & signable_e )
+ {
+ if( field->data.digits > 18 )
+ {
+ retval = INT128;
+ }
+ else if( field->data.digits > 9)
+ {
+ retval = LONG;
+ }
+ else if( field->data.digits > 4)
+ {
+ retval = INT;
+ }
+ else if( field->data.digits > 2)
+ {
+ retval = SHORT;
+ }
+ else
+ {
+ retval = SCHAR;
+ }
+ }
+ else
+ {
+ if( field->data.digits > 18 )
+ {
+ retval = UINT128;
+ }
+ else if( field->data.digits > 9)
+ {
+ retval = ULONG;
+ }
+ else if( field->data.digits > 4)
+ {
+ retval = UINT;
+ }
+ else if( field->data.digits > 2)
+ {
+ retval = USHORT;
+ }
+ else
+ {
+ retval = UCHAR;
+ }
+ }
+ break;
+ }
+
+ // Native binary fields: the integer width follows the byte capacity.
+ case FldLiteralN:
+ case FldNumericBinary:
+ case FldNumericBin5:
+ case FldIndex:
+ case FldPointer:
+ {
+ if( field->attr & signable_e )
+ {
+ if( field->data.capacity() == 16 )
+ {
+ retval = INT128;
+ }
+ else if( field->data.capacity() == 8 )
+ {
+ retval = LONG;
+ }
+ else if( field->data.capacity() == 4 )
+ {
+ retval = INT;
+ }
+ else if( field->data.capacity() == 2 )
+ {
+ retval = SHORT;
+ }
+ else if( field->data.capacity() == 1 )
+ {
+ retval = SCHAR;
+ }
+ else
+ {
+ gcc_unreachable();
+ }
+ }
+ else
+ {
+ if( field->data.capacity() == 16 )
+ {
+ retval = UINT128;
+ }
+ else if( field->data.capacity() == 8 )
+ {
+ retval = ULONG;
+ }
+ else if( field->data.capacity() == 4 )
+ {
+ retval = UINT;
+ }
+ else if( field->data.capacity() == 2 )
+ {
+ retval = USHORT;
+ }
+ else if( field->data.capacity() == 1 )
+ {
+ retval = UCHAR;
+ }
+ else
+ {
+ gcc_unreachable();
+ }
+ }
+ break;
+ }
+
+ // Floating point by capacity: 4 -> FLOAT, 8 -> DOUBLE, else 128-bit.
+ case FldFloat:
+ {
+ if( field->data.capacity() == 8 )
+ {
+ retval = DOUBLE;
+ }
+ else if( field->data.capacity() == 4 )
+ {
+ retval = FLOAT;
+ }
+ else
+ {
+ retval = FLOAT128;
+ }
+ break;
+ }
+
+ default:
+ {
+ cbl_internal_error( "%s: Invalid field type %s:",
+ __func__,
+ cbl_field_type_str(field->type));
+ break;
+ }
+ }
+ return retval;
}
-#endif
tree
get_data_address( cbl_field_t *field,
}
tree
-tree_type_from_size(size_t bytes, int signable)
+tree_type_from_size(size_t bytes, uint64_t signable)
{
tree retval = NULL_TREE;
switch( bytes )
{
case 1:
- retval = CHAR;
+ retval = SCHAR;
break;
case 2:
retval = SHORT;
retval = INT;
break;
case 8:
- retval = LONGLONG;
+ retval = LONG;
break;
case 16:
retval = INT128;
retval = UINT;
break;
case 8:
- retval = ULONGLONG;
+ retval = ULONG;
break;
case 16:
retval = UINT128;
gg_cast(dest_type, value));
}
-void
-build_array_of_treeplets( int ngroup,
-                          size_t N,
+tree
+build_array_of_referlets( size_t N,
cbl_refer_t *refers)
  {
+ // Returns a pointer to a local array of N referlets — the
+ // {field, offset, size} triples the runtime expects — or
+ // null_pointer_node when N is zero.
- if( N )
+ tree retval = null_pointer_node;
+ if(N)
{
- // At the present time the most this routine is called is four times, for
- // the implementation of the UNSTRING verb.
+ // Create the array of referlets.
+ tree table_type = build_array_type_nelts(cblc_referlet_type_node, N);
+ tree reflets = gg_define_variable(table_type);
- if( N > MIN_FIELD_BLOCK_SIZE )
- {
- gg_call(VOID,
- "__gg__resize_treeplet",
- build_int_cst_type(INT, ngroup),
- build_int_cst_type(SIZE_T, N),
- NULL_TREE
- );
- }
- switch(ngroup)
+ // Initialize the array of referlets from the list of refers:
+ for(size_t i=0; i<N; i++)
{
- case 1:
- for(size_t i=0; i<N; i++)
- {
- gg_assign(gg_array_value(var_decl_treeplet_1f, i),
- refers[i].field ? gg_get_address_of(refers[i].field->var_decl_node)
- : gg_cast(cblc_field_p_type_node, null_pointer_node));
- gg_assign(gg_array_value(var_decl_treeplet_1o, i),
- refer_offset(refers[i]));
- gg_assign(gg_array_value(var_decl_treeplet_1s, i),
- refer_size_source(refers[i]));
- }
- break;
- case 2:
- for(size_t i=0; i<N; i++)
- {
- gg_assign(gg_array_value(var_decl_treeplet_2f, i),
- refers[i].field ? gg_get_address_of(refers[i].field->var_decl_node)
- : gg_cast(cblc_field_p_type_node, null_pointer_node));
- gg_assign(gg_array_value(var_decl_treeplet_2o, i),
- refer_offset(refers[i]));
- gg_assign(gg_array_value(var_decl_treeplet_2s, i),
- refer_size_source(refers[i]));
- }
- break;
- case 3:
- for(size_t i=0; i<N; i++)
- {
- gg_assign(gg_array_value(var_decl_treeplet_3f, i),
- refers[i].field ? gg_get_address_of(refers[i].field->var_decl_node)
- : gg_cast(cblc_field_p_type_node, null_pointer_node));
- gg_assign(gg_array_value(var_decl_treeplet_3o, i),
- refer_offset(refers[i]));
- gg_assign(gg_array_value(var_decl_treeplet_3s, i),
- refer_size_source(refers[i]));
- }
- break;
- case 4:
- for(size_t i=0; i<N; i++)
- {
- gg_assign(gg_array_value(var_decl_treeplet_4f, i),
- refers[i].field ? gg_get_address_of(refers[i].field->var_decl_node)
- : gg_cast(cblc_field_p_type_node, null_pointer_node));
- gg_assign(gg_array_value(var_decl_treeplet_4o, i),
- refer_offset(refers[i]));
- gg_assign(gg_array_value(var_decl_treeplet_4s, i),
- refer_size_source(refers[i]));
- }
- break;
- default:
- abort();
- break;
+ // An omitted refer (no field) is recorded as a NULL field pointer.
+ gg_assign(gg_struct_field_ref(gg_array_value(reflets, i), "field"),
+ refers[i].field ? gg_get_address_of(refers[i].field->var_decl_node)
+ : gg_cast(cblc_field_p_type_node,
+ null_pointer_node));
+ gg_assign(gg_struct_field_ref(gg_array_value(reflets, i), "offset"),
+ refer_offset(refers[i]));
+ gg_assign(gg_struct_field_ref(gg_array_value(reflets, i), "size"),
+ refer_size_source(refers[i]));
}
+
+ // And just return a pointer to the first element of the array:
+ retval = gg_pointer_to_array(reflets);
}
- else
- {
- // Just do nothing
- }
+
+ return retval;
}
-void
-build_array_of_fourplets( int ngroup,
-                          size_t N,
-                          cbl_refer_t *refers)
+tree
+build_array_of_refers(size_t N,
+                      cbl_refer_t *refers)
{
- int flag_bits = 0;
+ // Like build_array_of_referlets, but each element also carries the flag
+ // bits that refer_offset() reports.  N must be nonzero (abort otherwise).
+ tree retval;
if( N )
{
- if( N > MIN_FIELD_BLOCK_SIZE )
- {
- gg_call(VOID,
- "__gg__resize_treeplet",
- build_int_cst_type(INT, ngroup),
- build_int_cst_type(SIZE_T, N),
- NULL_TREE);
-
- gg_call(VOID,
- "__gg__resize_int_p",
- gg_get_address_of(var_decl_fourplet_flags_size),
- gg_get_address_of(var_decl_fourplet_flags),
- build_int_cst_type(SIZE_T, N),
- NULL_TREE);
- }
-
+ int flag_bits = 0;
+ tree table_type = build_array_type_nelts(cblc_refer_type_node, N);
+ tree table = gg_define_variable(table_type);
for(size_t i=0; i<N; i++)
{
- gg_assign(gg_array_value(var_decl_treeplet_1f, i),
- gg_get_address_of(refers[i].field->var_decl_node));
- gg_assign(gg_array_value(var_decl_treeplet_1o, i),
+ gg_assign(gg_struct_field_ref(gg_array_value(table, i), "field"),
+ refers[i].field ? gg_get_address_of(refers[i].field->var_decl_node)
+ : gg_cast(cblc_field_p_type_node,
+ null_pointer_node));
+ gg_assign(gg_struct_field_ref(gg_array_value(table, i), "offset"),
refer_offset(refers[i], &flag_bits));
- gg_assign(gg_array_value(var_decl_treeplet_1s, i),
+ gg_assign(gg_struct_field_ref(gg_array_value(table, i), "size"),
refer_size_source(refers[i]));
- gg_assign(gg_array_value(var_decl_fourplet_flags, i),
+ gg_assign(gg_struct_field_ref(gg_array_value(table, i), "flags"),
build_int_cst_type(INT, flag_bits));
}
+ // And just return a pointer to the first element of the array:
+ retval = gg_pointer_to_array(table);
}
else
{
abort();
}
+ return retval;
}
tree
bool
refer_is_clean(const cbl_refer_t &refer)
{
- if( !refer.field || refer.field->is_numeric_constant() )
+ if( !refer.field || refer.field->is_numeric_constant() )
{
// It is routine for a refer to have no field. It happens when the parser
// passes us a refer for an optional parameter that has been omitted, for
;
}
+bool
+refer_is_super_clean(const cbl_refer_t &refer)
+ {
+ // By super-clean, we mean that in addition to refer_is_clean, the value
+ // is also in working-storage with fixed offset and capacity, and is
+ // otherwise in condition so that we can use refer.field->var_data_node for
+ // GENERIC, thus getting rid of the additional level of indirection through
+ // the refer.field->var_decl_node::data pointer
+ return refer_is_clean(refer)
+ && !(refer.field->attr & ( based_e
+ | linkage_e
+ | local_e
+ | intermediate_e
+ | any_length_e
+ | external_e)) ;
+ }
+
+bool
+refer_is_working_storage(const cbl_refer_t &refer)
+ {
+ // This returns TRUE in cases where the refer.field->data_decl_node is
+ // stored in .bss or .data, and is thus directly addressable.
+ // NOTE(review): FldLiteralN overrides the attribute test — presumably
+ // numeric literals always live in static storage; confirm.
+ bool retval = !( refer.field->attr & ( based_e
+ | linkage_e
+ | local_e
+ | intermediate_e) )
+ || (refer.field->type == FldLiteralN);
+ return retval;
+ }
/* This routine returns the length portion of a refmod(start:length) reference.
It extracts both the start and the length so that it can add them together
}
tree // size_t
-refer_offset(const cbl_refer_t &refer,
- int *pflags)
+refer_offset(const cbl_refer_t &refer, int *pflags)
{
// This routine calculates the effect of a refer offset on the
// refer.field->data location. When there are subscripts, the data location
}
return retval;
}
+
+/* Load the native-endian binary contents of REFER into VALUE, a freshly
+   defined GENERIC variable of TYPE (TYPE defaults to the field's natural
+   type when NULL_TREE).  Returns true when VALUE was set; as written,
+   every path sets it.  */
+bool
+binary_from_FldNumericBin5(tree &value, const cbl_refer_t &refer, tree type)
+ {
+ bool retval = false;
+
+ tree source_type = tree_type_from_field(refer.field);
+
+ if( refer_is_working_storage(refer) )
+ {
+ if( !type )
+ {
+ type = source_type;
+ }
+
+ value = gg_define_variable(type);
+
+ if( refer.field->offset == 0
+ && TREE_CODE(TREE_TYPE(refer.field->data_decl_node)) == INTEGER_TYPE)
+ {
+ // This is the cleanest method: We can just pick up the original base
+ // data.
+
+//#define FOUND_THE_ALIASING_PROBLEM
+#ifdef FOUND_THE_ALIASING_PROBLEM
+ // At the present writing, we can't just pick up the data_decl_node data,
+ // because the contents can be altered by pointer operations that the
+ // compiler doesn't know about. And we get errors, first noted with the
+ // results of fast_add (which uses *(data *) to change the data.) Using
+ // -fno-strict-aliasing makes the problem go away. Until we get that
+ // sorted out, we use the slightly less efficient method on the other
+ // side of the #else.
+ gg_assign(value, gg_cast(type, refer.field->data_decl_node));
+#else
+ tree base;
+ base = gg_cast(build_pointer_type(TREE_TYPE(refer.field->data_decl_node)),
+ member(refer.field->var_decl_node, "data"));
+ gg_assign(value,
+ gg_cast(type,
+ gg_indirect(gg_cast(build_pointer_type(source_type),
+ base))));
+#endif
+ }
+ else
+ {
+ // We can't just pick up the stuff at data_decl_node. Either there is
+ // an offset from the 01 grandfather, or else there is a type mismatch,
+ // probably because of a REDEFINES. So, we do the equivalent of
+ // *(type *)&thing;
+ tree base;
+ base = gg_cast(UCHAR_P, gg_get_address(refer.field->data_decl_node));
+ if( refer_is_clean(refer) )
+ {
+ // Clean refer: the offset is a compile-time constant.
+ if( refer.field->offset )
+ {
+ base = gg_add(base, build_int_cst_type(SIZE_T, refer.field->offset));
+ }
+ }
+ else
+ {
+ // Subscripts/refmods present: compute the offset at run time.
+ base = gg_add(base, refer_offset(refer));
+ }
+ gg_assign(value,
+ gg_cast(type,
+ gg_indirect(gg_cast(build_pointer_type(source_type),
+ base))));
+ }
+ retval = true;
+ }
+ else
+ {
+ if( !type )
+ {
+ type = tree_type_from_field(refer.field);
+ }
+
+ // Not directly addressable (BASED/LINKAGE/LOCAL/...): fetch the run-time
+ // data address and dereference it.
+ tree base;
+ get_location(base, refer);
+ value = gg_define_variable(type);
+ gg_assign(value,
+ gg_cast(type,
+ gg_indirect(gg_cast(build_pointer_type(source_type),
+ base))));
+ retval = true;
+ }
+ return retval;
+ }
+
+/* Like binary_from_FldNumericBin5, but for big-endian COBOL BINARY fields:
+   the raw bytes are fetched and then byte-swapped with gg_bswap.
+   NOTE(review): the swap is unconditional, which presumes the host is
+   little-endian — confirm behavior on big-endian hosts.
+   NOTE(review): unlike the Bin5 helper, TYPE must be non-null here
+   (gg_define_variable(type) below) — callers appear to always pass one.  */
+bool
+binary_from_FldNumericBinary(tree &value, const cbl_refer_t &refer, tree type)
+ {
+ bool retval = false;
+
+ // get the value as its own type.
+ tree unflipped;
+ retval = binary_from_FldNumericBin5(unflipped, refer, NULL_TREE);
+ if( retval )
+ {
+ // The actual return value gets the flipped bytes:
+ value = gg_define_variable(type);
+ gg_assign(value, gg_cast(type, gg_bswap(unflipped)));
+ }
+
+ return retval;
+ }
+
+/* Powers of ten, 10**0 through 10**16.  pots[n] scales a partial result by
+   n decimal digits in the divide-and-conquer digit accumulators below.
+   Must be unsigned long long: 10**10 and up do not fit in a 32-bit
+   "unsigned long" on ILP32/LLP64 hosts.  */
+static const unsigned long long pots[17] =
+ {
+ 1ULL, // 00
+ 10ULL, // 01
+ 100ULL, // 02
+ 1000ULL, // 03
+ 10000ULL, // 04
+ 100000ULL, // 05
+ 1000000ULL, // 06
+ 10000000ULL, // 07
+ 100000000ULL, // 08
+ 1000000000ULL, // 09
+ 10000000000ULL, // 10
+ 100000000000ULL, // 11
+ 1000000000000ULL, // 12
+ 10000000000000ULL, // 13
+ 100000000000000ULL, // 14
+ 1000000000000000ULL, // 15
+ 10000000000000000ULL, // 16
+ };
+
+/* Accumulate DIGITS zoned-decimal digit characters, starting at *LOC and
+   spaced STRIDE bytes apart, into RETVAL (a freshly defined unsigned
+   variable).  The divide-and-conquer split lets the middle end overlap the
+   loads; see the caller for timing notes.  NB: for digits > 16 the pointer
+   variable LOC is advanced with gg_assign, so the top-level caller must
+   pass a writable pointer variable it does not rely on afterward.  */
+static void
+d_and_q_num_disp( tree &retval, // We define this return value
+ tree loc, // This is a UCHAR_P
+ size_t digits,
+ size_t stride)
+ {
+ switch( digits )
+ {
+ case 1:
+ {
+ // One digit: just mask the low nibble of the character.
+ retval = gg_define_variable(UCHAR);
+ gg_assign(retval,
+ gg_bitwise_and(gg_indirect(loc),
+ build_int_cst_type(UCHAR, 0x0F)));
+ break;
+ }
+
+ case 2:
+ {
+ retval = gg_define_variable(UCHAR);
+ tree ldigit = gg_bitwise_and(gg_indirect(loc),
+ build_int_cst_type(UCHAR, 0x0F));
+ tree rdigit = gg_bitwise_and(gg_indirect(loc,
+ build_int_cst_type(SIZE_T,
+ stride)),
+ build_int_cst_type(UCHAR, 0x0F));
+ gg_assign(retval,
+ gg_add(gg_multiply(ldigit,
+ build_int_cst_type(UCHAR, 10)),
+ rdigit));
+ break;
+ }
+
+ case 3:
+ case 4:
+ {
+ tree type = USHORT;
+ int rsize = 2;
+ retval = gg_define_variable(type);
+ tree left_half;
+ tree right_half;
+ d_and_q_num_disp(left_half,
+ loc,
+ digits - rsize,
+ stride);
+ d_and_q_num_disp(right_half,
+ gg_add(loc,
+ build_int_cst_type(SIZE_T,
+ stride*(digits - rsize))),
+ rsize,
+ stride);
+ gg_assign(retval,
+ gg_add(gg_cast(type,
+ gg_multiply(left_half,
+ build_int_cst_type(type, 100))),
+ gg_cast(type, right_half)));
+ break;
+ }
+ case 5:
+ case 6:
+ case 7:
+ case 8:
+ {
+ tree type = UINT;
+ int rsize = 4;
+ retval = gg_define_variable(type);
+ tree left_half;
+ tree right_half;
+ d_and_q_num_disp(left_half,
+ loc,
+ digits - rsize,
+ stride);
+ d_and_q_num_disp(right_half,
+ gg_add(loc,
+ build_int_cst_type(SIZE_T,
+ stride*(digits - rsize))),
+ rsize,
+ stride);
+ gg_assign(retval,
+ gg_add(gg_cast(type,
+ gg_multiply(left_half,
+ build_int_cst_type(type, 10000))),
+ gg_cast(type, right_half)));
+ break;
+ }
+
+ case 9:
+ case 10:
+ case 11:
+ case 12:
+ case 13:
+ case 14:
+ case 15:
+ case 16:
+ {
+ tree type = ULONG;
+ int rsize = 8;
+ retval = gg_define_variable(type);
+ tree left_half;
+ tree right_half;
+ d_and_q_num_disp(left_half,
+ loc,
+ digits - rsize,
+ stride);
+ d_and_q_num_disp(right_half,
+ gg_add(loc,
+ build_int_cst_type(SIZE_T,
+ stride*(digits - rsize))),
+ rsize,
+ stride);
+ gg_assign(retval,
+ gg_add(gg_cast(type,
+ gg_multiply(left_half,
+ build_int_cst_type(type, 100000000UL))),
+ gg_cast(type, right_half)));
+ break;
+ }
+
+ default:
+ {
+ // 'digits' is greater than 16. We will peel off sixteen digits at a
+ // time, and multiply-and-accumulate into our return value:
+
+ tree type = UINT128;
+ retval = gg_define_variable(type);
+
+ // std::min<size_t> rather than std::min(digits, 16UL): on hosts where
+ // size_t is not "unsigned long" (LLP64, ILP32) the two-argument form
+ // fails template argument deduction.
+ size_t digits_this_time = std::min<size_t>(digits, 16);
+ digits -= digits_this_time;
+ tree value;
+ d_and_q_num_disp(value,
+ loc,
+ digits_this_time,
+ stride);
+ gg_assign(retval, gg_cast(type, value));
+ gg_assign(loc,
+ gg_add(loc,
+ build_int_cst_type(SIZE_T,
+ stride*digits_this_time)));
+ while(digits > 0)
+ {
+ digits_this_time = std::min<size_t>(digits, 16);
+ // unsigned long long, not size_t: pots[] holds values up to 10**16,
+ // which would truncate in a 32-bit size_t.
+ unsigned long long pot = pots[digits_this_time];
+ gg_assign(retval, gg_multiply(retval,
+ build_int_cst_type(type, pot)));
+ d_and_q_num_disp(value,
+ loc,
+ digits_this_time,
+ stride);
+ gg_assign(retval, gg_add(retval, gg_cast(type, value)));
+ gg_assign(loc,
+ gg_add(loc,
+ build_int_cst_type(SIZE_T,
+ stride*digits_this_time)));
+ digits -= digits_this_time;
+ }
+ break;
+ }
+ }
+ }
+
+bool
+binary_from_FldNumericDisplay(tree &value,
+ const cbl_refer_t &refer,
+ tree return_type)
+ {
+ // A return of false means we couldn't convert this value
+ bool retval = false;
+
+ tree source_type = tree_type_from_field(refer.field);
+ if( !return_type )
+ {
+ return_type = source_type;
+ }
+
+ // This is where we build the actual numeric value of the digits of the
+ // COBOL numeric display variable. It is up to the caller to interpret
+ // scaledness and rdigits and so forth.
+ value = gg_define_variable(return_type);
+
+ // This is our address pointer, used for walking the digits.
+ tree base = gg_define_variable(UCHAR_P);
+
+ // This is the location of the byte holding the sign (if any)
+ tree sign_location = gg_define_variable(UCHAR_P);
+ // This is the operational counter
+ tree digit_count = gg_define_variable(SIZE_T);
+
+ // The stride of the numerical value is the distance, in bytes, between
+ // characters of the zoned decimal value. It can be 1 (ascii or ebcdic),
+ // or 2 or 4 for utf16 and utf32.
+ tree stride;
+
+ // This works for all forms of storage:
+ gg_assign(base,
+ gg_cast(build_pointer_type(UCHAR_P),
+ member(refer.field->var_decl_node,"data")));
+ if( !refer_is_clean(refer) )
+ {
+ gg_assign(base, gg_add(base, refer_offset(refer)));
+ }
+
+ gg_assign(digit_count, build_int_cst_type(SIZE_T, refer.field->data.digits));
+
+ charmap_t *charmap = __gg__get_charmap(refer.field->codeset.encoding);
+ size_t fstride = charmap->stride();
+ stride = build_int_cst_type(SIZE_T, fstride);
+
+ if( refer.field->attr & signable_e )
+ {
+ // The value is signable.
+ if( refer.field->attr & separate_e )
+ {
+ // The sign byte is separate from the digits
+ if( refer.field->attr & leading_e )
+ {
+ // separate & leading. sign_location is the first character.
+ gg_assign(sign_location, base);
+ gg_assign(base, gg_add(base, stride));
+ }
+ else
+ {
+ // separate & trailing. The sign byte is after the last character:
+ gg_assign(sign_location,
+ gg_add(base,
+ build_int_cst_type(SIZE_T,
+ refer.field->data.digits * fstride)));
+ }
+ }
+ else
+ {
+ // sign is internal:
+ if( refer.field->attr & leading_e )
+ {
+ // internal & leading
+ gg_assign(sign_location, base);
+ }
+ else
+ {
+ // internal & trailing
+ gg_assign(sign_location,
+ gg_add(base,
+ build_int_cst_type(SIZE_T,
+ fstride *
+ (refer.field->data.digits-1))));
+ }
+ }
+ }
+
+ size_t digits = refer.field->data.digits;
+ // At this point, we have 'digits', which is the number of characters at
+ // 'base', The obvious thing is a multiply-and-accumulate loop, but faster
+ // code can result from allowing the middle-end to create overlapping.
+
+ // This divide-and-conquer algorithm gives the middle-end that flexibility.
+ // It runs about three times faster than a multiply-accumulate when compiled
+ // with -O0, and about 2.8 times faster when compiled with -O2.
+
+ tree d_and_q;
+ d_and_q_num_disp(d_and_q, base, digits, fstride);
+
+ // d_and_q contains our value. We need to know if negativeness is involved.
+ if( refer.field->attr & signable_e )
+ {
+ if( refer.field->attr & separate_e )
+ {
+ // If the sign location is a minus sign, we have to negate the value.
+ IF( gg_indirect(sign_location),
+ eq_op,
+ build_int_cst_type(UCHAR, charmap->mapped_character(ascii_minus)) )
+ {
+ gg_assign(value, gg_negate(gg_cast(return_type, d_and_q)));
+ }
+ ELSE
+ {
+ gg_assign(value, gg_cast(return_type, d_and_q));
+ }
+ ENDIF
+ }
+ else
+ {
+ // The sign indicator is inside the sign_location digit.
+ if( charmap->is_like_ebcdic() )
+ {
+ // In EBCDIC, the value is negative when the sign_indicator is less
+ // than ebcdic zero:
+ IF( gg_indirect(sign_location),
+ lt_op,
+ build_int_cst_type(UCHAR, charmap->mapped_character(ascii_zero)) )
+ {
+ gg_assign(value, gg_negate(gg_cast(return_type, d_and_q)));
+ }
+ ELSE
+ {
+ gg_assign(value, gg_cast(return_type, d_and_q));
+ }
+ ENDIF
+ }
+ else
+ {
+ // In ASCII, the value is negative when the sign_indicator is greater
+ // than ascii nine:
+ IF( gg_indirect(sign_location),
+ gt_op,
+ build_int_cst_type(UCHAR, ascii_9) )
+ {
+ gg_assign(value, gg_negate(gg_cast(return_type, d_and_q)));
+ }
+ ELSE
+ {
+ gg_assign(value, gg_cast(return_type, d_and_q));
+ }
+ ENDIF
+ }
+ }
+ }
+ else
+ {
+ gg_assign(value, gg_cast(return_type, d_and_q));
+ }
+
+ retval = true;
+ return retval;
+ }
+
+ /* This is the GENERIC that creates
+ static const unsigned char dp2bin[160] =
+ {
+ 00, 01, 02, 03, 04, 05, 06, 07, 8, 9, 0, 0, 0, 0, 0, 0, // 0x00
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 10, 10, 10, 10, 10, 10, // 0x10
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 20, 20, 20, 20, 20, 20, // 0x20
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 30, 30, 30, 30, 30, 30, // 0x30
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 40, 40, 40, 40, 40, 40, // 0x40
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 50, 50, 50, 50, 50, 50, // 0x50
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 60, 60, 60, 60, 60, 60, // 0x60
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 70, 70, 70, 70, 70, 70, // 0x70
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 80, 80, 80, 80, 80, 80, // 0x80
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 90, 90, 90, 90, 90, 90, // 0x90
+ };
+ */
+
+/* Build the file-static 160-entry lookup table "_dp2bin" mapping a packed
+   decimal byte (high nibble = tens 0-9, low nibble = units 0-15) to its
+   two-digit binary value; non-digit low nibbles fall back to the tens
+   value.  Called once; the decl is cached by d_and_q_packed().  */
+static tree
+make_dp2bin_decl()
+ {
+ static const unsigned char dp2bin[160] =
+ {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, // 0x00
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 10, 10, 10, 10, 10, 10, // 0x10
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 20, 20, 20, 20, 20, 20, // 0x20
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 30, 30, 30, 30, 30, 30, // 0x30
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 40, 40, 40, 40, 40, 40, // 0x40
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 50, 50, 50, 50, 50, 50, // 0x50
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 60, 60, 60, 60, 60, 60, // 0x60
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 70, 70, 70, 70, 70, 70, // 0x70
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 80, 80, 80, 80, 80, 80, // 0x80
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 90, 90, 90, 90, 90, 90, // 0x90
+ };
+
+ tree array_type = build_array_type_nelts(UCHAR, 160);
+ tree decl = gg_define_variable( array_type,
+ "_dp2bin",
+ vs_file_static);
+
+ // Build the constant CONSTRUCTOR used as the table's initializer.
+ tree ctor = make_node(CONSTRUCTOR);
+ TREE_TYPE(ctor) = array_type;
+ TREE_STATIC(ctor) = 1;
+ TREE_CONSTANT(ctor) = 1;
+ for(int i=0; i<160; i++)
+ {
+ CONSTRUCTOR_APPEND_ELT( CONSTRUCTOR_ELTS(ctor),
+ build_int_cst_type(SIZE_T, i),
+ build_int_cst_type(UCHAR, dp2bin[i]) );
+ }
+
+ // A private, read-only, initialized static table.
+ TREE_STATIC (decl) = 1;
+ TREE_PUBLIC (decl) = 0;
+ DECL_EXTERNAL (decl) = 0;
+ TREE_READONLY (decl) = 1;
+ DECL_ARTIFICIAL (decl) = 1;
+ DECL_IGNORED_P (decl) = 1;
+ TREE_USED (decl) = 1;
+
+ // Attach the initializer exactly once (the original assigned it twice).
+ DECL_INITIAL (decl) = ctor;
+
+ return decl;
+ }
+
+/* Emits runtime code that accumulates, into VALUE, the unsigned binary
+   value of the packed-decimal (BCD) bytes starting at the runtime address
+   BASE, covering PLACES bytes (two decimal digits per byte).
+
+   Divide-and-conquer: each level converts a left span and a right span and
+   combines them as left * 10^(2*rplaces) + right, using the narrowest
+   unsigned type that can hold the digit count.  The sign nybble is NOT
+   handled here; see binary_from_comp_3 for that.  */
+static void
+d_and_q_packed(tree &value, tree base, size_t places)
+  {
+  // Lazily create the byte->binary lookup table declaration.
+  // NOTE(review): function-local static inside the compiler; this assumes a
+  // single output file per compiler invocation -- confirm.
+  static tree dp2bin = make_dp2bin_decl();
+
+  switch(places)
+    {
+    case 0:
+      {
+      // This is what happens for a single-digit comp-3
+      tree type = UCHAR;
+      value = gg_define_variable(type);
+      gg_assign(value, gg_cast(type, integer_zero_node));
+      break;
+      }
+    case 1:
+      {
+      // One place is two digits:
+      tree type = UCHAR;
+      value = gg_define_variable(type);
+      gg_assign(value, gg_array_value(dp2bin, gg_indirect(base)));
+      break;
+      }
+    case 2:
+      {
+      // Four digits:
+      tree type = USHORT;
+      value = gg_define_variable(type);
+      tree lhalf;
+      tree rhalf;
+      size_t rplaces = 1;
+      size_t lplaces = places - rplaces;
+      // presumably pots[n] == 10^n, so this is the right-half multiplier
+      // -- confirm against the pots[] definition.
+      size_t pot = pots[rplaces*2];
+      d_and_q_packed(lhalf, base, lplaces);
+      d_and_q_packed(rhalf,
+                     gg_add(base, build_int_cst_type(SIZE_T, lplaces)),
+                     rplaces);
+      // value = lhalf * 10^(2*rplaces) + rhalf
+      gg_assign(value,
+                gg_add(gg_cast(type, gg_multiply(gg_cast(type, lhalf),
+                                                 build_int_cst_type(type, pot))),
+                       gg_cast(type, rhalf)));
+      break;
+      }
+    case 3: // six digits
+    case 4: // eight digits
+      {
+      tree type = UINT;
+      value = gg_define_variable(type);
+      tree lhalf;
+      tree rhalf;
+      size_t rplaces = 2;
+      size_t lplaces = places - rplaces;
+      size_t pot = pots[rplaces*2];
+      d_and_q_packed(lhalf, base, lplaces);
+      d_and_q_packed(rhalf,
+                     gg_add(base, build_int_cst_type(SIZE_T, lplaces)),
+                     rplaces);
+      gg_assign(value,
+                gg_add(gg_cast(type, gg_multiply(gg_cast(type, lhalf), build_int_cst_type(type, pot))),
+                       gg_cast(type, rhalf)));
+      break;
+      }
+    case 5: // ten digits
+    case 6: // twelve digits
+    case 7: // fourteen digits
+    case 8: // sixteen digits
+      {
+      tree type = ULONG;
+      value = gg_define_variable(type);
+      tree lhalf;
+      tree rhalf;
+      size_t rplaces = 4;
+      size_t lplaces = places - rplaces;
+      size_t pot = pots[rplaces*2];
+      d_and_q_packed(lhalf, base, lplaces);
+      d_and_q_packed(rhalf,
+                     gg_add(base, build_int_cst_type(SIZE_T, lplaces)),
+                     rplaces);
+      gg_assign(value,
+                gg_add(gg_cast(type, gg_multiply(gg_cast(type, lhalf), build_int_cst_type(type, pot))),
+                       gg_cast(type, rhalf)));
+      break;
+      }
+    default:
+      {
+      // This is nine places (eighteen digits) and up, We are going to peel
+      // off eight places (sixteen digits) at a time:
+      tree type = UINT128;
+      value = gg_define_variable(type);
+      tree lhalf;
+      // Convert any leading chunk that is not a multiple of eight first.
+      size_t lplaces = places % 8;
+      if( lplaces != 0 )
+        {
+        d_and_q_packed(lhalf, base, lplaces);
+        gg_assign(value, gg_cast(type, lhalf));
+        places -= lplaces;
+        // NOTE: BASE is advanced at *runtime* here; the generated code
+        // modifies the caller's base variable.
+        gg_assign(base, gg_add(base, build_int_cst_type(SIZE_T, lplaces)));
+        }
+      else
+        {
+        gg_assign(value, gg_cast(type, integer_zero_node));
+        }
+      // The remaining places is a multiple of eight:
+      size_t pot = pots[8 * 2];
+      while( places )
+        {
+        // value = value * 10^16 + next sixteen digits
+        gg_assign(value, gg_multiply(value, build_int_cst_type(type, pot)));
+        tree rhalf;
+        d_and_q_packed(rhalf, base, 8);
+        gg_assign(value, gg_add(value, gg_cast(type, rhalf)));
+        gg_assign(base, gg_add(base, build_int_cst_type(SIZE_T, 8)));
+        places -= 8;
+        }
+      }
+    }
+  }
+
+/* Emits runtime code that extracts the binary value of a signed
+   packed-decimal (COMP-3) field into VALUE, a new variable of TYPE (or of
+   the field's natural type when TYPE is NULL_TREE).  Always returns true.  */
+static bool
+binary_from_comp_3(tree &value, const cbl_refer_t &refer, tree type)
+  {
+  bool retval = false;
+
+  // This is where we build the actual numeric value of the digits of the
+  // COBOL packed-decimal variable.  It is up to the caller to interpret
+  // scaledness and rdigits and so forth.
+
+  tree source_type = tree_type_from_field(refer.field);
+
+  if( !type )
+    {
+    type = source_type;
+    }
+
+  tree working = gg_define_variable(source_type);
+
+  tree base = gg_define_variable(UCHAR_P);
+  // NOTE(review): the cast target is build_pointer_type(UCHAR_P), i.e.
+  // unsigned char **, yet it is assigned to a UCHAR_P variable -- confirm
+  // this is intended and not meant to be a cast to UCHAR_P.
+  gg_assign(base,
+            gg_cast(build_pointer_type(UCHAR_P),
+                    member(refer.field->var_decl_node,"data")));
+  if( !refer_is_clean(refer) )
+    {
+    // Apply the run-time offset for subscripts/refmods.
+    gg_assign(base, gg_add(base, refer_offset(refer)));
+    }
+
+  // This is the location of the byte holding the sign nybble
+  tree sign_location = gg_define_variable(UCHAR_P);
+
+  // The sign nybble is in the last byte:
+  gg_assign(sign_location,
+            gg_add(base,
+                   build_int_cst_type(SIZE_T,
+                                      refer.field->data.capacity()-1)));
+  tree d_and_q;
+  // Pick up the binary value of the first capacity-1 places
+  d_and_q_packed(d_and_q,
+                 base,
+                 refer.field->data.capacity()-1);
+  // Multiply that by 10
+  tree d_and_q_10 = gg_multiply(gg_cast(source_type, d_and_q),
+                                build_int_cst_type(source_type, 10));
+  // Pick up the final digit (high nybble of the sign byte)
+  tree final_digit = gg_rshift(gg_indirect(sign_location),
+                               build_int_cst_type(SIZE_T, 4));
+  // Add the results together, negating when the sign nybble is 0x0D
+  // (the packed-decimal "negative" indicator).
+  tree x0f = build_int_cst_type(UCHAR, 0x0F);
+  tree x0d = build_int_cst_type(UCHAR, 0x0D);
+  IF( gg_bitwise_and( gg_indirect(sign_location), x0f), eq_op, x0d )
+    {
+    gg_assign(working,
+              gg_cast(source_type, gg_negate(gg_add(d_and_q_10, final_digit))));
+    }
+  ELSE
+    {
+    gg_assign(working, gg_cast(source_type, gg_add(d_and_q_10, final_digit)));
+    }
+  ENDIF
+
+  value = gg_define_variable(type);
+  gg_assign(value, gg_cast(type, working));
+
+  retval = true;
+  return retval;
+  }
+
+/* Emits runtime code that extracts the binary value of an unsigned
+   packed-decimal (COMP-6, no sign nybble) field into VALUE, a new variable
+   of TYPE.  Unlike binary_from_comp_3, TYPE gets no NULL_TREE default
+   here -- presumably callers always supply one; verify.  Always returns
+   true.  */
+static bool
+binary_from_comp_6(tree &value, const cbl_refer_t &refer, tree type)
+  {
+  bool retval = false;
+
+  // This is where we build the actual numeric value of the digits of the
+  // COBOL packed-decimal variable.  It is up to the caller to interpret
+  // scaledness and rdigits and so forth.
+  value = gg_define_variable(type);
+
+  tree base = gg_define_variable(UCHAR_P);
+  // NOTE(review): same unsigned char ** cast oddity as binary_from_comp_3
+  // -- confirm intended.
+  gg_assign(base,
+            gg_cast(build_pointer_type(UCHAR_P),
+                    member(refer.field->var_decl_node,"data")));
+  if( !refer_is_clean(refer) )
+    {
+    gg_assign(base, gg_add(base, refer_offset(refer)));
+    }
+
+  // Every byte holds two digits; no sign nybble to strip.
+  tree d_and_q;
+  d_and_q_packed(d_and_q,
+                 base,
+                 refer.field->data.capacity());
+  gg_assign(value, gg_cast(type, d_and_q));
+
+  retval = true;
+  return retval;
+  }
+
+/* Extracts the binary value of a packed-decimal field into VALUE.
+   Dispatches on the packed_no_sign_e attribute: COMP-6 (unsigned packed)
+   fields go to binary_from_comp_6, everything else (COMP-3) to
+   binary_from_comp_3.  Returns whichever result the helper reports.  */
+bool
+binary_from_FldPacked(tree &value, const cbl_refer_t &refer, tree type)
+  {
+  const bool is_comp_6 = (refer.field->attr & packed_no_sign_e) != 0;
+  return is_comp_6 ? binary_from_comp_6(value, refer, type)
+                   : binary_from_comp_3(value, refer, type);
+  }
+
+/* Emits runtime code that loads a floating-point field's bits into VALUE,
+   a new variable of TYPE (or of the field's natural type when TYPE is
+   NULL_TREE), by dereferencing the data location through a pointer to the
+   source type.  Always returns true.  */
+static
+bool binary_from_FldFloat(tree &value, const cbl_refer_t &refer, tree type)
+  {
+  tree source_type = tree_type_from_field(refer.field);
+
+  if( !type )
+    {
+    type = source_type;
+    }
+
+  value = gg_define_variable(type);
+
+  // Resolve the runtime address of the field's data.
+  tree base;
+  get_location(base, refer);
+
+  // value = (type) *(source_type *)base
+  gg_assign(value,
+            gg_cast(type,
+                    gg_indirect(gg_cast(build_pointer_type(source_type),
+                                        base))));
+
+  return true;
+  }
+
+/* Returns, in VALUE, a new variable of TYPE holding the binary value of
+   REFER.  When TYPE is NULL_TREE the per-source-type helpers choose a
+   type.  Returns true on success; a false return means no usable value
+   was produced.  */
+bool
+get_binary_value(tree &value, const cbl_refer_t &refer, tree type)
+  {
+  bool retval = false;
+  /* There are other get binary value routines.  This one is intended to be the
+     "best in class" version, incorporating everything that's been learned
+     about the process, and incorporating compiler SSA guidelines. */
+
+  if( (refer.field->attr & FIGCONST_MASK) == zero_value_e )
+    {
+    // The ZERO figurative constant is not flagged as signed:
+    if( !type )
+      {
+      type = UINT;
+      }
+    value = gg_define_variable(type);
+    gg_assign(value, gg_cast(type, integer_zero_node));
+    retval = true;
+    }
+  else if( refer.addr_of )
+    {
+    // The case of ADDRESS OF
+    // NOTE(review): unlike the ZERO case, TYPE gets no default when it is
+    // NULL_TREE here; presumably ADDRESS OF callers always pass one --
+    // confirm.
+    value = gg_define_variable(type);
+    tree base = gg_define_variable(UCHAR_P);
+    gg_assign(base, member(refer.field->var_decl_node, "data"));
+    if( !refer_is_clean(refer) )
+      {
+      gg_assign(base, gg_add(base, refer_offset(refer)));
+      }
+    gg_assign(value, gg_cast(type, base));
+    // Fix: this branch produced a valid value but previously left retval
+    // false, making ADDRESS OF look like a failure to the caller.
+    retval = true;
+    }
+  else
+    {
+    // We know that the refer is a type that involves an integer binary value.
+    switch(refer.field->type)
+      {
+      case FldNumericBin5:
+      case FldLiteralN:
+      case FldIndex:
+      case FldPointer:
+        retval = binary_from_FldNumericBin5(value, refer, type);
+        break;
+
+      case FldNumericBinary:
+        retval = binary_from_FldNumericBinary(value, refer, type);
+        break;
+
+      case FldNumericDisplay:
+        retval = binary_from_FldNumericDisplay(value, refer, type);
+        break;
+
+      case FldPacked:
+        retval = binary_from_FldPacked(value, refer, type);
+        break;
+
+      case FldFloat:
+        retval = binary_from_FldFloat(value, refer, type);
+        break;
+
+      default:
+        gcc_unreachable();
+        break;
+      }
+    }
+  return retval;
+  }
+
+/* Sets RETVAL to a new UCHAR_P variable holding the runtime address of
+   REFER's data.  For "super clean" refers (no refmods, subscripts, or
+   runtime-variable placement) the address is computed from the static
+   data_decl_node plus the compile-time offset; otherwise the runtime
+   "data" member plus refer_offset() is used.  */
+void
+get_location(tree &retval, const cbl_refer_t &refer)
+  {
+  // This routine looks at a refer and returns a UCHAR_P pointer to the data
+  // of the object.
+  retval = gg_define_variable(UCHAR_P);
+  if( refer_is_super_clean(refer) )
+    {
+    // Working storage, not external, no refmods or subscripts:
+    // gg_assign(retval, member(refer.field->var_decl_node,"data"));
+    tree base = gg_cast(UCHAR_P,
+                        gg_get_address(refer.field->data_decl_node));
+    if( refer.field->offset )
+      {
+      // Compile-time offset within the enclosing record.
+      tree offset = build_int_cst_type(SIZE_T, refer.field->offset);
+      gg_assign(retval, gg_cast(UCHAR_P, gg_add(base, offset)));
+      }
+    else
+      {
+      gg_assign(retval, base);
+      }
+    }
+  else
+    {
+    // The variable is external, or intermediate_e, or there are subscripts, or
+    // there are refmods.  We use the run-time "data", and add the run-time
+    // offset to it.
+    gg_assign(retval,
+              gg_add(member(refer.field->var_decl_node,"data"),
+                     refer_offset(refer)));
+    }
+  }
+
+/* Sets RETVAL to a tree expression for REFER's data length in bytes:
+   the compile-time capacity when it is known, the runtime "capacity"
+   member for ANY LENGTH / intermediate fields, or refer_size_source()
+   when refmods/subscripts are involved.  */
+void
+get_length(tree &retval, const cbl_refer_t &refer)
+  {
+  if(refer_is_clean(refer))
+    {
+    if( refer.field->attr & any_length_e
+        || refer.field->attr & intermediate_e )
+      {
+      // We need the run-time capacity.
+      retval = member(refer.field->var_decl_node, "capacity");
+      }
+    else
+      {
+      // We can use the compile-time capacity.
+      retval = build_int_cst_type(SIZE_T, refer.field->data.capacity());
+      }
+    }
+  else
+    {
+    // A refmod or subscript changes the effective size at run time.
+    retval = refer_size_source(refer);
+    }
+  }
extern tree var_decl_call_parameter_count; // int __gg__call_parameter_count
extern tree var_decl_call_parameter_lengths; // size_t *var_decl_call_parameter_lengths
-extern tree var_decl_arithmetic_rounds_size; // size_t __gg__arithmetic_rounds_size;
-extern tree var_decl_arithmetic_rounds; // int* __gg__arithmetic_rounds;
-extern tree var_decl_fourplet_flags_size; // size_t __gg__fourplet_flags_size;
-extern tree var_decl_fourplet_flags; // int* __gg__fourplet_flags;
-
-extern tree var_decl_treeplet_1f; // cblc_field_pp_type_node , "__gg__treeplet_1f"
-extern tree var_decl_treeplet_1o; // SIZE_T_P , "__gg__treeplet_1o"
-extern tree var_decl_treeplet_1s; // SIZE_T_P , "__gg__treeplet_1s"
-extern tree var_decl_treeplet_2f; // cblc_field_pp_type_node , "__gg__treeplet_2f"
-extern tree var_decl_treeplet_2o; // SIZE_T_P , "__gg__treeplet_2o"
-extern tree var_decl_treeplet_2s; // SIZE_T_P , "__gg__treeplet_2s"
-extern tree var_decl_treeplet_3f; // cblc_field_pp_type_node , "__gg__treeplet_3f"
-extern tree var_decl_treeplet_3o; // SIZE_T_P , "__gg__treeplet_3o"
-extern tree var_decl_treeplet_3s; // SIZE_T_P , "__gg__treeplet_3s"
-extern tree var_decl_treeplet_4f; // cblc_field_pp_type_node , "__gg__treeplet_4f"
-extern tree var_decl_treeplet_4o; // SIZE_T_P , "__gg__treeplet_4o"
-extern tree var_decl_treeplet_4s; // SIZE_T_P , "__gg__treeplet_4s"
extern tree var_decl_nop; // int __gg__nop
extern tree var_decl_main_called; // int __gg__main_called
extern tree var_decl_entry_index; // void* __gg__entry_index
int get_scaled_rdigits(cbl_field_t *field);
int get_scaled_digits(cbl_field_t *field);
-tree tree_type_from_digits(size_t digits, int signable);
-tree tree_type_from_size(size_t bytes, int signable);
+tree tree_type_from_digits(size_t digits, uint64_t signable);
+tree tree_type_from_size(size_t bytes, uint64_t signable);
void get_binary_value( tree value,
tree rdigits,
char *get_literal_string(cbl_field_t *field);
bool refer_is_clean(const cbl_refer_t &refer);
+bool refer_is_super_clean(const cbl_refer_t &refer);
+bool refer_is_working_storage(const cbl_refer_t &refer);
-tree refer_offset(const cbl_refer_t &refer,
- int *pflags=NULL);
+tree refer_offset(const cbl_refer_t &refer, int *pflags=NULL);
tree refer_size_source(const cbl_refer_t &refer);
tree refer_size_dest(const cbl_refer_t &refer);
tree qualified_data_location(const cbl_refer_t &refer);
-void build_array_of_treeplets( int ngroup,
- size_t N,
+tree build_array_of_referlets( size_t N,
cbl_refer_t *refers);
-void build_array_of_fourplets( int ngroup,
- size_t N,
- cbl_refer_t *refers);
+tree build_array_of_refers(size_t N,
+ cbl_refer_t *refers);
void get_depending_on_value_from_odo(tree retval, cbl_field_t *odo);
uint64_t get_time_nanoseconds();
bool is_pure_integer(const cbl_field_t *field);
+tree tree_type_from_field(const cbl_field_t *field);
+
+bool get_binary_value(tree &value,
+ const cbl_refer_t &refer,
+ tree type = NULL_TREE);
+void get_location(tree &retval, const cbl_refer_t &refer);
+void get_length(tree &retval, const cbl_refer_t &refer);
+
#endif
%printer { fprintf(yyo, "%c %s",
$$.invert? '!' : ' ',
$$.term? name_of($$.term->field) : "<none>"); } <rel_term_t>
+%printer { fprintf(yyo, "%s", $$->dbgstr()); } <log_expr_t>
%printer { fprintf(yyo, "%s (token %d)", keyword_str($$), $$ ); } relop
%printer { fprintf(yyo, "'%s'", $$? $$ : "" ); } NAME <string>
* as specified in the rules."
*/
$$ = cbl_refer_t::empty();
- if( dialect_ibm() ) {
- static auto rt = cbl_field_of(symbol_at(return_code_register()));
- static cbl_refer_t status(rt);
- $$ = &status;
- }
const auto prog = cbl_label_of(symbol_at(current_program_index()));
if( prog->returning ) {
$$ = new cbl_refer_t( cbl_field_of(symbol_at(prog->returning)) );
dbgmsg("%s:%d: logic error: %s is not a truth value",
__func__, __LINE__, name_of(rhs));
} else {
- parser_logop( andable, andable, and_op, rhs );
+ auto cond = new_temporary(FldConditional);
+ parser_logop( cond, andable, and_op, rhs );
+ andable = cond;
}
return this;
}
if( ! orable ) {
orable = andable;
} else {
- parser_logop( orable, orable, or_op, andable );
+ auto cond = new_temporary(FldConditional);
+ parser_logop( cond, orable, or_op, andable );
+ orable = cond;
}
andable = rhs;
return this;
cbl_field_t * resolve() {
assert(andable);
if( orable ) {
- parser_logop( andable, orable, or_op, andable );
+ auto cond = new_temporary(FldConditional);
+ parser_logop( cond, orable, or_op, andable );
+ andable = cond;
orable = NULL;
}
assert(!orable);
bool unresolved() const {
return orable != NULL;
}
+
+
+  // Debug helper for the bison %printer directive: renders the pending
+  // operands as "<andable> or <orable>".  Returns a pointer to a static
+  // buffer, so it is not reentrant and each call overwrites the last.
+  const char * dbgstr() const {
+    static char msg[64 * 2 + 16];
+    int pos = 0;
+    if( andable ) {
+      pos = sprintf(msg, "%s", andable->name);
+      assert(0 < pos);
+    }
+    if( orable ) {
+      // NOTE(review): 'pos' is overwritten rather than advanced; harmless
+      // since it is unused afterwards.
+      pos = sprintf(msg + pos, " or %s", orable->name);
+    }
+    return msg;
+  }
};
static void ast_enter_section( cbl_label_t * );
// tokens.h is generated as needed from parse.h with tokens.h.gen
current_tokens_t::tokenset_t::tokenset_t() {
#include "token_names.h"
-};
+}
tree cblc_file_type_node;
tree cblc_file_p_type_node;
tree cblc_goto_type_node;
+tree cblc_referlet_type_node;
+tree cblc_refer_type_node;
// The following functions return type_decl nodes for the various structures
// When doing FILE I/O, you need the cblc_file_t structure
/*
-typedef struct cblc_file_t*
+typedef struct cblc_file_t
{
char *name; // This is the name of the structure; might be the name of an environment variable
uint64_t symbol_index; // The symbol table index of the related cbl_file_t structure
return retval;
}
+/* Builds the GENERIC record type for cblc_referlet_t.  The field layout
+   must match the runtime structure of the same name in libgcobol.  */
+static tree
+create_referlet_t()
+  {
+  /*
+  typedef struct cblc_referlet_t
+    {
+    cblc_field_t *field;
+    size_t        offset;
+    size_t        size;
+    } cblc_referlet_t;
+  */
+  tree retval = gg_get_structure_type_decl("cblc_referlet_t",
+                                           cblc_field_p_type_node, "field",
+                                           SIZE_T,                 "offset",
+                                           SIZE_T,                 "size",
+                                           NULL_TREE);
+  return retval;
+  }
+
+/* Builds the GENERIC record type for cblc_refer_t: a referlet plus a
+   flags word.  The layout must match the runtime structure of the same
+   name in libgcobol.  */
+static tree
+create_refer_t()
+  {
+  /*
+  typedef struct cblc_refer_t
+    {
+    cblc_field_t *field;
+    size_t        offset;
+    size_t        size;
+    int           flags;
+    } cblc_refer_t;
+  */
+  tree retval = gg_get_structure_type_decl("cblc_refer_t",
+                                           cblc_field_p_type_node, "field",
+                                           SIZE_T,                 "offset",
+                                           SIZE_T,                 "size",
+                                           INT,                    "flags",
+                                           NULL_TREE);
+  return retval;
+  }
+
void
create_our_type_nodes()
{
cblc_field_pp_type_node = build_pointer_type(cblc_field_p_type_node);
cblc_file_type_node = create_cblc_file_t();
cblc_file_p_type_node = build_pointer_type(cblc_file_type_node);
+ cblc_referlet_type_node = create_referlet_t();
+ cblc_refer_type_node = create_refer_t();
}
}
extern void member(tree var, const char *member_name, tree value);
extern void member(cbl_field_t *var, const char *member_name, tree value);
extern void member2(tree var, const char *member_name, const char *submember, int value);
extern void member2(tree var, const char *member_name, const char *submember, tree value);
extern void member3(tree var, const char *mem, const char *sub1, const char *sub2, tree value);
extern GTY(()) tree cblc_file_type_node;
extern GTY(()) tree cblc_file_p_type_node;
extern GTY(()) tree cblc_goto_type_node;
+extern GTY(()) tree cblc_referlet_type_node;
+extern GTY(()) tree cblc_refer_type_node;
extern void create_our_type_nodes();
bool yn(p.second);
assert(yn);
return *p.first;
-};
+}
cbl_field_t *
temporaries_t::reuse( cbl_field_type_t type ) {
tree right; // This is a long
tree middle; // This is our copy of the index, so we only need to write
// it and never read it.
- tree compare_result; // This is an int, and avoids
struct cbl_field_t *index;
bool first_when;
};
cbl_field_t *new_alphanumeric(const cbl_name_t name=nullptr,
cbl_encoding_t encoding=no_encoding_e );
-
// ENABLE_HIJACKING allows for code generation to be "hijacked" when the
-// program-id is "dubner" or "hijack". See the mainline code in genapi.cc.
-
-// To enable hijacking, use
-//
-// make ... CPPFLAGS=-DENABLE_HIJACKING
-//
-// taking care to recaptulate whatever CPPFLAGS were set when configure was
-// run.
+// program-id is "dubner_h" or "hijack_h". See the mainline code in genapi.cc.
+
+#define ENABLE_HIJACKING
#endif
case FldSwitch:
gcc_unreachable();
case FldAlphanumeric:
- // MF allows PIC X(n) to have USAGE COMP-[5x]
+ // MF and GNU allow pic x usage comp-5.
+ // Dialect enforcement in that case is in field_binary_usage.
if( candidate != FldNumericBin5 ) return false;
- if( ! (dialect_mf() && field->has_attr(all_x_e)) ) {
- return false;
- }
__attribute__((fallthrough));
case FldFloat:
case FldNumericBin5:
END-IF.
MOVE SRC1 TO DST2.
- IF DST1 not = 11.55
+ IF DST2 not = 11.55
DISPLAY 'error: move/compare FLOAT-LONG to FLOAT-LONG failed ' DST2
END-DISPLAY
END-IF.
working-storage section.
01 msg pic x(32).
procedure division.
- move "This is foo" to msg
+ move "This is FOO" to msg
display "About to call FOO"
- call "foo" using msg
- move "This is bar" to msg
+ call "FOO" using msg
+ move "This is BAR" to msg
display "About to call BAR"
- call "bar" using msg
- move "This is foo2" to msg
+ call "BAR" using msg
+ move "This is FOO2" to msg
display "About to call FOO again"
- call "foo" using msg
+ call "FOO" using msg
+
+ move "This is BAZZ" to msg
+ display "About to call BAZZ"
+ call "BAZZ" using msg
+ move "This is FAZZ" to msg
+ display "About to call FAZZ"
+ call "FAZZ" using msg
+ move "This is baz2" to msg
+ display "About to call BAZZ again"
+ call "BAZZ" using msg
+
goback.
end program prog.
identification division.
- program-id. foo.
+ program-id. FOO.
data division.
linkage section.
01 msg pic x(32).
procedure division using msg.
- display " entry point foo: " function trim (msg)
- entry "bar"
- display " entry point bar: " function trim (msg)
+ display " entry point FOO: " function trim (msg)
+ entry "BAR"
+ display " entry point BAR: " function trim (msg)
+ goback.
+ entry "BAZZ"
+ display " entry point BAZZ: " function trim (msg)
+ entry "FAZZ"
+ display " entry point FAZZ: " function trim (msg)
goback.
- end program foo.
+ end program FOO.
About to call FOO
- entry point foo: This is foo
- entry point bar: This is foo
+ entry point FOO: This is FOO
+ entry point BAR: This is FOO
About to call BAR
- entry point bar: This is bar
+ entry point BAR: This is BAR
About to call FOO again
- entry point foo: This is foo2
- entry point bar: This is foo2
+ entry point FOO: This is FOO2
+ entry point BAR: This is FOO2
+About to call BAZZ
+ entry point BAZZ: This is BAZZ
+ entry point FAZZ: This is BAZZ
+About to call FAZZ
+ entry point FAZZ: This is FAZZ
+About to call BAZZ again
+ entry point BAZZ: This is baz2
+ entry point FAZZ: This is baz2
*> { dg-do run }
- *> { dg-set-target-env-var TZ UTC0 }
+ *> { dg-set-target-env-var TZ "UTC0" }
identification division.
program-id. testy.
--- /dev/null
+ *> { dg-do run }
+ *> { dg-output-file "group2/FUNCTION_TRIM_with_NATIONAL_characters.out" }
+ identification division.
+ program-id. prog.
+ environment division.
+ configuration section.
+ special-names.
+ locale cp1252 is "cp1252"
+ locale utf16le is "utf16le".
+ Object-Computer.
+ linux
+ Character Classification
+ for Alphanumeric is cp1252
+ for National is utf16le.
+ data division.
+ working-storage section.
+ 01 from-8 pic x(10) value " 8888 ".
+ 01 from-16 pic n(10) value N" 1616 ".
+ 01 to-8 pic x(10).
+ 01 to-16 pic n(10).
+ procedure division.
+ display "Alphanumeric literal " function byte-length(from-8)
+ display """" from-8 """"
+ display """" function trim(from-8 leading ) """"
+ display """" function trim(from-8 trailing ) """"
+ display """" function trim(from-8) """"
+
+ display "National literal " function byte-length(from-16)
+ display """" from-16 """"
+ display """" function trim(from-16 leading )""""
+ display """" function trim(from-16 trailing )""""
+ display """" function trim(from-16)""""
+
+ display "Alphanumeric data item " function byte-length(from-8)
+ move from-8 to to-8
+ display """" to-8 """"
+ move function trim(from-8 leading) to to-8
+ display """" to-8 """"
+ move function trim(from-8 trailing) to to-8
+ display """" to-8 """"
+ move function trim(from-8) to to-8
+ display """" to-8 """"
+
+ display "National data item " function byte-length(from-16)
+ move from-16 to to-16
+ display """" to-16 """"
+ move function trim(from-16 leading) to to-16
+ display """" to-16 """"
+ move function trim(from-16 trailing) to to-16
+ display """" to-16 """"
+ move function trim(from-16) to to-16
+ display """" to-16 """"
+ goback.
+ end program prog.
+
--- /dev/null
+Alphanumeric literal 10
+" 8888 "
+"8888 "
+" 8888"
+"8888"
+National literal 20
+" 1616 "
+"1616 "
+" 1616"
+"1616"
+Alphanumeric data item 10
+" 8888 "
+"8888 "
+" 8888 "
+"8888 "
+National data item 20
+" 1616 "
+"1616 "
+" 1616 "
+"1616 "
+
END-IF.
IF FUNCTION ABS(000.0) NOT EQUAL TO ZERO
MOVE 1 TO RETURN-CODE
- DISPLAY "FUNCTION ABS(-000.0) FAILS."
+ DISPLAY "FUNCTION ABS(000.0) FAILS."
END-IF.
--- /dev/null
+ *> { dg-do run }
+ *> { dg-output-file "group2/Large_PIC_10000000_.out" }
+ identification division.
+ program-id. prog.
+ data division.
+ working-storage section.
+ 01 uppered pic x(10000000) value all "A".
+ 01 lowered pic x(10000000).
+ procedure division.
+ move function lower-case(uppered) to lowered
+ move 'X' to lowered(1:1)
+ move 'Z' to lowered(10000000:1)
+ display lowered(1:10)
+ display lowered(9999991:10)
+ goback.
+ end program prog.
+
--- /dev/null
+Xaaaaaaaaa
+aaaaaaaaaZ
+
--- /dev/null
+ *> { dg-do run }
+ *> { dg-output-file "group2/Nested_PERFORM.out" }
+
+ IDENTIFICATION DIVISION.
+ PROGRAM-ID. prog.
+ PROCEDURE DIVISION.
+ PERFORM 2 TIMES
+ DISPLAY "X" NO ADVANCING
+ END-DISPLAY
+ PERFORM 2 TIMES
+ DISPLAY "Y" NO ADVANCING
+ END-DISPLAY
+ END-PERFORM
+ END-PERFORM.
+ STOP RUN.
+
--- /dev/null
+ *> { dg-do run }
+ *> { dg-output-file "group2/Overlapping_MOVE.out" }
+ identification division.
+ program-id. prog.
+ data division.
+ working-storage section.
+ 01 structure.
+ 05 field1 pic x(5).
+ 05 field2 pic x(10).
+ procedure division.
+ move "Hallo" to field1.
+ move "1234567890" to field2.
+ *> The ISO specification says specifically that when an overlapping
+ *> move takes place in a single data descriptor, that it behaves
+      *> like a memmove(3), and not a memcpy(3):
+ move field2 to structure.
+ display """" structure """"
+ if field1 not = "12345"
+ display "error:1: " field1
+ end-display
+ end-if
+ if field2 not = "67890 "
+ display "error:2: " field2
+ end-display
+ end-if
+ goback.
+ end program prog.
+
--- /dev/null
+"1234567890 "
+
--- /dev/null
+ *> { dg-do run }
+ *> { dg-output-file "group2/PERFORM_TIMES_subscripted.out" }
+ identification division.
+ program-id. prog.
+ data division.
+ working-storage section.
+ 01 idx pic 9.
+ 01 cnt-tab.
+ 05 cnt-val occurs 3 times pic 9.
+ procedure division.
+ move 1 to cnt-val (1)
+ move 2 to cnt-val (2)
+ move 3 to cnt-val (3)
+ perform varying idx from 1 by 1 until idx > 3
+ perform cnt-val (idx) times
+ display idx
+ end-perform
+ end-perform
+ goback.
+ end program prog.
+
--- /dev/null
+1
+2
+2
+3
+3
+3
+
--- /dev/null
+ *> { dg-do run }
+ *> { dg-output-file "group2/PERFORM_VARYING_BY_-0.2.out" }
+
+ IDENTIFICATION DIVISION.
+ PROGRAM-ID. prog.
+ DATA DIVISION.
+ WORKING-STORAGE SECTION.
+ 77 X PIC 9v9.
+ PROCEDURE DIVISION.
+ PERFORM VARYING X FROM 0.8 BY -0.2
+ UNTIL X < 0.4
+ DISPLAY "X" NO ADVANCING
+ END-DISPLAY
+ END-PERFORM.
+ IF X NOT = 0.2
+ DISPLAY "WRONG X: " X END-DISPLAY
+ END-IF
+ STOP RUN.
+
--- /dev/null
+ *> { dg-do run }
+ *> { dg-output-file "group2/REDEFINES__chained.out" }
+ IDENTIFICATION DIVISION.
+ PROGRAM-ID. chained_REDEFINES.
+ DATA DIVISION.
+ WORKING-STORAGE SECTION.
+ 01 REC.
+ 10 ZIP-9 PIC 9(9).
+ 10 ZIP-RED REDEFINES ZIP-9.
+ 12 ZIP-5 PIC 9(5).
+ 12 ZIP-PLUS-4 PIC 9(4).
+ 10 POSTAL-CORRECT-IND REDEFINES ZIP-9.
+ 12 FILLER PIC X(8).
+ 12 POST-CORRT-IND PIC X(1).
+ PROCEDURE DIVISION.
+ MOVE 123456789 TO ZIP-9
+ DISPLAY """" ZIP-9 """"
+ IF ZIP-5 NOT = 12345
+ DISPLAY "ZIP-5 wrong: " ZIP-5
+ END-DISPLAY
+ END-IF
+ DISPLAY """" ZIP-PLUS-4 """"
+ IF ZIP-PLUS-4 NOT = 6789
+ DISPLAY "ZIP-PLUS-4 wrong: " ZIP-PLUS-4
+ END-DISPLAY
+ END-IF
+ MOVE "X" TO POST-CORRT-IND
+ DISPLAY """" ZIP-9 """"
+ DISPLAY """" POSTAL-CORRECT-IND """"
+ DISPLAY """" POST-CORRT-IND """"
+ IF POST-CORRT-IND NOT = "X"
+ DISPLAY "POST-CORRT-IND wrong: " POST-CORRT-IND
+ END-DISPLAY
+ END-IF
+ STOP RUN.
+
--- /dev/null
+"123456789"
+"6789"
+"123456780"
+"12345678X"
+"X"
+
*> { dg-do run }
*> { dg-xfail-run-if "" { *-*-* } }
+ *> { dg-options "-dialect ibm" }
IDENTIFICATION DIVISION.
PROGRAM-ID. prog.
--- /dev/null
+ *> { dg-do run }
+ *> { dg-options "-dialect ibm" }
+ *> { dg-output-file "group2/RETURN-CODE_with_INITIAL_and_RECURSIVE.out" }
+ identification division.
+ program-id. prog.
+ procedure division.
+ display "prog starting return code should be +0000: " return-code
+ call "prog-a"
+ display "prog first return code from prog-a should be +0123: " return-code
+ call "prog-a"
+ display "prog second return code from prog-a should be +0246: " return-code
+
+ call "prog-i"
+ display "prog first return code from prog-i should be +0321: " return-code
+ call "prog-i"
+ display "prog second return code from prog-i should be +0321: " return-code
+
+ call "prog-r"
+ display "prog first return code from prog-r should be -0123: " return-code
+ call "prog-r"
+ display "prog second return code from prog-r should be -0123: " return-code
+
+ move zero to return-code
+ goback.
+ end program prog.
+
+ identification division.
+ program-id. prog-a.
+ procedure division.
+ add 123 to return-code
+ goback.
+ end program prog-a.
+
+ identification division.
+ program-id. prog-i INITIAL.
+ procedure division.
+ add 321 to return-code
+ goback.
+ end program prog-i.
+
+ identification division.
+ program-id. prog-r RECURSIVE.
+ procedure division.
+ add -123 to return-code
+ goback.
+ end program prog-r.
+
--- /dev/null
+prog starting return code should be +0000: +0000
+prog first return code from prog-a should be +0123: +0123
+prog second return code from prog-a should be +0246: +0246
+prog first return code from prog-i should be +0321: +0321
+prog second return code from prog-i should be +0321: +0321
+prog first return code from prog-r should be -0123: -0123
+prog second return code from prog-r should be -0123: -0123
+
--- /dev/null
+ *> { dg-do run }
+ *> { dg-output-file "group2/Sanity_check_for_ENTRY.out" }
+ identification division.
+ program-id. prog.
+ data division.
+ working-storage section.
+ 01 foo pic x(12).
+ procedure division.
+ loop.
+ go to dispatch.
+ dispatch.
+ go to pass_1.
+ pass_1.
+ display "I am the first pass"
+ alter dispatch to pass_2.
+ go to loop.
+ pass_2.
+ display "I am the second pass"
+ alter dispatch to pass_3.
+ go to loop.
+ pass_3.
+ display "I am the third and final pass"
+ goback.
+ end program prog.
+
--- /dev/null
+I am the first pass
+I am the second pass
+I am the third and final pass
+
--- /dev/null
+ *> { dg-do run }
+ *> { dg-options "-dialect mf" }
+ *> { dg-output-file "group2/Simple_COMP-X.out" }
+ identification division.
+ program-id. compx.
+ data division.
+ working-storage section.
+ 01 byte.
+ 02 byte-val pic x(1) comp-x.
+ 01 short.
+ 02 short-val pic x(2) comp-x.
+ 01 long.
+ 02 long-val pic x(4) comp-x.
+ 01 longlong.
+ 02 longlong-val pic x(8) comp-x.
+ 01 sixteenbytes.
+ 02 sixteenbytes-val pic x(16) comp-x.
+ procedure division.
+ move high-values to byte short long longlong sixteenbytes
+ display function hex-of(byte)
+ display function hex-of(short)
+ display function hex-of(long)
+ display function hex-of(longlong)
+ display function hex-of(sixteenbytes)
+ display "byte-val is: " byte-val.
+ display "short-val is: " short-val.
+ display "long-val is: " long-val.
+ display "longlong-val is: " longlong-val.
+ display "sixteenbytes-val is: " sixteenbytes-val.
+ goback.
+ end program compx.
+
--- /dev/null
+FF
+FFFF
+FFFFFFFF
+FFFFFFFFFFFFFFFF
+FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
+byte-val is: 255
+short-val is: 65535
+long-val is: 4294967295
+longlong-val is: 8446744073709551615
+sixteenbytes-val is: 340282366920938463463374607431768211455
+
--- /dev/null
+ *> { dg-do run }
+ *> { dg-output-file "group2/compare_alpha_to_all__literal_.out" }
+ identification division.
+ program-id. prog.
+ data division.
+ working-storage section.
+ 01 var1 pic x(64) value all "Bob".
+ procedure division.
+ if var1 equal all "Bob"
+ display "It's all Bob."
+ else
+ display "It's not Bob."
+ end-if
+ goback.
+ end program prog.
+
--- /dev/null
+It's all Bob.
+
--- /dev/null
+ *> { dg-do run }
+ *> { dg-output-file "group2/compare_national_to_display.out" }
+ identification division.
+ program-id. prog.
+ environment division.
+ configuration section.
+ special-names.
+ locale greek is "cyrillic"
+ locale unicode is "utf16le".
+ object-computer.
+ xerox-parc-star
+ character classification
+ for alphanumeric is greek
+ for national is unicode.
+ data division.
+ working-storage section.
+ 01 aaa pic x(16).
+ 01 bbb pic n(16).
+ procedure division.
+ move "aaa" to aaa
+ move n"aaa" to bbb
+ if aaa equal bbb
+ display "1 - correct"
+ else
+ display "1 -- bad"
+ end-if
+ if bbb equal aaa
+ display "2 - correct"
+ else
+ display "2 -- bad"
+ end-if
+ move "aaa" to aaa
+ move n"bbb" to bbb
+ if aaa < bbb
+ display "3 - correct"
+ else
+ display "3 -- bad"
+ end-if
+ if bbb > aaa
+ display "4 - correct"
+ else
+ display "4 -- bad"
+ end-if
+ move "bbb" to aaa
+ move n"aaa" to bbb
+ if aaa > bbb
+ display "5 - correct"
+ else
+ display "5 -- bad"
+ end-if
+ if bbb < aaa
+ display "6 - correct"
+ else
+ display "6 -- bad"
+ end-if
+ goback.
+ end program prog.
+
--- /dev/null
+1 - correct
+2 - correct
+3 - correct
+4 - correct
+5 - correct
+6 - correct
+
--- /dev/null
+ *> { dg-do run }
+ *> { dg-output-file "group2/comprensive_compare_comp-1_comp-5.out" }
+ identification division.
+ program-id. prog.
+ data division.
+ working-storage section.
+ 01 aaa comp-1.
+ 01 bbb pic S999 comp-5.
+ 01 known pic xxxx.
+ 01 result pic xxxx.
+ procedure division.
+ move -1 to aaa move 1 to bbb move ".lt." to known
+ perform checker.
+ move 1 to aaa move 1 to bbb move ".eq." to known
+ perform checker.
+ move 1 to aaa move -1 to bbb move ".gt." to known
+ perform checker.
+ goback.
+ checker.
+ display "checking " space aaa space known space bbb
+ perform lt
+ perform le
+ perform eq
+ perform ge
+ perform gt
+ perform ne
+ continue.
+ lt.
+ display " .lt. " with no advancing
+ move "xxxx" to result
+ evaluate true
+ when known equal ".lt." if aaa < bbb move "Good" to result
+ else move "BAD!" to result end-if
+ when known equal ".eq." if aaa < bbb move "BAD!" to result
+ else move "Good" to result end-if
+ when known equal ".gt." if aaa < bbb move "BAD!" to result
+ else move "Good" to result end-if
+ end-evaluate
+ display space result
+ continue.
+ le.
+ display " .le. " with no advancing
+ move "xxxx" to result
+ evaluate true
+ when known equal ".lt." if aaa <= bbb
+ move "Good" to result else move "BAD!" to result end-if
+ when known equal ".eq." if aaa <= bbb
+ move "Good" to result else move "BAD!" to result end-if
+ when known equal ".gt." if aaa <= bbb
+ move "BAD!" to result else move "Good" to result end-if
+ end-evaluate
+ display space result
+ continue.
+ eq.
+ display " .eq. " with no advancing
+ move "xxxx" to result
+ evaluate true
+ when known equal ".lt." if aaa = bbb
+ move "BAD!" to result else move "Good" to result end-if
+ when known equal ".eq." if aaa = bbb
+ move "Good" to result else move "BAD!" to result end-if
+ when known equal ".gt." if aaa = bbb
+ move "BAD!" to result else move "Good" to result end-if
+ end-evaluate
+ display space result
+ continue.
+ ge.
+ display " .ge. " with no advancing
+ move "xxxx" to result
+ evaluate true
+ when known equal ".lt." if aaa >= bbb
+ move "BAD!" to result else move "Good" to result end-if
+ when known equal ".eq." if aaa >= bbb
+ move "Good" to result else move "BAD!" to result end-if
+ when known equal ".gt." if aaa >= bbb
+ move "Good" to result else move "BAD!" to result end-if
+ end-evaluate
+ display space result
+ continue.
+ gt.
+ display " .gt. " with no advancing
+ move "xxxx" to result
+ evaluate true
+ when known equal ".lt." if aaa > bbb
+ move "BAD!" to result else move "Good" to result end-if
+ when known equal ".eq." if aaa > bbb
+ move "BAD!" to result else move "Good" to result end-if
+ when known equal ".gt." if aaa > bbb
+ move "Good" to result else move "BAD!" to result end-if
+ end-evaluate
+ display space result
+ continue.
+ ne.
+ display " .ne. " with no advancing
+ move "xxxx" to result
+ evaluate true
+ when known equal ".lt." if aaa <> bbb
+ move "Good" to result else move "BAD!" to result end-if
+ when known equal ".eq." if aaa <> bbb
+ move "BAD!" to result else move "Good" to result end-if
+ when known equal ".gt." if aaa <> bbb
+ move "Good" to result else move "BAD!" to result end-if
+ end-evaluate
+ display space result
+ continue.
+ end program prog.
+
--- /dev/null
+checking -1 .lt. +001
+ .lt. Good
+ .le. Good
+ .eq. Good
+ .ge. Good
+ .gt. Good
+ .ne. Good
+checking 1 .eq. +001
+ .lt. Good
+ .le. Good
+ .eq. Good
+ .ge. Good
+ .gt. Good
+ .ne. Good
+checking 1 .gt. -001
+ .lt. Good
+ .le. Good
+ .eq. Good
+ .ge. Good
+ .gt. Good
+ .ne. Good
+
--- /dev/null
+ *> { dg-do run }
+ *> { dg-options "-dialect ibm" }
+ *> { dg-output-file "group2/refmod_with_nested_parentheses.out" }
+ IDENTIFICATION DIVISION.
+ PROGRAM-ID. refmod_nested_paren_expr.
+ DATA DIVISION.
+ WORKING-STORAGE SECTION.
+ 01 TXT PIC X(10) VALUE 'ABCDEFGHIJ'.
+ 01 N PIC 9 VALUE 2.
+ 01 OUT-CH PIC X.
+ 01 stride PIC 9.
+ PROCEDURE DIVISION.
+ *> LENGTH OF is bytes, so we need stride to handle UTF-16
+ move length of out-ch to stride
+ display TXT(((LENGTH OF TXT / (2*stride)) - (N / 2)):1)
+ MOVE TXT(((LENGTH OF TXT / (2*stride)) - (N / 2)):1)
+ TO OUT-CH
+ display OUT-CH
+ GOBACK.
+
--- /dev/null
+ *> { dg-do run }
+ *> { dg-output-file "group2/signed_unsigned_compare.out" }
+ identification division.
+ program-id. prog.
+ data division.
+ working-storage section.
+ 01 aaa binary-long signed value -1.
+ 01 bbb binary-long unsigned value 1.
+ procedure division.
+ if aaa < bbb
+ display "-1 is properly .LT. than +1"
+ else
+ display "-1 is IMPROPERLY .GE. than +1"
+ end-if
+ goback.
+ end program prog.
+
--- /dev/null
+-1 is properly .LT. than +1
+
switch(figconst)
{
case normal_value_e :
- abort();
+ // Just leave it at zero
break;
case low_value_e :
const_char = low_value_character();
FldPointer,
};
+/* In some places, I use SUPERTYPE for things like MOVES and COMPARES to
+ avoid lots of conditionals or complex multi-level switch() statements. */
+
+#define SUPERTYPE(a, b) ((static_cast<int>(a)<<5)+(static_cast<int>(b)))
+
/* BINARY, COMP, COMPUTATIONAL, COMP-4, COMPUTATIONAL-4 are the same:
* Storage, by default, is big-endian.
case file_mode_any_e: return "file_mode_any_e";
}
return "???";
-};
+}
enum module_type_t {
module_activating_e,
int alphabet; // Same as cbl_field_t::codeset::language
} cblc_field_t;
+typedef struct cblc_referlet_t
+ {
+ cblc_field_t *field;
+ size_t offset;
+ size_t size;
+ } cblc_referlet_t;
+
+typedef struct cblc_refer_t
+ {
+ cblc_field_t *field;
+ size_t offset;
+ size_t size;
+ int flags;
+ } cblc_refer_t;
+
/*
* Implementation details
*/
int alphabet; // Actually cbl_encoding_t
} cblc_file_t;
-
-/* In various arithmetic routines implemented in libgcobol, it is oftent the
- case that complicates lists of variables need to be conveyed. For example,
- "ADD A B C D GIVING E" and "ADD A TO B C D" are valid instructions.
-
- These treeplets (triplets of trees) were created to handle that. */
-
-extern cblc_field_t ** __gg__treeplet_1f;
-extern size_t * __gg__treeplet_1o;
-extern size_t * __gg__treeplet_1s;
-extern cblc_field_t ** __gg__treeplet_2f;
-extern size_t * __gg__treeplet_2o;
-extern size_t * __gg__treeplet_2s;
-extern cblc_field_t ** __gg__treeplet_3f;
-extern size_t * __gg__treeplet_3o;
-extern size_t * __gg__treeplet_3s;
-extern cblc_field_t ** __gg__treeplet_4f;
-extern size_t * __gg__treeplet_4o;
-extern size_t * __gg__treeplet_4s;
-
-extern int * __gg__fourplet_flags;
-
#endif
void
__gg__pow( cbl_arith_format_t,
size_t,
+ const cblc_referlet_t *A,
size_t,
+ const cblc_referlet_t *B,
size_t,
+ const cblc_referlet_t *C,
const cbl_round_t *rounded,
int on_error_flag,
int *compute_error
)
{
- cblc_field_t **A = __gg__treeplet_1f;
- const size_t *A_o = __gg__treeplet_1o;
- const size_t *A_s = __gg__treeplet_1s;
- cblc_field_t **B = __gg__treeplet_2f;
- const size_t *B_o = __gg__treeplet_2o;
- const size_t *B_s = __gg__treeplet_2s;
- cblc_field_t **C = __gg__treeplet_3f;
- const size_t *C_o = __gg__treeplet_3o;
- const size_t *C_s = __gg__treeplet_3s;
-
- GCOB_FP128 avalue = __gg__float128_from_qualified_field(A[0], A_o[0], A_s[0]);
- GCOB_FP128 bvalue = __gg__float128_from_qualified_field(B[0], B_o[0], B_s[0]);
+ GCOB_FP128 avalue = __gg__float128_from_qualified_field(A[0].field, A[0].offset, A[0].size);
+ GCOB_FP128 bvalue = __gg__float128_from_qualified_field(B[0].field, B[0].offset, B[0].size);
GCOB_FP128 tgt_value;
if( avalue == 0 && bvalue == 0 )
}
if( !(*compute_error & compute_error_exp_minus_by_frac) )
{
- *compute_error |= conditional_stash(C[0],
- C_o[0],
- C_s[0],
+ *compute_error |= conditional_stash(C[0].field,
+ C[0].offset,
+ C[0].size,
(on_error_flag & ON_SIZE_ERROR),
tgt_value,
*rounded);
void
__gg__add_fixed_phase1( cbl_arith_format_t ,
size_t nA,
+ const cblc_referlet_t *AA,
size_t ,
+ cblc_referlet_t *,
size_t ,
+ cblc_referlet_t *,
const cbl_round_t *,
int ,
int *compute_error
// The result goes into the temporary phase1_result.
- cblc_field_t **A = __gg__treeplet_1f;
- const size_t *A_o = __gg__treeplet_1o;
- const size_t *A_s = __gg__treeplet_1s;
-
// Let us prime the pump with the first value of A[]
- get_int256_from_qualified_field(phase1_result, phase1_rdigits, A[0], A_o[0], A_s[0]);
+ get_int256_from_qualified_field(phase1_result, phase1_rdigits, AA[0].field, AA[0].offset, AA[0].size);
// We now go into a loop adding each of the A[] values to phase1_result:
{
int temp_rdigits;
int256 temp = {};
- get_int256_from_qualified_field(temp, temp_rdigits, A[i], A_o[i], A_s[i]);
+ get_int256_from_qualified_field(temp, temp_rdigits, AA[i].field, AA[i].offset, AA[i].size);
// We have to scale the one with fewer rdigits to match the one with greater
// rdigits:
void
__gg__addf1_fixed_phase2( cbl_arith_format_t ,
size_t ,
+ cblc_referlet_t *,
size_t ,
+ cblc_referlet_t *,
size_t ,
+ const cblc_referlet_t *C,
const cbl_round_t *rounded,
int on_error_flag,
int *compute_error
)
{
- cblc_field_t **C = __gg__treeplet_3f;
- const size_t *C_o = __gg__treeplet_3o;
- const size_t *C_s = __gg__treeplet_3s;
-
// This is the assignment phase of an ADD Format 1
// We take phase1_result and accumulate it into C
bool on_size_error = !!(on_error_flag & ON_SIZE_ERROR);
- if( C[0]->type == FldFloat)
+ if( C[0].field->type == FldFloat)
{
// The target we need to accumulate into is a floating-point number, so we
// need to convert our fixed-point intermediate into floating point and
value_a /= __gg__power_of_ten(phase1_rdigits);
// Pick up the target
- GCOB_FP128 value_b = __gg__float128_from_qualified_field(C[0], C_o[0], C_s[0]);
+ GCOB_FP128 value_b = __gg__float128_from_qualified_field(C[0].field,
+ C[0].offset,
+ C[0].size);
value_a += value_b;
// At this point, we assign running_sum to *C.
- *compute_error |= conditional_stash(C[0], C_o[0], C_s[0],
+ *compute_error |= conditional_stash(C[0].field,
+ C[0].offset,
+ C[0].size,
on_size_error,
value_a,
*rounded++);
int256 value_b = {};
int rdigits_b;
- get_int256_from_qualified_field(value_b, rdigits_b, C[0], C_o[0], C_s[0]);
+ get_int256_from_qualified_field(value_b,
+ rdigits_b,
+ C[0].field,
+ C[0].offset,
+ C[0].size);
- // We have to scale the one with fewer rdigits to match the one with greater
- // rdigits:
+ // We have to scale the one with fewer rdigits to match the one with
+ // greater rdigits:
if( rdigits_a > rdigits_b )
{
scale_int256_by_digits(value_b, rdigits_a - rdigits_b);
}
// At this point, we assign running_sum to *C.
- *compute_error |= conditional_stash(C[0], C_o[0], C_s[0],
+ *compute_error |= conditional_stash(C[0].field,
+ C[0].offset,
+ C[0].size,
on_size_error,
value_a.i128[0],
rdigits_a,
void
__gg__fixed_phase2_assign_to_c( cbl_arith_format_t ,
size_t ,
+ cblc_referlet_t *,
size_t ,
+ cblc_referlet_t *,
size_t ,
+ const cblc_referlet_t *CC,
const cbl_round_t *rounded,
int on_error_flag,
int *compute_error
{
// This is the assignment phase of an ADD or SUBTRACT Format 2
- cblc_field_t **C = __gg__treeplet_3f;
- const size_t *C_o = __gg__treeplet_3o;
- const size_t *C_s = __gg__treeplet_3s;
-
-
// We take phase1_result and put it into C
bool on_size_error = !!(on_error_flag & ON_SIZE_ERROR);
- if( C[0]->type == FldFloat)
+ if( CC[0].field->type == FldFloat)
{
// The target we need to accumulate into is a floating-point number, so we
// need to convert our fixed-point intermediate into floating point and
GCOB_FP128 value_a = (GCOB_FP128)phase1_result.i128[0];
value_a /= __gg__power_of_ten(phase1_rdigits);
- *compute_error |= conditional_stash(C[0], C_o[0], C_s[0],
+ *compute_error |= conditional_stash(CC[0].field, CC[0].offset, CC[0].size,
on_size_error,
value_a,
*rounded++);
*compute_error |= compute_error_overflow;
}
- if( C[0]->type == FldPointer )
+ if( CC[0].field->type == FldPointer )
{
// In case somebody does pointer arithmetic that goes negative, we need
// to make the top 64 bits positive. Otherwise, the conditional stash
}
// At this point, we assign that value to *C.
- *compute_error |= conditional_stash(C[0], C_o[0], C_s[0],
+ *compute_error |= conditional_stash(CC[0].field, CC[0].offset, CC[0].size,
on_size_error,
value_a.i128[0],
rdigits_a,
void
__gg__add_float_phase1( cbl_arith_format_t ,
size_t nA,
+ const cblc_referlet_t *A,
size_t ,
+ cblc_referlet_t *,
size_t ,
+ cblc_referlet_t *,
const cbl_round_t *,
int ,
int *compute_error
// The result goes into the temporary phase1_result_ffloat.
- cblc_field_t **A = __gg__treeplet_1f;
- const size_t *A_o = __gg__treeplet_1o;
- const size_t *A_s = __gg__treeplet_1s;
-
// Let us prime the pump with the first value of A[]
- phase1_result_float = __gg__float128_from_qualified_field(A[0], A_o[0], A_s[0]);
+ phase1_result_float = __gg__float128_from_qualified_field(A[0].field, A[0].offset, A[0].size);
// We now go into a loop adding each of the A[] values to phase1_result_flt:
for( size_t i=1; i<nA; i++ )
{
- GCOB_FP128 temp = __gg__float128_from_qualified_field(A[i], A_o[i], A_s[i]);
+ GCOB_FP128 temp = __gg__float128_from_qualified_field(A[i].field, A[i].offset, A[i].size);
phase1_result_float = addition_helper_float(phase1_result_float,
temp,
compute_error);
void
__gg__addf1_float_phase2( cbl_arith_format_t ,
size_t ,
+ cblc_referlet_t *,
size_t ,
+ cblc_referlet_t *,
size_t ,
+ const cblc_referlet_t *C,
const cbl_round_t *rounded,
int on_error_flag,
int *compute_error
)
{
- cblc_field_t **C = __gg__treeplet_3f;
- const size_t *C_o = __gg__treeplet_3o;
- const size_t *C_s = __gg__treeplet_3s;
-
bool on_size_error = !!(on_error_flag & ON_SIZE_ERROR);
// This is the assignment phase of an ADD Format 2
// We take phase1_result and accumulate it into C
- GCOB_FP128 temp = __gg__float128_from_qualified_field(C[0], C_o[0], C_s[0]);
+ GCOB_FP128 temp = __gg__float128_from_qualified_field(C[0].field, C[0].offset, C[0].size);
temp = addition_helper_float(temp, phase1_result_float, compute_error);
- *compute_error |= conditional_stash(C[0], C_o[0], C_s[0],
+ *compute_error |= conditional_stash(C[0].field, C[0].offset, C[0].size,
on_size_error,
temp,
*rounded++);
void
__gg__float_phase2_assign_to_c( cbl_arith_format_t ,
size_t ,
+ cblc_referlet_t *,
size_t ,
+ cblc_referlet_t *,
size_t ,
+ const cblc_referlet_t *C,
const cbl_round_t *rounded,
int on_error_flag,
int *compute_error
)
{
- cblc_field_t **C = __gg__treeplet_3f;
- const size_t *C_o = __gg__treeplet_3o;
- const size_t *C_s = __gg__treeplet_3s;
-
bool on_size_error = !!(on_error_flag & ON_SIZE_ERROR);
// This is the assignment phase of an ADD Format 2
// We take phase1_result and put it into C
- *compute_error |= conditional_stash(C[0], C_o[0], C_s[0],
+ *compute_error |= conditional_stash(C[0].field, C[0].offset, C[0].size,
on_size_error,
phase1_result_float,
*rounded++);
void
__gg__addf3(cbl_arith_format_t ,
size_t nA,
+ const cblc_referlet_t *A,
size_t ,
+ cblc_referlet_t *,
size_t ,
+ const cblc_referlet_t *C,
const cbl_round_t *rounded,
int on_error_flag,
int *compute_error
// This is an ADD Format 3. Each A[i] gets accumulated into each C[i]. When
// both are fixed, we do fixed arithmetic. When either is a FldFloat, we
// do floating-point arithmetic.
- cblc_field_t **A = __gg__treeplet_1f;
- const size_t *A_o = __gg__treeplet_1o;
- const size_t *A_s = __gg__treeplet_1s;
-
- cblc_field_t **C = __gg__treeplet_3f;
- const size_t *C_o = __gg__treeplet_3o;
- const size_t *C_s = __gg__treeplet_3s;
-
bool on_size_error = !!(on_error_flag & ON_SIZE_ERROR);
for(size_t i=0; i<nA; i++)
{
- if( A[i]->type == FldFloat || C[i]->type == FldFloat )
+ if( A[i].field->type == FldFloat || C[i].field->type == FldFloat )
{
- GCOB_FP128 value_a = __gg__float128_from_qualified_field(A[i], A_o[i], A_s[i]);
- GCOB_FP128 value_b = __gg__float128_from_qualified_field(C[i], C_o[i], C_s[i]);
+ GCOB_FP128 value_a = __gg__float128_from_qualified_field(A[i].field, A[i].offset, A[i].size);
+ GCOB_FP128 value_b = __gg__float128_from_qualified_field(C[i].field, C[i].offset, C[i].size);
value_a = addition_helper_float(value_a, value_b, compute_error);
// At this point, we assign the sum to *C.
- *compute_error |= conditional_stash(C[i], C_o[i], C_s[i],
+ *compute_error |= conditional_stash(C[i].field, C[i].offset, C[i].size,
on_size_error,
value_a,
*rounded++);
int256 value_b;
int rdigits_b;
- get_int256_from_qualified_field(value_a, rdigits_a, A[i], A_o[i], A_s[i]);
- get_int256_from_qualified_field(value_b, rdigits_b, C[i], C_o[i], C_s[i]);
+ get_int256_from_qualified_field(value_a, rdigits_a, A[i].field, A[i].offset, A[i].size);
+ get_int256_from_qualified_field(value_b, rdigits_b, C[i].field, C[i].offset, C[i].size);
// We have to scale the one with fewer rdigits to match the one with greater
// rdigits:
}
// At this point, we assign the sum to *C.
- *compute_error |= conditional_stash(C[i], C_o[i], C_s[i],
+ *compute_error |= conditional_stash(C[i].field, C[i].offset, C[i].size,
on_size_error,
value_a.i128[0],
rdigits_a,
void
__gg__subtractf1_fixed_phase2(cbl_arith_format_t ,
size_t ,
+ cblc_referlet_t *,
size_t ,
+ cblc_referlet_t *,
size_t ,
+ const cblc_referlet_t *C,
const cbl_round_t *rounded,
int on_error_flag,
int *compute_error
)
{
- cblc_field_t **C = __gg__treeplet_3f;
- const size_t *C_o = __gg__treeplet_3o;
- const size_t *C_s = __gg__treeplet_3s;
-
// This is the assignment phase of an ADD Format 1
// We take phase1_result and subtrace it from C
bool on_size_error = !!(on_error_flag & ON_SIZE_ERROR);
- if( C[0]->type == FldFloat)
+ if( C[0].field->type == FldFloat)
{
// The target we need to accumulate into is a floating-point number, so we
// need to convert our fixed-point intermediate into floating point and
value_a /= __gg__power_of_ten(phase1_rdigits);
// Pick up the target
- GCOB_FP128 value_b = __gg__float128_from_qualified_field(C[0], C_o[0], C_s[0]);
+ GCOB_FP128 value_b = __gg__float128_from_qualified_field(C[0].field, C[0].offset, C[0].size);
value_b -= value_a;
// At this point, we assign the difference to *C.
- *compute_error |= conditional_stash(C[0], C_o[0], C_s[0],
+ *compute_error |= conditional_stash(C[0].field, C[0].offset, C[0].size,
on_size_error,
value_b,
*rounded++);
int256 value_b = {};
int rdigits_b;
- get_int256_from_qualified_field(value_b, rdigits_b, C[0], C_o[0], C_s[0]);
+ get_int256_from_qualified_field(value_b, rdigits_b, C[0].field, C[0].offset, C[0].size);
// We have to scale the one with fewer rdigits to match the one with greater
// rdigits:
}
// At this point, we assign running_sum to *C.
- *compute_error |= conditional_stash(C[0], C_o[0], C_s[0],
+ *compute_error |= conditional_stash(C[0].field, C[0].offset, C[0].size,
on_size_error,
value_b.i128[0],
rdigits_b,
void
__gg__subtractf2_fixed_phase1(cbl_arith_format_t ,
size_t nA,
+ const cblc_referlet_t *AA,
size_t ,
+ const cblc_referlet_t *BB,
size_t ,
+ cblc_referlet_t *,
const cbl_round_t *rounded,
int on_error_flag,
int *compute_error
{
// This is the calculation phase of a fixed-point SUBTRACT Format 2
- cblc_field_t **B = __gg__treeplet_2f;
- const size_t *B_o = __gg__treeplet_2o;
- const size_t *B_s = __gg__treeplet_2s;
-
// Add up all the A values
__gg__add_fixed_phase1( not_expected_e ,
nA,
+ AA,
0,
+ NULL,
0,
+ NULL,
rounded,
on_error_flag,
compute_error);
int256 value_b = {};
int rdigits_b;
- get_int256_from_qualified_field(value_b, rdigits_b, B[0], B_o[0], B_s[0]);
+ get_int256_from_qualified_field(value_b, rdigits_b, BB[0].field, BB[0].offset, BB[0].size);
// We have to scale the one with fewer rdigits to match the one with greater
// rdigits:
void
__gg__subtractf1_float_phase2(cbl_arith_format_t ,
size_t ,
+ cblc_referlet_t *,
size_t ,
+ cblc_referlet_t *,
size_t ,
+ const cblc_referlet_t *C,
const cbl_round_t *rounded,
int on_error_flag,
int *compute_error
)
{
- cblc_field_t **C = __gg__treeplet_3f;
- const size_t *C_o = __gg__treeplet_3o;
- const size_t *C_s = __gg__treeplet_3s;
-
bool on_size_error = !!(on_error_flag & ON_SIZE_ERROR);
// This is the assignment phase of an SUBTRACT Format 2
// We take phase1_result and subtract it from C
- GCOB_FP128 temp = __gg__float128_from_qualified_field(C[0], C_o[0], C_s[0]);
+ GCOB_FP128 temp = __gg__float128_from_qualified_field(C[0].field, C[0].offset, C[0].size);
temp = subtraction_helper_float(temp, phase1_result_float, compute_error);
- *compute_error |= conditional_stash(C[0], C_o[0], C_s[0],
+ *compute_error |= conditional_stash(C[0].field, C[0].offset, C[0].size,
on_size_error,
temp,
*rounded++);
void
__gg__subtractf2_float_phase1(cbl_arith_format_t ,
size_t nA,
+ const cblc_referlet_t *A,
size_t ,
+ const cblc_referlet_t *B,
size_t ,
+ cblc_referlet_t *,
const cbl_round_t *rounded,
int on_error_flag,
int *compute_error
{
// This is the calculation phase of a fixed-point SUBTRACT Format 2
- cblc_field_t **B = __gg__treeplet_2f;
- const size_t *B_o = __gg__treeplet_2o;
- const size_t *B_s = __gg__treeplet_2s;
-
// Add up all the A values
__gg__add_float_phase1( not_expected_e ,
nA,
+ A,
0,
+ NULL,
0,
+ NULL,
rounded,
on_error_flag,
compute_error
);
// Subtract that subtotal from the B value:
- GCOB_FP128 value_b = __gg__float128_from_qualified_field(B[0], B_o[0], B_s[0]);
-
-
+ GCOB_FP128 value_b = __gg__float128_from_qualified_field(B[0].field, B[0].offset, B[0].size);
phase1_result_float = subtraction_helper_float(value_b, phase1_result_float, compute_error);
}
void
__gg__subtractf3( cbl_arith_format_t ,
size_t nA,
+ const cblc_referlet_t *A,
size_t ,
+ cblc_referlet_t *,
size_t ,
+ const cblc_referlet_t *C,
const cbl_round_t *rounded,
int on_error_flag,
int *compute_error
// This is an ADD Format 3. Each A[i] gets accumulated into each C[i]. Each
// SUBTRACTION is treated separately.
- cblc_field_t **A = __gg__treeplet_1f;
- const size_t *A_o = __gg__treeplet_1o;
- const size_t *A_s = __gg__treeplet_1s;
- cblc_field_t **C = __gg__treeplet_3f;
- const size_t *C_o = __gg__treeplet_3o;
- const size_t *C_s = __gg__treeplet_3s;
-
bool on_size_error = !!(on_error_flag & ON_SIZE_ERROR);
for(size_t i=0; i<nA; i++)
{
- if( A[i]->type == FldFloat || C[i]->type == FldFloat)
+ if( A[i].field->type == FldFloat || C[i].field->type == FldFloat)
{
- GCOB_FP128 value_a = __gg__float128_from_qualified_field(A[i], A_o[i], A_s[i]);
- GCOB_FP128 value_b = __gg__float128_from_qualified_field(C[i], C_o[i], C_s[i]);
+ GCOB_FP128 value_a = __gg__float128_from_qualified_field(A[i].field, A[i].offset, A[i].size);
+ GCOB_FP128 value_b = __gg__float128_from_qualified_field(C[i].field, C[i].offset, C[i].size);
value_b = subtraction_helper_float(value_b, value_a, compute_error);
// At this point, we assign the sum to *C.
- *compute_error |= conditional_stash(C[i], C_o[i], C_s[i],
+ *compute_error |= conditional_stash(C[i].field, C[i].offset, C[i].size,
on_size_error,
value_b,
*rounded++);
int256 value_b;
int rdigits_b;
- get_int256_from_qualified_field(value_a, rdigits_a, A[i], A_o[i], A_s[i]);
- get_int256_from_qualified_field(value_b, rdigits_b, C[i], C_o[i], C_s[i]);
+ get_int256_from_qualified_field(value_a, rdigits_a, A[i].field, A[i].offset, A[i].size);
+ get_int256_from_qualified_field(value_b, rdigits_b, C[i].field, C[i].offset, C[i].size);
// We have to scale the one with fewer rdigits to match the one with greater
// rdigits:
}
// At this point, we assign the sum to *C.
- *compute_error |= conditional_stash(C[i], C_o[i], C_s[i],
+ *compute_error |= conditional_stash(C[i].field, C[i].offset, C[i].size,
on_size_error,
value_b.i128[0],
rdigits_b,
void
__gg__multiplyf1_phase1(cbl_arith_format_t ,
size_t ,
+ const cblc_referlet_t *A,
size_t ,
+ cblc_referlet_t *,
size_t ,
+ cblc_referlet_t *,
const cbl_round_t *,
int ,
int *)
// We are getting just the one value, which we are converting to the necessary
// intermediate form
- cblc_field_t **A = __gg__treeplet_1f;
- const size_t *A_o = __gg__treeplet_1o;
- const size_t *A_s = __gg__treeplet_1s;
-
- if( A[0]->type == FldFloat )
+ if( A[0].field->type == FldFloat )
{
multiply_intermediate_is_float = true;
- multiply_intermediate_float = __gg__float128_from_qualified_field(A[0],
- A_o[0],
- A_s[0]);
+ multiply_intermediate_float = __gg__float128_from_qualified_field(A[0].field,
+ A[0].offset,
+ A[0].size);
}
else
{
multiply_intermediate_is_float = false;
multiply_intermediate_int128 =
__gg__binary_value_from_qualified_field(&multiply_intermediate_rdigits,
- A[0],
- A_o[0],
- A_s[0]);
+ A[0].field,
+ A[0].offset,
+ A[0].size);
}
}
void
__gg__multiplyf1_phase2(cbl_arith_format_t ,
size_t ,
+ cblc_referlet_t *,
size_t ,
+ cblc_referlet_t *,
size_t ,
+ const cblc_referlet_t *C,
const cbl_round_t *rounded,
int on_error_flag,
int *compute_error
)
{
- cblc_field_t **C = __gg__treeplet_3f;
- const size_t *C_o = __gg__treeplet_3o;
- const size_t *C_s = __gg__treeplet_3s;
-
bool on_size_error = !!(on_error_flag & ON_SIZE_ERROR);
int error_this_time=0;
if( multiply_intermediate_is_float )
{
a_value = multiply_intermediate_float;
- if( C[0]->type == FldFloat )
+ if( C[0].field->type == FldFloat )
{
- b_value = __gg__float128_from_qualified_field(C[0], C_o[0], C_s[0]);
+ b_value = __gg__float128_from_qualified_field(C[0].field, C[0].offset, C[0].size);
goto float_float;
}
else
{
// float times fixed
- b_value = __gg__float128_from_qualified_field(C[0], C_o[0], C_s[0]);
+ b_value = __gg__float128_from_qualified_field(C[0].field, C[0].offset, C[0].size);
goto float_float;
}
}
else
{
- if( C[0]->type == FldFloat )
+ if( C[0].field->type == FldFloat )
{
// fixed * float
a_value = (GCOB_FP128) multiply_intermediate_int128;
{
a_value /= (GCOB_FP128)__gg__power_of_ten(multiply_intermediate_rdigits);
}
- b_value = __gg__float128_from_qualified_field(C[0], C_o[0], C_s[0]);
+ b_value = __gg__float128_from_qualified_field(C[0].field, C[0].offset, C[0].size);
goto float_float;
}
else
int cd_rdigits;
__int128 ab_value = multiply_intermediate_int128;
- __int128 cd_value = __gg__binary_value_from_qualified_field(&cd_rdigits, C[0], C_o[0], C_s[0]);
+ __int128 cd_value = __gg__binary_value_from_qualified_field(&cd_rdigits, C[0].field, C[0].offset, C[0].size);
int256 ABCD;
int rdigits = multiply_intermediate_rdigits + cd_rdigits;
*compute_error |= compute_error_overflow;
}
// At this point, we assign running_sum to *C.
- *compute_error |= conditional_stash(C[0], C_o[0], C_s[0],
+ *compute_error |= conditional_stash(C[0].field, C[0].offset, C[0].size,
on_size_error,
ABCD.i128[0],
rdigits,
}
else
{
- *compute_error |= conditional_stash(C[0], C_o[0], C_s[0],
+ *compute_error |= conditional_stash(C[0].field, C[0].offset, C[0].size,
on_size_error,
a_value,
*rounded);
void
__gg__multiplyf2( cbl_arith_format_t ,
size_t ,
+ const cblc_referlet_t *A,
size_t ,
+ const cblc_referlet_t *B,
size_t nC,
+ const cblc_referlet_t *C,
const cbl_round_t *rounded,
int on_error_flag,
int *compute_error
)
{
- cblc_field_t **A = __gg__treeplet_1f;
- const size_t *A_o = __gg__treeplet_1o;
- const size_t *A_s = __gg__treeplet_1s;
- cblc_field_t **B = __gg__treeplet_2f;
- const size_t *B_o = __gg__treeplet_2o;
- const size_t *B_s = __gg__treeplet_2s;
- cblc_field_t **C = __gg__treeplet_3f;
- const size_t *C_o = __gg__treeplet_3o;
- const size_t *C_s = __gg__treeplet_3s;
-
bool on_size_error = !!(on_error_flag & ON_SIZE_ERROR);
bool got_float = false;
int256 product_fix;
int product_fix_digits;
- if( A[0]->type == FldFloat || B[0]->type == FldFloat )
+ if( A[0].field->type == FldFloat || B[0].field->type == FldFloat )
{
- GCOB_FP128 a_value = __gg__float128_from_qualified_field(A[0], A_o[0], A_s[0]);
- GCOB_FP128 b_value = __gg__float128_from_qualified_field(B[0], B_o[0], B_s[0]);
+ GCOB_FP128 a_value = __gg__float128_from_qualified_field(A[0].field, A[0].offset, A[0].size);
+ GCOB_FP128 b_value = __gg__float128_from_qualified_field(B[0].field, B[0].offset, B[0].size);
product_float = multiply_helper_float(a_value, b_value, compute_error);
got_float = true;
}
{
int a_rdigits;
int b_rdigits;
- __int128 a_value = __gg__binary_value_from_qualified_field(&a_rdigits, A[0], A_o[0], A_s[0]);
- __int128 b_value = __gg__binary_value_from_qualified_field(&b_rdigits, B[0], B_o[0], B_s[0]);
+ __int128 a_value = __gg__binary_value_from_qualified_field(&a_rdigits, A[0].field, A[0].offset, A[0].size);
+ __int128 b_value = __gg__binary_value_from_qualified_field(&b_rdigits, B[0].field, B[0].offset, B[0].size);
product_fix_digits = a_rdigits + b_rdigits;
multiply_int128_by_int128(product_fix, a_value, b_value);
int overflow = squeeze_int256(product_fix, product_fix_digits);
{
if( got_float )
{
- *compute_error |= conditional_stash(C[i], C_o[i], C_s[i],
+ *compute_error |= conditional_stash(C[i].field, C[i].offset, C[i].size,
on_size_error,
product_float,
*rounded++);
}
else
{
- *compute_error |= conditional_stash(C[i], C_o[i], C_s[i],
+ *compute_error |= conditional_stash(C[i].field, C[i].offset, C[i].size,
on_size_error,
product_fix.i128[0],
product_fix_digits,
void
__gg__dividef1_phase2(cbl_arith_format_t ,
size_t ,
+ cblc_referlet_t *,
size_t ,
+ cblc_referlet_t *,
size_t ,
+ const cblc_referlet_t *C,
const cbl_round_t *rounded,
int on_error_flag,
int *compute_error
)
{
- cblc_field_t **C = __gg__treeplet_3f;
- const size_t *C_o = __gg__treeplet_3o;
- const size_t *C_s = __gg__treeplet_3s;
-
bool on_size_error = !!(on_error_flag & ON_SIZE_ERROR);
int error_this_time=0;
if( multiply_intermediate_is_float )
{
a_value = multiply_intermediate_float;
- if( C[0]->type == FldFloat )
+ if( C[0].field->type == FldFloat )
{
- b_value = __gg__float128_from_qualified_field(C[0], C_o[0], C_s[0]);
+ b_value = __gg__float128_from_qualified_field(C[0].field,
+ C[0].offset,
+ C[0].size);
goto float_float;
}
else
{
// float times fixed
- b_value = __gg__float128_from_qualified_field(C[0], C_o[0], C_s[0]);
+ b_value = __gg__float128_from_qualified_field(C[0].field,
+ C[0].offset,
+ C[0].size);
goto float_float;
}
}
else
{
- if( C[0]->type == FldFloat )
+ if( C[0].field->type == FldFloat )
{
// gixed * float
a_value = (GCOB_FP128) multiply_intermediate_int128;
{
a_value /= (GCOB_FP128)__gg__power_of_ten(multiply_intermediate_rdigits);
}
- b_value = __gg__float128_from_qualified_field(C[0], C_o[0], C_s[0]);
+ b_value = __gg__float128_from_qualified_field(C[0].field,
+ C[0].offset,
+ C[0].size);
goto float_float;
}
else
// 64-bit "digits". We need to multiply them to create a 256-bit result
int dividend_rdigits;
-  __int128 dividend = __gg__binary_value_from_qualified_field(&dividend_rdigits, C[0], C_o[0], C_s[0]);
+  __int128 dividend = __gg__binary_value_from_qualified_field(
+                                                             &dividend_rdigits,
+                                                             C[0].field,
+                                                             C[0].offset,
+                                                             C[0].size);
int quotient_rdigits;
int256 quotient;
*compute_error |= compute_error_overflow;
}
// At this point, we assign the quotient to *C.
- *compute_error |= conditional_stash(C[0], C_o[0], C_s[0],
+ *compute_error |= conditional_stash(C[0].field, C[0].offset, C[0].size,
on_size_error,
quotient.i128[0],
quotient_rdigits,
}
else
{
- *compute_error |= conditional_stash(C[0], C_o[0], C_s[0],
+ *compute_error |= conditional_stash(C[0].field, C[0].offset, C[0].size,
on_size_error,
b_value,
*rounded);
void
__gg__dividef23(cbl_arith_format_t ,
size_t ,
+ const cblc_referlet_t *A,
size_t ,
+ const cblc_referlet_t *B,
size_t nC,
+ const cblc_referlet_t *C,
const cbl_round_t *rounded,
int on_error_flag,
int *compute_error
)
{
- cblc_field_t **A = __gg__treeplet_1f;
- const size_t *A_o = __gg__treeplet_1o;
- const size_t *A_s = __gg__treeplet_1s;
- cblc_field_t **B = __gg__treeplet_2f;
- const size_t *B_o = __gg__treeplet_2o;
- const size_t *B_s = __gg__treeplet_2s;
- cblc_field_t **C = __gg__treeplet_3f;
- const size_t *C_o = __gg__treeplet_3o;
- const size_t *C_s = __gg__treeplet_3s;
-
bool on_size_error = !!(on_error_flag & ON_SIZE_ERROR);
int error_this_time=0;
- if( A[0]->type == FldFloat || B[0]->type == FldFloat )
+ if( A[0].field->type == FldFloat || B[0].field->type == FldFloat )
{
GCOB_FP128 a_value;
GCOB_FP128 b_value;
GCOB_FP128 c_value;
- a_value = __gg__float128_from_qualified_field(A[0], A_o[0], A_s[0]);
- b_value = __gg__float128_from_qualified_field(B[0], B_o[0], B_s[0]);
+ a_value = __gg__float128_from_qualified_field(A[0].field,
+ A[0].offset,
+ A[0].size);
+ b_value = __gg__float128_from_qualified_field(B[0].field,
+ B[0].offset,
+ B[0].size);
c_value = divide_helper_float(a_value, b_value, &error_this_time);
*compute_error |= error_this_time;
{
for(size_t i=0; i<nC; i++)
{
- *compute_error |= conditional_stash(C[i], C_o[i], C_s[i],
+ *compute_error |= conditional_stash(C[i].field, C[i].offset, C[i].size,
on_size_error,
c_value,
*rounded++);
{
// fixed divided by fixed
int dividend_rdigits;
-    __int128 dividend = __gg__binary_value_from_qualified_field(&dividend_rdigits, A[0], A_o[0], A_s[0]);
+    __int128 dividend = __gg__binary_value_from_qualified_field(
+                                                             &dividend_rdigits,
+                                                             A[0].field,
+                                                             A[0].offset,
+                                                             A[0].size);
int divisor_rdigits;
- __int128 divisor = __gg__binary_value_from_qualified_field(&divisor_rdigits, B[0], B_o[0], B_s[0]);
-
+ __int128 divisor = __gg__binary_value_from_qualified_field(
+ &divisor_rdigits,
+ B[0].field,
+ B[0].offset,
+ B[0].size);
int quotient_rdigits;
int256 quotient;
// At this point, we assign the quotient to *C.
for(size_t i=0; i<nC; i++)
{
- *compute_error |= conditional_stash(C[i], C_o[i], C_s[i],
+ *compute_error |= conditional_stash(C[i].field, C[i].offset, C[i].size,
on_size_error,
quotient.i128[0],
quotient_rdigits,
void
__gg__dividef45(cbl_arith_format_t ,
size_t ,
+ const cblc_referlet_t *A,
size_t ,
+ const cblc_referlet_t *B,
size_t ,
+ const cblc_referlet_t *C,
cbl_round_t *rounded_p,
int on_error_flag,
int *compute_error
)
{
- cblc_field_t **A = __gg__treeplet_1f; // Numerator
- const size_t *A_o = __gg__treeplet_1o;
- const size_t *A_s = __gg__treeplet_1s;
- cblc_field_t **B = __gg__treeplet_2f; // Denominator
- const size_t *B_o = __gg__treeplet_2o;
- const size_t *B_s = __gg__treeplet_2s;
- cblc_field_t **C = __gg__treeplet_3f; // Has remainder, then quotient
- const size_t *C_o = __gg__treeplet_3o;
- const size_t *C_s = __gg__treeplet_3s;
-
bool on_size_error = !!(on_error_flag & ON_SIZE_ERROR);
int error_this_time=0;
- if( A[0]->type == FldFloat || B[0]->type == FldFloat )
+ if( A[0].field->type == FldFloat || B[0].field->type == FldFloat )
{
GCOB_FP128 a_value;
GCOB_FP128 b_value;
GCOB_FP128 c_value;
- a_value = __gg__float128_from_qualified_field(A[0], A_o[0], A_s[0]);
- b_value = __gg__float128_from_qualified_field(B[0], B_o[0], B_s[0]);
+ a_value = __gg__float128_from_qualified_field(A[0].field,
+ A[0].offset,
+ A[0].size);
+ b_value = __gg__float128_from_qualified_field(B[0].field,
+ B[0].offset,
+ B[0].size);
c_value = divide_helper_float(a_value, b_value, &error_this_time);
*compute_error |= error_this_time;
if( !error_this_time )
{
- *compute_error |= conditional_stash(C[1], C_o[1], C_s[1],
+ *compute_error |= conditional_stash(C[1].field, C[1].offset, C[1].size,
on_size_error,
c_value,
*rounded_p++);
if( !*compute_error )
{
c_value = 0;
- *compute_error |= conditional_stash(C[0], C_o[0], C_s[0],
+ *compute_error |= conditional_stash(C[0].field, C[0].offset, C[0].size,
on_size_error,
c_value,
*rounded_p++);
{
// fixed divided by fixed
int dividend_rdigits;
-    __int128 dividend = __gg__binary_value_from_qualified_field(&dividend_rdigits, A[0], A_o[0], A_s[0]);
+    __int128 dividend = __gg__binary_value_from_qualified_field(
+                                                             &dividend_rdigits,
+                                                             A[0].field,
+                                                             A[0].offset,
+                                                             A[0].size);
int divisor_rdigits;
- __int128 divisor = __gg__binary_value_from_qualified_field(&divisor_rdigits, B[0], B_o[0], B_s[0]);
+ __int128 divisor = __gg__binary_value_from_qualified_field(
+ &divisor_rdigits,
+ B[0].field,
+ B[0].offset,
+ B[0].size);
int quotient_rdigits;
int256 quotient;
{
case truncation_e:
{
- *compute_error |= conditional_stash(C[1], C_o[1], C_s[1],
+ *compute_error |= conditional_stash(C[1].field,
+ C[1].offset,
+ C[1].size,
on_size_error,
quotient.i128[0],
quotient_rdigits,
*rounded_p++);
unrounded_quotient = __gg__binary_value_from_qualified_field(
- &unrounded_quotient_digits,
- C[1], C_o[1], C_s[1]);
+ &unrounded_quotient_digits,
+ C[1].field,
+ C[1].offset,
+ C[1].size);
break;
}
default:
{
- conditional_stash(C[1], C_o[1], C_s[1],
+ conditional_stash(C[1].field, C[1].offset, C[1].size,
false,
quotient.i128[0],
quotient_rdigits,
truncation_e);
unrounded_quotient = __gg__binary_value_from_qualified_field(
- &unrounded_quotient_digits,
- C[1], C_o[1], C_s[1]);
+ &unrounded_quotient_digits,
+ C[1].field,
+ C[1].offset,
+ C[1].size);
// At this point, we assign the rounded quotient to *C.
- *compute_error |= conditional_stash(C[1], C_o[1], C_s[1],
+ *compute_error |= conditional_stash(C[1].field,
+ C[1].offset,
+ C[1].size,
on_size_error,
quotient.i128[0],
quotient_rdigits,
if( !*compute_error )
{
- *compute_error |= conditional_stash(C[0], C_o[0], C_s[0],
+ *compute_error |= conditional_stash(C[0].field,
+ C[0].offset,
+ C[0].size,
on_size_error,
odividend.i128[0],
temp_rdigits,
static
void
-inspect_backward_format_1(const size_t integers[])
+inspect_backward_format_1(const size_t integers[],
+ const cblc_referlet_t *params)
{
size_t int_index = 0;
size_t cblc_index = 0;
std::vector<id_2_result> id_2_results(n_identifier_2);
// Pick up identifier_1, which is the string being inspected
- const cblc_field_t *id1 = __gg__treeplet_1f[cblc_index];
- size_t id1_o = __gg__treeplet_1o[cblc_index];
- size_t id1_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id1 = params[cblc_index].field ;
+ size_t id1_o = params[cblc_index].offset;
+ size_t id1_s = params[cblc_index].size ;
cblc_index += 1;
// normalize it, according to the language specification.
normalized_operand normalized_id_1 = normalize_id(id1, id1_o, id1_s, id1->encoding);
{
// For each identifier_2, we pick up its value:
- id_2_results[i].id2 = __gg__treeplet_1f [cblc_index];
- id_2_results[i].id2_o = __gg__treeplet_1o[cblc_index];
- id_2_results[i].id2_s = __gg__treeplet_1s[cblc_index];
+ id_2_results[i].id2 = params[cblc_index].field ;
+ id_2_results[i].id2_o = params[cblc_index].offset;
+ id_2_results[i].id2_s = params[cblc_index].size ;
cblc_index += 1;
id_2_results[i].result = 0;
next_comparand.operation = operation;
next_comparand.identifier_3.length = 1;
- const cblc_field_t *id4_before = __gg__treeplet_1f [cblc_index];
- size_t id4_before_o = __gg__treeplet_1o[cblc_index];
- size_t id4_before_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id4_before = params[cblc_index].field ;
+ size_t id4_before_o = params[cblc_index].offset;
+ size_t id4_before_s = params[cblc_index].size ;
cblc_index += 1;
- const cblc_field_t *id4_after = __gg__treeplet_1f [cblc_index];
- size_t id4_after_o = __gg__treeplet_1o[cblc_index];
- size_t id4_after_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id4_after = params[cblc_index].field ;
+ size_t id4_after_o = params[cblc_index].offset;
+ size_t id4_after_s = params[cblc_index].size ;
cblc_index += 1;
normalized_operand normalized_id_4_before
next_comparand.id_2_index = i;
next_comparand.operation = operation;
- const cblc_field_t *id3 = __gg__treeplet_1f[cblc_index];
- size_t id3_o = __gg__treeplet_1o[cblc_index];
- size_t id3_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id3 = params[cblc_index].field ;
+ size_t id3_o = params[cblc_index].offset;
+ size_t id3_s = params[cblc_index].size ;
cblc_index += 1;
- const cblc_field_t *id4_before = __gg__treeplet_1f[cblc_index];
- size_t id4_before_o = __gg__treeplet_1o[cblc_index];
- size_t id4_before_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id4_before = params[cblc_index].field ;
+ size_t id4_before_o = params[cblc_index].offset;
+ size_t id4_before_s = params[cblc_index].size ;
cblc_index += 1;
- const cblc_field_t *id4_after = __gg__treeplet_1f[cblc_index];
- size_t id4_after_o = __gg__treeplet_1o[cblc_index];
- size_t id4_after_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id4_after = params[cblc_index].field ;
+ size_t id4_after_o = params[cblc_index].offset;
+ size_t id4_after_s = params[cblc_index].size ;
cblc_index += 1;
next_comparand.identifier_3
extern "C"
void
-__gg__inspect_format_1(int backward, size_t integers[])
+__gg__inspect_format_1( int backward,
+ size_t integers[],
+ const cblc_referlet_t *params)
{
if( backward )
{
- return inspect_backward_format_1(integers);
+ return inspect_backward_format_1(integers, params);
}
size_t int_index = 0;
std::vector<id_2_result> id_2_results(n_identifier_2);
// Pick up identifier_1, which is the string being inspected
- const cblc_field_t *id1 = __gg__treeplet_1f[cblc_index];
- size_t id1_o = __gg__treeplet_1o[cblc_index];
- size_t id1_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id1 = params[cblc_index].field ;
+ size_t id1_o = params[cblc_index].offset;
+ size_t id1_s = params[cblc_index].size ;
cblc_index += 1;
// normalize it, according to the language specification.
normalized_operand normalized_id_1
{
// For each identifier_2, we pick up its value:
- id_2_results[i].id2 = __gg__treeplet_1f [cblc_index];
- id_2_results[i].id2_o = __gg__treeplet_1o[cblc_index];
- id_2_results[i].id2_s = __gg__treeplet_1s[cblc_index];
+ id_2_results[i].id2 = params[cblc_index].field ;
+ id_2_results[i].id2_o = params[cblc_index].offset;
+ id_2_results[i].id2_s = params[cblc_index].size ;
cblc_index += 1;
id_2_results[i].result = 0;
next_comparand.operation = operation;
next_comparand.identifier_3.length = 1;
- const cblc_field_t *id4_before = __gg__treeplet_1f [cblc_index];
- size_t id4_before_o = __gg__treeplet_1o[cblc_index];
- size_t id4_before_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id4_before = params[cblc_index].field ;
+ size_t id4_before_o = params[cblc_index].offset;
+ size_t id4_before_s = params[cblc_index].size ;
cblc_index += 1;
- const cblc_field_t *id4_after = __gg__treeplet_1f [cblc_index];
- size_t id4_after_o = __gg__treeplet_1o[cblc_index];
- size_t id4_after_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id4_after = params[cblc_index].field ;
+ size_t id4_after_o = params[cblc_index].offset;
+ size_t id4_after_s = params[cblc_index].size ;
cblc_index += 1;
normalized_operand normalized_id_4_before
next_comparand.id_2_index = i;
next_comparand.operation = operation;
- const cblc_field_t *id3 = __gg__treeplet_1f[cblc_index];
- size_t id3_o = __gg__treeplet_1o[cblc_index];
- size_t id3_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id3 = params[cblc_index].field ;
+ size_t id3_o = params[cblc_index].offset;
+ size_t id3_s = params[cblc_index].size ;
cblc_index += 1;
- const cblc_field_t *id4_before = __gg__treeplet_1f[cblc_index];
- size_t id4_before_o = __gg__treeplet_1o[cblc_index];
- size_t id4_before_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id4_before = params[cblc_index].field ;
+ size_t id4_before_o = params[cblc_index].offset;
+ size_t id4_before_s = params[cblc_index].size ;
cblc_index += 1;
- const cblc_field_t *id4_after = __gg__treeplet_1f[cblc_index];
- size_t id4_after_o = __gg__treeplet_1o[cblc_index];
- size_t id4_after_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id4_after = params[cblc_index].field ;
+ size_t id4_after_o = params[cblc_index].offset;
+ size_t id4_after_s = params[cblc_index].size ;
cblc_index += 1;
next_comparand.identifier_3
static
void
-inspect_backward_format_2(const size_t integers[])
+inspect_backward_format_2(const size_t integers[],
+ const cblc_referlet_t *params)
{
size_t int_index = 0;
size_t cblc_index = 0;
// Reference the language specification for the meanings of identifier_X
// Pick up identifier_1, which is the string being inspected
- cblc_field_t *id1 = __gg__treeplet_1f[cblc_index];
- size_t id1_o = __gg__treeplet_1o[cblc_index];
- size_t id1_s = __gg__treeplet_1s[cblc_index];
+ cblc_field_t *id1 = params[cblc_index].field ;
+ size_t id1_o = params[cblc_index].offset;
+ size_t id1_s = params[cblc_index].size ;
cblc_index += 1;
// normalize it, according to the language specification.
comparand next_comparand = {};
next_comparand.operation = operation;
- const cblc_field_t *id5 = __gg__treeplet_1f[cblc_index];
- size_t id5_o = __gg__treeplet_1o[cblc_index];
- size_t id5_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id5 = params[cblc_index].field ;
+ size_t id5_o = params[cblc_index].offset;
+ size_t id5_s = params[cblc_index].size ;
cblc_index += 1;
- const cblc_field_t *id4_before = __gg__treeplet_1f[cblc_index];
- size_t id4_before_o = __gg__treeplet_1o[cblc_index];
- size_t id4_before_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id4_before = params[cblc_index].field ;
+ size_t id4_before_o = params[cblc_index].offset;
+ size_t id4_before_s = params[cblc_index].size ;
cblc_index += 1;
- const cblc_field_t *id4_after = __gg__treeplet_1f [cblc_index];
- size_t id4_after_o = __gg__treeplet_1o[cblc_index];
- size_t id4_after_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id4_after = params[cblc_index].field ;
+ size_t id4_after_o = params[cblc_index].offset;
+ size_t id4_after_s = params[cblc_index].size ;
cblc_index += 1;
next_comparand.identifier_5
comparand next_comparand = {};
next_comparand.operation = operation;
- const cblc_field_t *id3 = __gg__treeplet_1f[cblc_index];
- size_t id3_o = __gg__treeplet_1o[cblc_index];
- size_t id3_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id3 = params[cblc_index].field ;
+ size_t id3_o = params[cblc_index].offset;
+ size_t id3_s = params[cblc_index].size ;
cblc_index += 1;
- const cblc_field_t *id5 = __gg__treeplet_1f[cblc_index];
- size_t id5_o = __gg__treeplet_1o[cblc_index];
- size_t id5_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id5 = params[cblc_index].field ;
+ size_t id5_o = params[cblc_index].offset;
+ size_t id5_s = params[cblc_index].size ;
cblc_index += 1;
- const cblc_field_t *id4_before = __gg__treeplet_1f[cblc_index];
- size_t id4_before_o = __gg__treeplet_1o[cblc_index];
- size_t id4_before_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id4_before = params[cblc_index].field ;
+ size_t id4_before_o = params[cblc_index].offset;
+ size_t id4_before_s = params[cblc_index].size ;
cblc_index += 1;
- const cblc_field_t *id4_after = __gg__treeplet_1f[cblc_index];
- size_t id4_after_o = __gg__treeplet_1o[cblc_index];
- size_t id4_after_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id4_after = params[cblc_index].field ;
+ size_t id4_after_o = params[cblc_index].offset;
+ size_t id4_after_s = params[cblc_index].size ;
cblc_index += 1;
next_comparand.identifier_3 = normalize_id(id3, id3_o, id3_s, id1->encoding);
extern "C"
void
-__gg__inspect_format_2(int backward, size_t integers[])
+__gg__inspect_format_2( int backward,
+ size_t integers[],
+ const cblc_referlet_t *params)
{
if( backward )
{
- return inspect_backward_format_2(integers);
+ return inspect_backward_format_2(integers, params);
}
size_t int_index = 0;
size_t cblc_index = 0;
// Reference the language specification for the meanings of identifier_X
- // Pick up identifier_1, which is the string being inspected
- cblc_field_t *id1 = __gg__treeplet_1f[cblc_index];
- size_t id1_o = __gg__treeplet_1o[cblc_index];
- size_t id1_s = __gg__treeplet_1s[cblc_index];
+ // id1 is the string being inspected
+ cblc_field_t *id1 = params[cblc_index].field;
+ size_t id1_o = params[cblc_index].offset;
+ size_t id1_s = params[cblc_index].size;
cblc_index += 1;
// normalize it, according to the language specification.
comparand next_comparand = {} ;
next_comparand.operation = operation;
- const cblc_field_t *id5 = __gg__treeplet_1f[cblc_index];
- size_t id5_o = __gg__treeplet_1o[cblc_index];
- size_t id5_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id5 = params[cblc_index].field;
+ size_t id5_o = params[cblc_index].offset;
+ size_t id5_s = params[cblc_index].size;
cblc_index += 1;
- const cblc_field_t *id4_before = __gg__treeplet_1f[cblc_index];
- size_t id4_before_o = __gg__treeplet_1o[cblc_index];
- size_t id4_before_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id4_before = params[cblc_index].field;
+ size_t id4_before_o = params[cblc_index].offset;
+ size_t id4_before_s = params[cblc_index].size;
cblc_index += 1;
- const cblc_field_t *id4_after = __gg__treeplet_1f [cblc_index];
- size_t id4_after_o = __gg__treeplet_1o[cblc_index];
- size_t id4_after_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id4_after = params[cblc_index].field;
+ size_t id4_after_o = params[cblc_index].offset;
+ size_t id4_after_s = params[cblc_index].size;
cblc_index += 1;
next_comparand.identifier_5
comparand next_comparand = {};
next_comparand.operation = operation;
- const cblc_field_t *id3 = __gg__treeplet_1f[cblc_index];
- size_t id3_o = __gg__treeplet_1o[cblc_index];
- size_t id3_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id3 = params[cblc_index].field;
+ size_t id3_o = params[cblc_index].offset;
+ size_t id3_s = params[cblc_index].size;
cblc_index += 1;
- const cblc_field_t *id5 = __gg__treeplet_1f[cblc_index];
- size_t id5_o = __gg__treeplet_1o[cblc_index];
- size_t id5_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id5 = params[cblc_index].field;
+ size_t id5_o = params[cblc_index].offset;
+ size_t id5_s = params[cblc_index].size;
cblc_index += 1;
- const cblc_field_t *id4_before = __gg__treeplet_1f[cblc_index];
- size_t id4_before_o = __gg__treeplet_1o[cblc_index];
- size_t id4_before_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id4_before = params[cblc_index].field;
+ size_t id4_before_o = params[cblc_index].offset;
+ size_t id4_before_s = params[cblc_index].size;
cblc_index += 1;
- const cblc_field_t *id4_after = __gg__treeplet_1f[cblc_index];
- size_t id4_after_o = __gg__treeplet_1o[cblc_index];
- size_t id4_after_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id4_after = params[cblc_index].field;
+ size_t id4_after_o = params[cblc_index].offset;
+ size_t id4_after_s = params[cblc_index].size;
cblc_index += 1;
next_comparand.identifier_3 = normalize_id(id3,
extern "C"
void
-__gg__inspect_format_1_sbc(int backward, size_t integers[])
+__gg__inspect_format_1_sbc( int backward,
+ size_t integers[],
+ const cblc_referlet_t *params)
{
// When this routine is called, we know we are working in a single-byte-coded
// codeset like ASCII or EBCDIC.
if( backward )
{
- return inspect_backward_format_1(integers);
+ return inspect_backward_format_1(integers, params);
}
size_t int_index = 0;
std::vector<id_2_result> id_2_results(n_identifier_2);
// Pick up identifier_1, which is the string being inspected
- const cblc_field_t *id1 = __gg__treeplet_1f[cblc_index];
- size_t id1_o = __gg__treeplet_1o[cblc_index];
- size_t id1_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id1 = params[cblc_index].field ;
+ size_t id1_o = params[cblc_index].offset;
+ size_t id1_s = params[cblc_index].size ;
cblc_index += 1;
// normalize it, according to the language specification.
std::string normalized_id_1
{
// For each identifier_2, we pick up its value:
- id_2_results[i].id2 = __gg__treeplet_1f [cblc_index];
- id_2_results[i].id2_o = __gg__treeplet_1o[cblc_index];
- id_2_results[i].id2_s = __gg__treeplet_1s[cblc_index];
+ id_2_results[i].id2 = params[cblc_index].field ;
+ id_2_results[i].id2_o = params[cblc_index].offset;
+ id_2_results[i].id2_s = params[cblc_index].size ;
cblc_index += 1;
id_2_results[i].result = 0;
next_comparand.operation = operation;
next_comparand.identifier_3 = " ";
- const cblc_field_t *id4_before = __gg__treeplet_1f [cblc_index];
- size_t id4_before_o = __gg__treeplet_1o[cblc_index];
- size_t id4_before_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id4_before = params[cblc_index].field ;
+ size_t id4_before_o = params[cblc_index].offset;
+ size_t id4_before_s = params[cblc_index].size ;
cblc_index += 1;
- const cblc_field_t *id4_after = __gg__treeplet_1f [cblc_index];
- size_t id4_after_o = __gg__treeplet_1o[cblc_index];
- size_t id4_after_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id4_after = params[cblc_index].field ;
+ size_t id4_after_o = params[cblc_index].offset;
+ size_t id4_after_s = params[cblc_index].size ;
cblc_index += 1;
std::string normalized_id_4_before
next_comparand.id_2_index = i;
next_comparand.operation = operation;
- const cblc_field_t *id3 = __gg__treeplet_1f[cblc_index];
- size_t id3_o = __gg__treeplet_1o[cblc_index];
- size_t id3_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id3 = params[cblc_index].field ;
+ size_t id3_o = params[cblc_index].offset;
+ size_t id3_s = params[cblc_index].size ;
cblc_index += 1;
- const cblc_field_t *id4_before = __gg__treeplet_1f[cblc_index];
- size_t id4_before_o = __gg__treeplet_1o[cblc_index];
- size_t id4_before_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id4_before = params[cblc_index].field ;
+ size_t id4_before_o = params[cblc_index].offset;
+ size_t id4_before_s = params[cblc_index].size ;
cblc_index += 1;
- const cblc_field_t *id4_after = __gg__treeplet_1f[cblc_index];
- size_t id4_after_o = __gg__treeplet_1o[cblc_index];
- size_t id4_after_s = __gg__treeplet_1s[cblc_index];
+ const cblc_field_t *id4_after = params[cblc_index].field ;
+ size_t id4_after_o = params[cblc_index].offset;
+ size_t id4_after_s = params[cblc_index].size ;
cblc_index += 1;
next_comparand.identifier_3 = normalize_id_sbc(id3,
static
GCOB_FP128 kahan_summation(size_t ncount,
- cblc_field_t **source,
- const size_t *source_o,
- const size_t *source_s,
- const int *flags,
+ const cblc_refer_t *refers,
size_t *k_count)
{
// We use compensated addition. Look up Kahan summation.
for(size_t i=0; i<ncount; i++)
{
refer_state_for_all state;
- build_refer_state_for_all(state, source[i], flags[i]);
+ build_refer_state_for_all(state, refers[i].field, refers[i].flags);
for(;;)
{
- input = __gg__float128_from_qualified_field(source[i],
- source_o[i],
- source_s[i]);
+ input = __gg__float128_from_qualified_field(refers[i].field,
+ refers[i].offset,
+ refers[i].size);
y = input - kahan_c;
t = sum + y;
kahan_c = (t - sum) - y ;
sum = t;
*k_count += 1;
- if( !update_refer_state_for_all(state, source[i]) )
+ if( !update_refer_state_for_all(state, refers[i].field) )
{
// There is nothing left to do.
break;
static
GCOB_FP128
variance( size_t ncount,
- cblc_field_t **source,
- const size_t *source_o,
- const size_t *source_s,
- const int *flags)
+ const cblc_refer_t *refers)
{
// In order to avoid catastrophic cancellation, we are going to use an
// algorithm that is a bit wasteful of time, but is described as particularly
// as an offset in the second stage:
size_t k_count;
GCOB_FP128 offset = kahan_summation( ncount,
- source,
- source_o,
- source_s,
- flags,
+ refers,
&k_count);
offset /= k_count;
for(size_t i=0; i<ncount; i++)
{
refer_state_for_all state;
- build_refer_state_for_all(state, source[i], flags[i]);
+ build_refer_state_for_all(state, refers[i].field, refers[i].flags);
for(;;)
{
- newValue = __gg__float128_from_qualified_field(source[i],
- source_o[i],
- source_s[i]);
+ newValue = __gg__float128_from_qualified_field(refers[i].field,
+ refers[i].offset,
+ refers[i].size);
newValue -= offset;
count += 1;
mean += delta / count;
delta2 = newValue - mean;
M2 += delta * delta2;
- if( !update_refer_state_for_all(state, source[i]) )
+ if( !update_refer_state_for_all(state, refers[i].field) )
{
// There is nothing left to do.
break;
extern "C"
void
__gg__concat( cblc_field_t *dest,
- size_t ncount)
+ size_t ncount,
+ const cblc_refer_t *refers)
{
size_t bytes = 0;
size_t offset = 0;
for(size_t i=0; i<ncount; i++)
{
- bytes += __gg__treeplet_1s[i];
+ bytes += refers[i].size;
}
__gg__adjust_dest_size(dest, bytes);
for(size_t i=0; i<ncount; i++)
{
memcpy( dest->data + offset,
- __gg__treeplet_1f[i]->data + __gg__treeplet_1o[i],
- __gg__treeplet_1s[i]);
- offset += __gg__treeplet_1s[i];
+ refers[i].field->data + refers[i].offset,
+ refers[i].size);
+ offset += refers[i].size;
}
}
extern "C"
void
__gg__max(cblc_field_t *dest,
- size_t ncount)
+ size_t ncount,
+ const cblc_refer_t *refers)
{
// FUNCTION MAX
- if( ( __gg__treeplet_1f[0]->type == FldAlphanumeric
- || __gg__treeplet_1f[0]->type == FldLiteralA) )
+ if( ( refers[0].field->type == FldAlphanumeric
+ || refers[0].field->type == FldLiteralA) )
{
cblc_field_t *best_field ;
unsigned char *best_location = nullptr ;
{
refer_state_for_all state;
- build_refer_state_for_all(state, __gg__treeplet_1f[i], __gg__fourplet_flags[i]);
+ build_refer_state_for_all(state, refers[i].field, refers[i].flags);
for(;;)
{
if( first_time )
{
first_time = false;
- best_field = __gg__treeplet_1f[i];
- best_location = __gg__treeplet_1f[i]->data + __gg__treeplet_1o[i];
- best_length = __gg__treeplet_1s[i];
- best_attr = __gg__treeplet_1f[i]->attr;
- best_flags = __gg__fourplet_flags[i];
+ best_field = refers[i].field;
+ best_location = refers[i].field->data + refers[i].offset;
+ best_length = refers[i].size;
+ best_attr = refers[i].field->attr;
+ best_flags = refers[i].flags;
}
else
{
- cblc_field_t *candidate_field = __gg__treeplet_1f[i];
- unsigned char *candidate_location = __gg__treeplet_1f[i]->data + __gg__treeplet_1o[i];
- size_t candidate_length = __gg__treeplet_1s[i];
- int candidate_attr = __gg__treeplet_1f[i]->attr;
- int candidate_flags = __gg__fourplet_flags[i];
+ cblc_field_t *candidate_field = refers[i].field;
+ unsigned char *candidate_location = refers[i].field->data + refers[i].offset;
+ size_t candidate_length = refers[i].size;
+ int candidate_attr = refers[i].field->attr;
+ int candidate_flags = refers[i].flags;
int compare_result = __gg__compare_2(
candidate_field,
best_flags = candidate_flags ;
}
}
- if( !update_refer_state_for_all(state, __gg__treeplet_1f[i]) )
+ if( !update_refer_state_for_all(state, refers[i].field) )
{
// There is nothing left to do.
break;
for(size_t i=0; i<ncount; i++)
{
refer_state_for_all state;
- build_refer_state_for_all(state, __gg__treeplet_1f[i], __gg__fourplet_flags[i]);
+ build_refer_state_for_all(state, refers[i].field, refers[i].flags);
for(;;)
{
if( first_time )
{
first_time = false;
- retval = __gg__float128_from_qualified_field(__gg__treeplet_1f[i], __gg__treeplet_1o[i], __gg__treeplet_1s[i]);
+ retval = __gg__float128_from_qualified_field(refers[i].field, refers[i].offset, refers[i].size);
}
else
{
- GCOB_FP128 candidate = __gg__float128_from_qualified_field(__gg__treeplet_1f[i], __gg__treeplet_1o[i], __gg__treeplet_1s[i]);
+ GCOB_FP128 candidate = __gg__float128_from_qualified_field(refers[i].field, refers[i].offset, refers[i].size);
if( candidate >= retval )
{
retval = candidate;
}
}
- if( !update_refer_state_for_all(state, __gg__treeplet_1f[i]) )
+ if( !update_refer_state_for_all(state, refers[i].field) )
{
// There is nothing left to do for that input.
break;
extern "C"
void
__gg__mean( cblc_field_t *dest,
- size_t ninputs)
+ size_t ninputs,
+ const cblc_refer_t *refers)
{
// FUNCTION MEAN
size_t k_count;
GCOB_FP128 sum = kahan_summation(ninputs,
- __gg__treeplet_1f,
- __gg__treeplet_1o,
- __gg__treeplet_1s,
- __gg__fourplet_flags,
+ refers,
&k_count);
sum /= k_count;
__gg__float128_to_field(dest,
extern "C"
void
__gg__median( cblc_field_t *dest,
- size_t ncount)
+ size_t ncount,
+ const cblc_refer_t *refers)
{
// FUNCTION MEDIAN
for(size_t i=0; i<ncount; i++)
{
refer_state_for_all state;
- build_refer_state_for_all(state, __gg__treeplet_1f[i], __gg__fourplet_flags[i]);
+ build_refer_state_for_all(state, refers[i].field, refers[i].flags);
for(;;)
{
}
assert(the_list);
- the_list[k_count] = __gg__float128_from_qualified_field(__gg__treeplet_1f[i],
- __gg__treeplet_1o[i],
- __gg__treeplet_1s[i]);
+ the_list[k_count] = __gg__float128_from_qualified_field(refers[i].field,
+ refers[i].offset,
+ refers[i].size);
k_count += 1;
- if( !update_refer_state_for_all(state, __gg__treeplet_1f[i]) )
+ if( !update_refer_state_for_all(state, refers[i].field) )
{
// There is nothing left to do.
break;
extern "C"
void
__gg__midrange( cblc_field_t *dest,
- size_t ncount)
+ size_t ncount,
+ const cblc_refer_t *refers)
{
// FUNCTION MIDRANGE
GCOB_FP128 val;
for(size_t i=0; i<ncount; i++)
{
refer_state_for_all state;
- build_refer_state_for_all(state, __gg__treeplet_1f[i], __gg__fourplet_flags[i]);
+ build_refer_state_for_all(state, refers[i].field, refers[i].flags);
for(;;)
{
- val = __gg__float128_from_qualified_field(__gg__treeplet_1f[i],
- __gg__treeplet_1o[i],
- __gg__treeplet_1s[i]);
+ val = __gg__float128_from_qualified_field(refers[i].field,
+ refers[i].offset,
+ refers[i].size);
if( first_time )
{
first_time = false;
}
min = std::min(min, val);
max = std::max(max, val);
- if( !update_refer_state_for_all(state, __gg__treeplet_1f[i]) )
+ if( !update_refer_state_for_all(state, refers[i].field) )
{
// There is nothing left to do for that input.
break;
extern "C"
void
__gg__min(cblc_field_t *dest,
- size_t ncount)
+ size_t ncount,
+ const cblc_refer_t *refers)
{
// FUNCTION MIN
- if( ( __gg__treeplet_1f[0]->type == FldAlphanumeric
- || __gg__treeplet_1f[0]->type == FldLiteralA) )
+ if( ( refers[0].field->type == FldAlphanumeric
+ || refers[0].field->type == FldLiteralA) )
{
cblc_field_t *best_field ;
unsigned char *best_location = nullptr ;
{
refer_state_for_all state;
- build_refer_state_for_all(state, __gg__treeplet_1f[i], __gg__fourplet_flags[i]);
+ build_refer_state_for_all(state, refers[i].field, refers[i].flags);
for(;;)
{
if( first_time )
{
first_time = false;
- best_field = __gg__treeplet_1f[i];
- best_location = __gg__treeplet_1f[i]->data + __gg__treeplet_1o[i];
- best_length = __gg__treeplet_1s[i];
- best_attr = __gg__treeplet_1f[i]->attr;
- best_flags = __gg__fourplet_flags[i];
+ best_field = refers[i].field;
+ best_location = refers[i].field->data + refers[i].offset;
+ best_length = refers[i].size;
+ best_attr = refers[i].field->attr;
+ best_flags = refers[i].flags;
}
else
{
- cblc_field_t *candidate_field = __gg__treeplet_1f[i];
- unsigned char *candidate_location = __gg__treeplet_1f[i]->data + __gg__treeplet_1o[i];
- size_t candidate_length = __gg__treeplet_1s[i];
- int candidate_attr = __gg__treeplet_1f[i]->attr;
- int candidate_flags = __gg__fourplet_flags[i];
+ cblc_field_t *candidate_field = refers[i].field;
+ unsigned char *candidate_location = refers[i].field->data + refers[i].offset;
+ size_t candidate_length = refers[i].size;
+ int candidate_attr = refers[i].field->attr;
+ int candidate_flags = refers[i].flags;
int compare_result = __gg__compare_2(
candidate_field,
best_flags = candidate_flags ;
}
}
- if( !update_refer_state_for_all(state, __gg__treeplet_1f[i]) )
+ if( !update_refer_state_for_all(state, refers[i].field) )
{
// There is nothing left to do.
break;
for(size_t i=0; i<ncount; i++)
{
refer_state_for_all state;
- build_refer_state_for_all(state, __gg__treeplet_1f[i], __gg__fourplet_flags[i]);
+ build_refer_state_for_all(state, refers[i].field, refers[i].flags);
for(;;)
{
if( first_time )
{
first_time = false;
- retval = __gg__float128_from_qualified_field(__gg__treeplet_1f[i], __gg__treeplet_1o[i], __gg__treeplet_1s[i]);
+ retval = __gg__float128_from_qualified_field(refers[i].field, refers[i].offset, refers[i].size);
}
else
{
- GCOB_FP128 candidate = __gg__float128_from_qualified_field(__gg__treeplet_1f[i], __gg__treeplet_1o[i], __gg__treeplet_1s[i]);
+ GCOB_FP128 candidate = __gg__float128_from_qualified_field(refers[i].field, refers[i].offset, refers[i].size);
if( candidate < retval )
{
retval = candidate;
}
}
- if( !update_refer_state_for_all(state, __gg__treeplet_1f[i]) )
+ if( !update_refer_state_for_all(state, refers[i].field) )
{
// There is nothing left to do for that input.
break;
extern "C"
void
__gg__ord_min(cblc_field_t *dest,
- size_t ninputs)
+ size_t ninputs,
+ const cblc_refer_t *refers)
{
// Sets dest to the one-based ordinal position of the first occurrence
// of the biggest element in the list of refs[]
{
refer_state_for_all state;
- build_refer_state_for_all(state, __gg__treeplet_1f[i], __gg__fourplet_flags[i]);
+ build_refer_state_for_all(state, refers[i].field, refers[i].flags);
for(;;)
{
running_position += 1;
{
// We have to initialize the comparisons:
retval = running_position;
- best = __gg__treeplet_1f[i];
- best_location = __gg__treeplet_1f[i]->data + __gg__treeplet_1o[i];
- best_length = __gg__treeplet_1s[i];
- best_attr = __gg__treeplet_1f[i]->attr;
- best_flags = __gg__fourplet_flags[i];
+ best = refers[i].field;
+ best_location = refers[i].field->data + refers[i].offset;
+ best_length = refers[i].size;
+ best_attr = refers[i].field->attr;
+ best_flags = refers[i].flags;
}
else
{
// We need to save the current adjustments, because __gg__compare
// is free to modify .location
- candidate_location = __gg__treeplet_1f[i]->data + __gg__treeplet_1o[i];
- candidate_length = __gg__treeplet_1s[i];
- candidate_attr = __gg__treeplet_1f[i]->attr;
- candidate_flags = __gg__fourplet_flags[i];
+ candidate_location = refers[i].field->data + refers[i].offset;
+ candidate_length = refers[i].size;
+ candidate_attr = refers[i].field->attr;
+ candidate_flags = refers[i].flags;
int compare_result =
__gg__compare_2(
- __gg__treeplet_1f[i],
+ refers[i].field,
candidate_location,
candidate_length,
candidate_attr,
if( compare_result < 0 )
{
retval = running_position;
- best = __gg__treeplet_1f[i];
+ best = refers[i].field;
best_location = candidate_location;
best_length = candidate_length;
best_attr = candidate_attr;
best_flags = candidate_flags;
}
}
- if( !update_refer_state_for_all(state, __gg__treeplet_1f[i]) )
+ if( !update_refer_state_for_all(state, refers[i].field) )
{
// There is nothing left to do for that input.
break;
extern "C"
void
__gg__ord_max(cblc_field_t *dest,
- size_t ninputs)
+ size_t ninputs,
+ const cblc_refer_t *refers)
{
// Sets dest to the one-based ordinal position of the first occurrence
// of the biggest element in the list of refs[]
{
refer_state_for_all state;
- build_refer_state_for_all(state, __gg__treeplet_1f[i], __gg__fourplet_flags[i]);
+ build_refer_state_for_all(state, refers[i].field, refers[i].flags);
for(;;)
{
running_position += 1;
{
// We have to initialize the comparisons:
retval = running_position;
- best = __gg__treeplet_1f[i];
- best_location = __gg__treeplet_1f[i]->data + __gg__treeplet_1o[i];
- best_length = __gg__treeplet_1s[i];
- best_attr = __gg__treeplet_1f[i]->attr;
- best_flags = __gg__fourplet_flags[i];
+ best = refers[i].field;
+ best_location = refers[i].field->data + refers[i].offset;
+ best_length = refers[i].size;
+ best_attr = refers[i].field->attr;
+ best_flags = refers[i].flags;
}
else
{
// We need to save the current adjustments, because __gg__compare
// is free to modify .location
- candidate_location = __gg__treeplet_1f[i]->data + __gg__treeplet_1o[i];
- candidate_length = __gg__treeplet_1s[i];
- candidate_attr = __gg__treeplet_1f[i]->attr;
- candidate_flags = __gg__fourplet_flags[i];
+ candidate_location = refers[i].field->data + refers[i].offset;
+ candidate_length = refers[i].size;
+ candidate_attr = refers[i].field->attr;
+ candidate_flags = refers[i].flags;
int compare_result =
__gg__compare_2(
- __gg__treeplet_1f[i],
+ refers[i].field,
candidate_location,
candidate_length,
candidate_attr,
if( compare_result > 0 )
{
retval = running_position;
- best = __gg__treeplet_1f[i];
+ best = refers[i].field;
best_location = candidate_location;
best_length = candidate_length;
best_attr = candidate_attr;
best_flags = candidate_flags;
}
}
- if( !update_refer_state_for_all(state, __gg__treeplet_1f[i]) )
+ if( !update_refer_state_for_all(state, refers[i].field) )
{
// There is nothing left to do for that input.
break;
extern "C"
void
__gg__present_value(cblc_field_t *dest,
- size_t ncount)
+ size_t ncount,
+ const cblc_refer_t *refers)
{
GCOB_FP128 discount = 0;;
GCOB_FP128 denom = 1;
for(size_t i=0; i<ncount; i++)
{
refer_state_for_all state;
- build_refer_state_for_all(state, __gg__treeplet_1f[i], __gg__fourplet_flags[i]);
+ build_refer_state_for_all(state, refers[i].field, refers[i].flags);
for(;;)
{
if(first_time)
{
first_time = false;
- GCOB_FP128 arg1 = __gg__float128_from_qualified_field(__gg__treeplet_1f[i],
- __gg__treeplet_1o[i],
- __gg__treeplet_1s[i]);
+ GCOB_FP128 arg1 = __gg__float128_from_qualified_field(refers[i].field,
+ refers[i].offset,
+ refers[i].size);
if( arg1 <= GCOB_FP128_LITERAL(-1.0) )
{
exception_raise(ec_argument_function_e);
}
else
{
- GCOB_FP128 arg = __gg__float128_from_qualified_field(__gg__treeplet_1f[i],
- __gg__treeplet_1o[i],
- __gg__treeplet_1s[i]);
+ GCOB_FP128 arg = __gg__float128_from_qualified_field(refers[i].field,
+ refers[i].offset,
+ refers[i].size);
denom *= discount;
retval += arg * denom;
}
- if( !update_refer_state_for_all(state, __gg__treeplet_1f[i]) )
+ if( !update_refer_state_for_all(state, refers[i].field) )
{
// There is nothing left to do for that input.
break;
extern "C"
void
__gg__range(cblc_field_t *dest,
- size_t ncount)
+ size_t ncount,
+ const cblc_refer_t *refers)
{
// FUNCTION RANGE
bool first_time = true;
for(size_t i=0; i<ncount; i++)
{
refer_state_for_all state;
- build_refer_state_for_all(state, __gg__treeplet_1f[i], __gg__fourplet_flags[i]);
+ build_refer_state_for_all(state, refers[i].field, refers[i].flags);
for(;;)
{
- val = __gg__float128_from_qualified_field(__gg__treeplet_1f[i],
- __gg__treeplet_1o[i],
- __gg__treeplet_1s[i]);
+ val = __gg__float128_from_qualified_field(refers[i].field,
+ refers[i].offset,
+ refers[i].size);
if( first_time )
{
first_time = false;
}
min = std::min(min, val);
max = std::max(max, val);
- if( !update_refer_state_for_all(state, __gg__treeplet_1f[i]) )
+ if( !update_refer_state_for_all(state, refers[i].field) )
{
// There is nothing left to do.
break;
extern "C"
void
__gg__standard_deviation( cblc_field_t *dest,
- size_t ninputs)
+ size_t ninputs,
+ const cblc_refer_t *refers)
{
// FUNCTION STANDARD-DEVIATION
GCOB_FP128 retval = variance(ninputs,
- __gg__treeplet_1f,
- __gg__treeplet_1o,
- __gg__treeplet_1s,
- __gg__fourplet_flags);
+ refers);
retval = FP128_FUNC(sqrt)(retval);
__gg__float128_to_field(dest,
extern "C"
void
__gg__sum(cblc_field_t *dest,
- size_t ninputs)
+ size_t ninputs,
+ const cblc_refer_t *refers)
{
// FUNCTION SUM
size_t k_count;
GCOB_FP128 sum = kahan_summation(ninputs,
- __gg__treeplet_1f,
- __gg__treeplet_1o,
- __gg__treeplet_1s,
- __gg__fourplet_flags,
+ refers,
&k_count);
__gg__float128_to_field(dest,
sum,
extern "C"
void
__gg__variance( cblc_field_t *dest,
- size_t ncount)
+ size_t ncount,
+ const cblc_refer_t *refers)
{
// FUNCTION VARIANCE
GCOB_FP128 retval = variance(ncount,
- __gg__treeplet_1f,
- __gg__treeplet_1o,
- __gg__treeplet_1s,
- __gg__fourplet_flags);
+ refers);
__gg__float128_to_field(dest,
retval,
truncation_e,
extern "C"
void
-__gg__substitute( cblc_field_t *dest,
- const cblc_field_t *arg1_f,
- size_t arg1_o,
- size_t arg1_s,
- size_t N,
- const uint8_t *control)
+__gg__substitute( const cblc_referlet_t *arg2,
+ const cblc_referlet_t *arg3,
+ cblc_field_t *dest,
+ const cblc_field_t *arg1_f,
+ size_t arg1_o,
+ size_t arg1_s,
+ size_t N,
+ const uint8_t *control)
{
// arg2 is the Group 1 triplet.
// arg3 is the Group 2 triplet
- cblc_field_t **arg2_f = __gg__treeplet_1f;
- size_t *arg2_o = __gg__treeplet_1o;
- size_t *arg2_s = __gg__treeplet_1s;
- cblc_field_t **arg3_f = __gg__treeplet_2f;
- const size_t *arg3_o = __gg__treeplet_2o;
- const size_t *arg3_s = __gg__treeplet_2s;
-
ssize_t retval_size;
retval_size = 256;
char *retval = static_cast<char *>(malloc(retval_size));
for( size_t i=0; i<N; i++ )
{
- if( arg2_s[i] == 0 )
+ if( arg2[i].size == 0 )
{
exception_raise(ec_argument_function_e);
goto bugout;
{
pflasts[i] = strcasestr(haystack,
haystack_e,
- PTRCAST(char, (arg2_f[i]->data+arg2_o[i])),
- PTRCAST(char, (arg2_f[i]->data+arg2_o[i])) + arg2_s[i],
+ PTRCAST(char, (arg2[i].field->data+arg2[i].offset)),
+ PTRCAST(char, (arg2[i].field->data+arg2[i].offset)) + arg2[i].size,
is_ebcdic);
}
else if( control[i] & substitute_last_e)
{
pflasts[i] = strcaselaststr(haystack,
haystack_e,
- PTRCAST(char, (arg2_f[i]->data+arg2_o[i])),
- PTRCAST(char, (arg2_f[i]->data+arg2_o[i])) + arg2_s[i],
+ PTRCAST(char, (arg2[i].field->data+arg2[i].offset)),
+ PTRCAST(char, (arg2[i].field->data+arg2[i].offset)) + arg2[i].size,
is_ebcdic);
}
else
{
pflasts[i] = strstr(haystack,
haystack_e,
- PTRCAST(char, (arg2_f[i]->data+arg2_o[i])),
- PTRCAST(char, (arg2_f[i]->data+arg2_o[i])) + arg2_s[i]);
+ PTRCAST(char, (arg2[i].field->data+arg2[i].offset)),
+ PTRCAST(char, (arg2[i].field->data+arg2[i].offset)) + arg2[i].size);
}
else if( control[i] & substitute_last_e)
{
pflasts[i] = strlaststr(haystack,
haystack_e,
- PTRCAST(char, (arg2_f[i]->data+arg2_o[i])),
- PTRCAST(char, (arg2_f[i]->data+arg2_o[i])) + arg2_s[i]);
+ PTRCAST(char, (arg2[i].field->data+arg2[i].offset)),
+ PTRCAST(char, (arg2[i].field->data+arg2[i].offset)) + arg2[i].size);
}
else
{
{
// Let's make sure that there is enough room in the case that we add this
// arg
- while( outdex - (ssize_t)arg2_s[i] + (ssize_t)arg3_s[i]
+ while( outdex - (ssize_t)arg2[i].size + (ssize_t)arg3[i].size
> retval_size )
{
retval_size *= 2;
continue;
}
- const char *needle = PTRCAST(char, arg2_f[i]->data+arg2_o[i]);
- const char *needle_e = PTRCAST(char, arg2_f[i]->data+arg2_o[i]) + arg2_s[i];
+ const char *needle = PTRCAST(char, arg2[i].field->data+arg2[i].offset);
+ const char *needle_e = PTRCAST(char, arg2[i].field->data+arg2[i].offset) + arg2[i].size;
matched = (control[i] & substitute_anycase_e) && iscasematch(
haystack,
haystack_e,
}
if( matched )
{
- haystack += arg2_s[i];
- memcpy(retval + outdex, arg3_f[i]->data + arg3_o[i], arg3_s[i]);
- outdex += arg3_s[i];
+ haystack += arg2[i].size;
+ memcpy(retval + outdex, arg3[i].field->data + arg3[i].offset, arg3[i].size);
+ outdex += arg3[i].size;
did_something = true;
break;
}
// Setting this variable to 'true' suppresses the error condition.
static bool sv_suppress_eof_ec = false;
-// What follows are arrays that are used by features like INSPECT, STRING,
-// UNSTRING, and, particularly, arithmetic_operation. These features are
-// characterized by having unknown, and essentially unlimited, numbers of
-// variables. Consider, for example, ADD A B C D ... TO L M N O ...
-
-// Although originally implemented with malloc/free, that's terribly inefficient
-// on its face; arithmetic is done frequently. The next step was to malloc
-// buffers just once, and have them grow as needed, but that resulted in a lot
-// of code being laid down, because it meant checking each buffer size at
-// run-time, and laying down the code to be executed if the size was inadequate.
-//
-// The current solution is to make the pointers to the arrays of values global,
-// and initialize them with space for MIN_FIELD_BLOCK_SIZE values. Thus, at
-// compile time, we can ignore all tests for fewer than MIN_FIELD_BLOCK_SIZE
-// (which is generally the case). Only when N is greater than the MIN do we
-// have to check the current run-time size and, if necessary, expand the buffer
-// with realloc.
-size_t __gg__arithmetic_rounds_size = 0 ;
-int * __gg__arithmetic_rounds = NULL ;
-
-size_t __gg__fourplet_flags_size = 0 ;
-int * __gg__fourplet_flags = NULL ;
-
-static size_t treeplet_1_size = 0 ;
-cblc_field_t ** __gg__treeplet_1f = NULL ;
-size_t * __gg__treeplet_1o = NULL ;
-size_t * __gg__treeplet_1s = NULL ;
-
-static size_t treeplet_2_size = 0 ;
-cblc_field_t ** __gg__treeplet_2f = NULL ;
-size_t * __gg__treeplet_2o = NULL ;
-size_t * __gg__treeplet_2s = NULL ;
-
-static size_t treeplet_3_size = 0 ;
-cblc_field_t ** __gg__treeplet_3f = NULL ;
-size_t * __gg__treeplet_3o = NULL ;
-size_t * __gg__treeplet_3s = NULL ;
-
-static size_t treeplet_4_size = 0 ;
-cblc_field_t ** __gg__treeplet_4f = NULL ;
-size_t * __gg__treeplet_4o = NULL ;
-size_t * __gg__treeplet_4s = NULL ;
-
-
// This value is increased every time PROCEDURE DIVISION is processed. It is
// used to keep track of local variables.
size_t __gg__unique_prog_id = 0 ;
return currency_signs(__gg__default_currency_sign).c_str();
}
-extern "C"
-void
-__gg__resize_int_p( size_t *size,
- int **block,
- size_t new_size)
- {
- if( new_size > *size )
- {
- *size = new_size;
- *block = static_cast<int *>(realloc(*block, new_size * sizeof(int)));
- }
- }
-
-extern "C"
-void
-__gg__resize_treeplet(int ngroup,
- size_t new_size)
- {
- switch( ngroup )
- {
- case 1:
- if( new_size > treeplet_1_size )
- {
- treeplet_1_size = new_size;
- __gg__treeplet_1f = static_cast<cblc_field_t **>(realloc(__gg__treeplet_1f, new_size * sizeof(cblc_field_t *)));
- __gg__treeplet_1o = static_cast<size_t *>(realloc(__gg__treeplet_1o, new_size * sizeof(size_t)));
- __gg__treeplet_1s = static_cast<size_t *>(realloc(__gg__treeplet_1s, new_size * sizeof(size_t)));
- }
- break;
- case 2:
- if( new_size > treeplet_2_size )
- {
- treeplet_2_size = new_size;
- __gg__treeplet_2f = static_cast<cblc_field_t **>(realloc(__gg__treeplet_2f, new_size * sizeof(cblc_field_t *)));
- __gg__treeplet_2o = static_cast<size_t *>(realloc(__gg__treeplet_2o, new_size * sizeof(size_t)));
- __gg__treeplet_2s = static_cast<size_t *>(realloc(__gg__treeplet_2s, new_size * sizeof(size_t)));
- }
- break;
- case 3:
- if( new_size > treeplet_3_size )
- {
- treeplet_3_size = new_size;
- __gg__treeplet_3f = static_cast<cblc_field_t **>(realloc(__gg__treeplet_3f, new_size * sizeof(cblc_field_t *)));
- __gg__treeplet_3o = static_cast<size_t *>(realloc(__gg__treeplet_3o, new_size * sizeof(size_t)));
- __gg__treeplet_3s = static_cast<size_t *>(realloc(__gg__treeplet_3s, new_size * sizeof(size_t)));
- }
- break;
- case 4:
- if( new_size > treeplet_4_size )
- {
- treeplet_4_size = new_size;
- __gg__treeplet_4f = static_cast<cblc_field_t **>(realloc(__gg__treeplet_4f, new_size * sizeof(cblc_field_t *)));
- __gg__treeplet_4o = static_cast<size_t *>(realloc(__gg__treeplet_4o, new_size * sizeof(size_t)));
- __gg__treeplet_4s = static_cast<size_t *>(realloc(__gg__treeplet_4s, new_size * sizeof(size_t)));
- }
- break;
- }
- }
-
static void
initialize_program_state()
{
program_state initial_value = {};
program_states.push_back(initial_value);
__gg__currency_signs = program_states.back().rt_currency_signs;
-
- // This is where we initialize the various tables that have
- // MIN_FIELD_BLOCK_SIZE elements:
-
- __gg__resize_int_p(&__gg__arithmetic_rounds_size,
- &__gg__arithmetic_rounds,
- MIN_FIELD_BLOCK_SIZE );
- __gg__resize_int_p(&__gg__fourplet_flags_size,
- &__gg__fourplet_flags,
- MIN_FIELD_BLOCK_SIZE );
- __gg__resize_treeplet(1, MIN_FIELD_BLOCK_SIZE);
- __gg__resize_treeplet(2, MIN_FIELD_BLOCK_SIZE);
- __gg__resize_treeplet(3, MIN_FIELD_BLOCK_SIZE);
- __gg__resize_treeplet(4, MIN_FIELD_BLOCK_SIZE);
}
extern "C"
case FldNumericBinary:
case FldPacked:
case FldNumericBin5:
+ case FldLiteralN:
{
int dummy;
int digits;
digits = 19;
break;
case 16:
- digits = MAX_FIXED_POINT_DIGITS;
+ // digits = MAX_FIXED_POINT_DIGITS;
+ /* This requires some context. Although we generally operate on
+ the basis of MAX_FIXED_POINT_DIGITS (currently 37, which gives
+ one digit of headroom for rounding and such), a PIC X(16) COMP-X
+ provides for an honest 128-bits of binary value. If that is set
+ to HIGH-VALUE, the decimal representation requires 39 decimal
+ digits. So, we do that here so that the DISPLAY of that value
+ is accurate. */
+ digits = 39;
break;
default:
warnx("%s(): %s has capacity %ld\n",
return retval;
}
-static
+extern "C"
int
-compare_field_class(const cblc_field_t *conditional,
+__gg__compare_field_class(const cblc_field_t *conditional,
unsigned char *conditional_location,
int conditional_length,
cblc_field_t *list)
*allocated_left = nullptr;
*allocated_right = nullptr;
- bool convert_left_to_right = false;
- bool convert_right_to_left = false;
-
- size_t converted_length;
- const char *converted;
if( *encoding_left == *encoding_right )
{
// This is both the most-seen situation, and, happily, the easiest to
// handle. We just do nothing.
}
- else if( *encoding_left == __gg__national_encoding
- || *encoding_right == __gg__national_encoding )
- {
- // The encodings are different, but at least one is the national encoding.
- // Convert the other one to be national as well:
- if( *encoding_left != __gg__national_encoding )
- {
- convert_left_to_right = true;
- }
- else
- {
- convert_right_to_left = true;
- }
- }
else
{
- // We have two different encodings, and neither of them are national. This
- // can happen when a file descriptor has a specific codeset that doesn't
- // match the national codeset. We will convert the narrower to the wider;
- // if they are both the same width we will pick one arbitrarily.
+ bool convert_left_to_right = false;
+ bool convert_right_to_left = false;
+ size_t converted_length;
+ const char *converted;
+
+ // We are dealing with two different encodings
const charmap_t *charmap_left = __gg__get_charmap(*encoding_left);
const charmap_t *charmap_right = __gg__get_charmap(*encoding_right);
- if( charmap_right->stride() >= charmap_left->stride() )
+ if( charmap_left->stride() == charmap_right->stride() )
{
- convert_left_to_right = true;
+ // The strides are the same. If one is display, convert the other to
+ // display. If one is national, convert the other to national
+ if( *encoding_left == __gg__display_encoding )
+ {
+ convert_right_to_left = true;
+ }
+ else if(*encoding_right == __gg__display_encoding )
+ {
+ convert_left_to_right = true;
+ }
+ else if(*encoding_left == __gg__national_encoding )
+ {
+ convert_right_to_left = true;
+ }
+ else if(*encoding_right == __gg__national_encoding )
+ {
+ convert_left_to_right = true;
+ }
+ else
+ {
+ // The strides are the same, but we don't know anything about the
+ // encodings. Toss a coin:
+ convert_right_to_left = true;
+ }
}
else
{
- convert_right_to_left = true;
+ // The two strides are different. Convert the narrow to the wider:
+ if( charmap_left->stride() > charmap_right->stride() )
+ {
+ convert_right_to_left = true;
+ }
+ else
+ {
+ convert_left_to_right = true;
+ }
}
- }
- if( convert_left_to_right )
- {
- // Convert the left side to the right encoding
- converted = __gg__iconverter(*encoding_left,
- *encoding_right,
- *left_string,
- *left_length,
- &converted_length);
- *encoding_left = *encoding_right ;
- *allocated_left = static_cast<char *>(malloc(converted_length));
- massert(*allocated_left);
- *left_string = *allocated_left;
- *left_length = converted_length;
- memcpy(*left_string, converted, *left_length);
- }
- if( convert_right_to_left )
- {
- // Convert the right side to the left_encoding
- converted = __gg__iconverter(*encoding_right,
- *encoding_left,
- *right_string,
- *right_length,
- &converted_length);
- *encoding_right = *encoding_left ;
- *allocated_right = static_cast<char *>(malloc(converted_length));
- massert(*allocated_right);
- *right_string = *allocated_right;
- *right_length = converted_length;
- memcpy(right_string, converted, *right_length);
+ if( convert_left_to_right )
+ {
+ // Convert the left side to the right encoding
+ converted = __gg__iconverter(*encoding_left,
+ *encoding_right,
+ *left_string,
+ *left_length,
+ &converted_length);
+ *encoding_left = *encoding_right ;
+ *allocated_left = static_cast<char *>(malloc(converted_length));
+ massert(*allocated_left);
+ *left_string = *allocated_left;
+ *left_length = converted_length;
+ memcpy(*left_string, converted, *left_length);
+ }
+ if( convert_right_to_left )
+ {
+ // Convert the right side to the left_encoding
+ converted = __gg__iconverter(*encoding_right,
+ *encoding_left,
+ *right_string,
+ *right_length,
+ &converted_length);
+ *encoding_right = *encoding_left ;
+ *allocated_right = static_cast<char *>(malloc(converted_length));
+ massert(*allocated_right);
+ *right_string = *allocated_right;
+ *right_length = converted_length;
+ memcpy(*right_string, converted, *right_length);
+ }
}
}
if( right_side->type == FldClass )
{
- return compare_field_class( left_side,
+ return __gg__compare_field_class( left_side,
left_location,
left_length,
right_side);
source_size = fsource->digits;
+ // We have to deal with the special case of moving a pure binary
+ // value to an alphanumeric, which starts with a source_size
+ // of zero.
+ if( source_size == 0 )
+ {
+ switch(fsource->capacity)
+ {
+ case 1:
+ source_size = 3;
+ break;
+ case 2:
+ source_size = 5;
+ break;
+ case 3:
+ case 4:
+ source_size = 10;
+ break;
+ case 5:
+ case 6:
+ case 7:
+ case 8:
+ source_size = 20;
+ break;
+ default:
+ source_size = 39;
+ break;
+ }
+ }
+
// Turn the integer value into a string:
__gg__binary_to_string_encoded(ach,
source_size,
// Specifically, we'll move pach to point to the first
// character that isn't zero.
- if( fsource->attr & intermediate_e )
+ if( fsource->attr & intermediate_e || fsource->digits == 0)
{
while(source_size > 1) // This ensures we leave one '0'
{
extern "C"
int
-__gg__string(const size_t integers[])
+__gg__string(const size_t integers[], const cblc_referlet_t *ref)
{
// The first integer is the count of identifier-2 values. Call it N
// The following N integers are the counts of each of the identifier-1 values,
// That's followed by identifier2 for N2
// And so on
- cblc_field_t **ref = __gg__treeplet_1f;
- const size_t *ref_o = __gg__treeplet_1o;
- const size_t *ref_s = __gg__treeplet_1s;
-
static const int INDEX_OF_POINTER = 1;
size_t index_cblc = 0 ;
// Pick up the target
- const cblc_field_t *tgt = ref[index_cblc];
+ const cblc_field_t *tgt = ref[index_cblc].field;
// Pick up the target encoding, which according to the ISO specification
// controls all the parameters.
int stride = charmap->stride();
// Pick up the rest of the parameters
- size_t tgt_o = ref_o[index_cblc];
- size_t tgt_s = ref_s[index_cblc];
+ size_t tgt_o = ref[index_cblc].offset;
+ size_t tgt_s = ref[index_cblc].size;
index_cblc += 1;
char *dest = reinterpret_cast<char *>(tgt->data + tgt_o);
// Pick up the pointer, if any
size_t pointer = 0;
int overflow = 0;
- if( ref[INDEX_OF_POINTER] )
+ if( ref[INDEX_OF_POINTER].field )
{
int rdigits;
int p = (size_t)__gg__binary_value_from_qualified_field(
&rdigits,
- ref [INDEX_OF_POINTER],
- ref_o[INDEX_OF_POINTER],
- ref_s[INDEX_OF_POINTER]
+ ref[INDEX_OF_POINTER].field,
+ ref[INDEX_OF_POINTER].offset,
+ ref[INDEX_OF_POINTER].size
);
if( p<0 )
{
// Pick up the identifier_2 DELIMITED BY value
std::u32string str_id2 = normalize_for_inspect_format_4(
- ref[index_cblc],
- ref_o[index_cblc],
- ref_s[index_cblc],
+ ref[index_cblc].field,
+ ref[index_cblc].offset,
+ ref[index_cblc].size,
tgt_encoding);
index_cblc += 1;
{
// Pick up the next id-1 source string for the current id-2 delimiter
std::u32string str_id1 = normalize_for_inspect_format_4(
- ref[index_cblc],
- ref_o[index_cblc],
- ref_s[index_cblc],
+ ref[index_cblc].field,
+ ref[index_cblc].offset,
+ ref[index_cblc].size,
tgt_encoding);
index_cblc += 1;
}
// Update the pointer, if there is one
- if( ref[INDEX_OF_POINTER] )
+ if( ref[INDEX_OF_POINTER].field )
{
- __gg__int128_to_qualified_field(ref [INDEX_OF_POINTER],
- ref_o[INDEX_OF_POINTER],
- ref_s[INDEX_OF_POINTER],
+ __gg__int128_to_qualified_field(ref[INDEX_OF_POINTER].field,
+ ref[INDEX_OF_POINTER].offset,
+ ref[INDEX_OF_POINTER].size,
(__int128)(pointer+1),
0,
truncation_e,
static size_t display_string_size = MINIMUM_ALLOCATION_SIZE;
static char *display_string = static_cast<char *>(malloc(display_string_size));
+ if( field->type == FldLiteralA && field->encoding == custom_encoding_e )
+ {
+ field->encoding = DEFAULT_SOURCE_ENCODING;
+ }
+
+
cbl_encoding_t encoding = format_for_display_internal(
&display_string,
&display_string_size,
extern "C"
int
-__gg__literaln_alpha_compare( char *left_side,
- const cblc_field_t *right,
- size_t offset,
- size_t length,
- int flags)
- {
- int retval;
- if( length == 0 )
- {
- length = right->capacity;
- }
-
- cbl_encoding_t right_encoding = right->encoding;
- if( right->attr & hex_encoded_e )
- {
- right_encoding = iconv_CP1252_e;
- }
- retval = compare_strings( left_side,
- strlen(left_side),
- false,
- reinterpret_cast<char *>((right->data + offset)),
- length,
- !!(flags & REFER_T_MOVE_ALL),
- right_encoding,
- right_encoding);
- return retval;
- }
-
-extern "C"
-int
-__gg__unstring( const cblc_field_t *id1, // The string being unstring
+__gg__unstring( const cblc_referlet_t *id2,
+ const cblc_referlet_t *id4,
+ const cblc_referlet_t *id5,
+ const cblc_referlet_t *id6,
+ const cblc_field_t *id1, // The string being unstring
size_t id1_o,
size_t id1_s,
size_t ndelimiteds, // The number of DELIMITED entries
// resolved. Each might have an identifier-5 delimiter, and each might have
// an identifier-6 count.
- // The delimiting strings; one per ndelimiteds
- cblc_field_t **id2 = __gg__treeplet_1f;
- const size_t *id2_o = __gg__treeplet_1o;
- const size_t *id2_s = __gg__treeplet_1s;
- // The delimited string; one per nreceiver
- cblc_field_t **id4 = __gg__treeplet_2f;
- const size_t *id4_o = __gg__treeplet_2o;
- const size_t *id4_s = __gg__treeplet_2s;
- // The delimiting string; one per receiver
- cblc_field_t **id5 = __gg__treeplet_3f;
- const size_t *id5_o = __gg__treeplet_3o;
- const size_t *id5_s = __gg__treeplet_3s;
- // The count of characters examined; one per receiver
- cblc_field_t **id6 = __gg__treeplet_4f;
- const size_t *id6_o = __gg__treeplet_4o;
- const size_t *id6_s = __gg__treeplet_4s;
-
// Initialize the state variables
int overflow = 0;
int tally = 0;
break;
}
// We will peel off enough characters to fit the receiving id4:
- size_t id_4_size = id4_s[receiver]/stride_id1;
- if( id4[receiver]->attr & separate_e )
+ size_t id_4_size = id4[receiver].size/stride_id1;
+ if( id4[receiver].field->attr & separate_e )
{
// The receiver is NumericDisplay with a separate sign, so, as per
// the spec, we reduce the size by one character.
- id_4_size = id4_s[receiver] - 1;
+ id_4_size = id4[receiver].size - 1;
}
// Make sure id_4_size doesn't take us past the end of the universe
&bytes_converted );
char *duped = static_cast<char *>(__gg__memdup(converted, bytes_converted));
// Put the converted string into place:
- __gg__field_from_string(id4[receiver],
- id4_o[receiver],
- id4_s[receiver],
+ __gg__field_from_string(id4[receiver].field,
+ id4[receiver].offset,
+ id4[receiver].size,
duped,
bytes_converted);
free(duped);
for( size_t i=0; i<ndelimiteds; i++ )
{
std::u32string delimiter
- = normalize_for_inspect_format_4(id2[i],
- id2_o[i],
- id2_s[i],
+ = normalize_for_inspect_format_4(id2[i].field,
+ id2[i].offset,
+ id2[i].size,
id1->encoding);
delimiters.push_back(delimiter);
}
&bytes_converted );
char *duped = static_cast<char *>(__gg__memdup(converted, bytes_converted));
// Put the converted string into place:
- __gg__field_from_string(id4[nreceiver],
- id4_o[nreceiver],
- id4_s[nreceiver],
+ __gg__field_from_string(id4[nreceiver].field,
+ id4[nreceiver].offset,
+ id4[nreceiver].size,
duped,
bytes_converted);
free(duped);
left = best_location + (best_delimiter > -1
? delimiters[best_delimiter].size()
: 0) ;
- if( id5[nreceiver] )
+ if( id5[nreceiver].field )
{
// The caller wants to know what the delimiter was:
if( best_delimiter > -1 )
delimiters[best_delimiter].size()*width_of_utf32,
&bytes_converted );
duped = static_cast<char *>(__gg__memdup(converted, bytes_converted));
- __gg__field_from_string(id5[nreceiver],
- id5_o[nreceiver],
- id5_s[nreceiver],
+ __gg__field_from_string(id5[nreceiver].field,
+ id5[nreceiver].offset,
+ id5[nreceiver].size,
duped,
bytes_converted);
free(duped);
else
{
// We didn't find a delimiter
- __gg__field_from_string(id5[nreceiver],
- id5_o[nreceiver],
- id5_s[nreceiver],
+ __gg__field_from_string(id5[nreceiver].field,
+ id5[nreceiver].offset,
+ id5[nreceiver].size,
"",
0);
}
}
- if( id6[nreceiver] )
+ if( id6[nreceiver].field )
{
- __gg__int128_to_qualified_field(id6[nreceiver],
- id6_o[nreceiver],
- id6_s[nreceiver],
+ __gg__int128_to_qualified_field(id6[nreceiver].field,
+ id6[nreceiver].offset,
+ id6[nreceiver].size,
(__int128)examined,
0,
truncation_e,
static char ach_unmangled[1024];
static char ach_mangled[1024];
+
assert(field);
assert(field->data);
size_t length;
length = field->capacity;
+ cbl_encoding_t encoding = field->encoding;
+ if( field->type == FldLiteralA )
+ {
+ // This is a little complicated. FldLiteralA means somebody said
+ // CALL "LiteralName", and we know that such things are in the
+ // display_encoding. The parser assumes this, and so it doesn't set the
+ // encoding in the field, although it probably should.
+ encoding = DEFAULT_SOURCE_ENCODING;
+ }
+
// We need ach_name to be in ASCII:
size_t charsout;
- const char *converted = __gg__iconverter(field->encoding,
+ const char *converted = __gg__iconverter(encoding,
__gg__console_encoding,
PTRCAST(char, field->data),
length,
int /*source_format*/,
int dest_format)
{
- /* convert formulations:
+ /* convert formulations:
* 1. ANY to ALNUM HEX, or NAT HEX
* 2. HEX to BYTE
* 3. ALNUM to NAT, ALNUM HEX, or NAT HEX
*convert_any_e = 0x03, // i.e., both
*convert_byte_e = 0x04,
*convert_hex_e = 0x08, // may be combined with alpha or national
- *convert_just_bit_e = 0x10,
+ *convert_just_bit_e = 0x10,
*convert_just_e = 0x18, // combined with HEX
- *convert_rjust_bit_e = 0x20,
+ *convert_rjust_bit_e = 0x20,
*convert_rjust_e = 0x38, // combined with JUSTIFY
*/
- cbl_encoding_t tgt_enc = (dest_format & convert_nat_e)
+ cbl_encoding_t tgt_enc = (dest_format & convert_nat_e)
? __gg__national_encoding
: __gg__display_encoding;
const charmap_t *charmap_tgt = __gg__get_charmap(tgt_enc);
charmap_t *charmap_dest = __gg__get_charmap(dest->encoding);
-
+
if( dest_format & convert_hex_e )
{
size_t nbytes;
}
}
+/*
+ gg_printf("Message: \n", NULL_TREE);
+ gg_call(VOID,
+ "__gg__show_int128",
+ value,
+ NULL_TREE);
+ gg_printf("\n", NULL_TREE);
+*/
extern "C"
__int128
extern "C"
void
-__gg__set_data_member(cblc_field_t *field, unsigned char *data)
+__gg__show_int128(__int128 val)
{
- // This function is used to hide the initialization of the ->data member
- // from the compiler. This avoids the bug that causes n-squared time in the
- // middle end for a -O0 compiler when doing a -fpie compilation.
- field->data = data;
+ // Debugging aid: write the decimal representation of a signed 128-bit
+ // value to stderr. No trailing newline is emitted.
+
+ // Work on the magnitude in an unsigned type; negating the signed value
+ // directly would overflow (undefined behavior) for the most negative
+ // __int128.
+ unsigned __int128 mag = (unsigned __int128)val;
+ if( val < 0 )
+ {
+ fprintf(stderr, "-");
+ mag = -(unsigned __int128)val;
+ }
+ if( mag == 0 )
+ {
+ fprintf(stderr, "0");
+ return;
+ }
+ // Collect the decimal digits least-significant first; 39 digits is the
+ // maximum for 128 bits, so 128 bytes of scratch is ample.
+ char ach[128];
+ int index = 0;
+ while( mag )
+ {
+ ach[index++] = (char)('0' + (int)(mag % 10));
+ mag /= 10;
+ }
+ // Emit the digits most-significant first. No NUL terminator is stored;
+ // the buffer is not used as a C string. (Storing one and including it
+ // in the print loop would emit a stray NUL byte before the digits.)
+ while( index > 0 )
+ {
+ fprintf(stderr, "%c", ach[--index]);
+ }
+ }
+
+extern "C"
+void
+__gg__compare_string_all(int *result,
+ const unsigned char *left,
+ size_t length_left,
+ int stride,
+ const unsigned char *right,
+ size_t length_right)
+ {
+ // "all" in the name is in the confusing COBOL sense, as in VALUE ALL "A".
+
+ // We are comparing the left data to the right data, where the right
+ // data is repeated as necessary to match the length of the left.
+ // *result is set to -1, 0, or +1; lengths are in bytes.
+
+ /* There are unanswered questions about display, versus national, and
+ how to handle different user-defined alphabets for display and national
+ and so on. So, for now, if the stride is one we use the display
+ alphabet; wider strides compare raw code units. */
+
+ *result = 0;
+ if( length_left == 0 || length_right == 0 )
+ {
+ // Nothing to compare. In particular, an empty right operand would
+ // make the "index % length_right" below divide by zero.
+ return;
+ }
+ size_t index = 0;
+ if( stride == 1 )
+ {
+ while( index < length_left )
+ {
+ unsigned char ch_l = collated(left[index]);
+ unsigned char ch_r = collated(right[index % length_right]);
+
+ if( ch_l != ch_r )
+ {
+ *result = ch_l < ch_r ? -1 : +1 ;
+ break;
+ }
+ index += 1;
+ }
+ }
+ else if ( stride == 2 )
+ {
+ // Two-byte code units: convert the byte lengths to unit counts.
+ length_left /= 2;
+ length_right /= 2;
+ const unsigned short *l = reinterpret_cast<const unsigned short *>(left);
+ const unsigned short *r = reinterpret_cast<const unsigned short *>(right);
+ while( index < length_left )
+ {
+ unsigned short ch_l = l[index];
+ unsigned short ch_r = r[index % length_right];
+ if( ch_l != ch_r )
+ {
+ *result = ch_l < ch_r ? -1 : +1 ;
+ break;
+ }
+ index += 1;
+ }
+ }
+ else
+ {
+ // Four-byte code units. The element type must be exactly four bytes:
+ // "unsigned long" is eight bytes on LP64 targets, which would make
+ // each read span two characters and run past the converted lengths.
+ length_left /= 4;
+ length_right /= 4;
+ const uint32_t *l = reinterpret_cast<const uint32_t *>(left);
+ const uint32_t *r = reinterpret_cast<const uint32_t *>(right);
+ while( index < length_left )
+ {
+ uint32_t ch_l = l[index];
+ uint32_t ch_r = r[index % length_right];
+ if( ch_l != ch_r )
+ {
+ *result = ch_l < ch_r ? -1 : +1 ;
+ break;
+ }
+ index += 1;
+ }
+ }
}
+
+extern "C"
+void
+__gg__compare_string_1( int *result,
+ const unsigned char *left,
+ size_t length_left,
+ const unsigned char *right,
+ size_t length_right,
+ cbl_char_t char_space_)
+ {
+ // This is the routine that will probably do all of the real-world work,
+ // the following routines notwithstanding. It compares two single-byte
+ // strings through the collation table, setting *result to -1, 0, or +1.
+ // Per COBOL comparison rules, the shorter operand is treated as if
+ // padded with char_space_ out to the length of the longer one.
+ // NOTE(review): the padding comparisons below use char_space without
+ // passing it through collated(), while the data bytes are collated --
+ // confirm the caller supplies an already-collated space value.
+ *result = 0;
+ unsigned char char_space = char_space_;
+
+ // Compare the overlapping prefix of the two operands.
+ size_t length = std::min(length_left, length_right);
+ size_t index = 0;
+ while( index < length )
+ {
+ unsigned char ch_l = collated(left[index]);
+ unsigned char ch_r = collated(right[index]);
+ if( ch_l != ch_r )
+ {
+ *result = ch_l < ch_r ? -1 : +1 ;
+ goto done;
+ }
+ index += 1;
+ }
+ if( *result == 0 )
+ {
+ // Left is longer: compare its tail against inferred spaces.
+ while( index < length_left )
+ {
+ unsigned char ch_l = collated(left[index]);
+ if( ch_l != char_space )
+ {
+ *result = ch_l < char_space ? -1 : +1 ;
+ goto done;
+ }
+ index += 1;
+ }
+ // Right is longer: compare inferred spaces against its tail.
+ while( index < length_right )
+ {
+ unsigned char ch_r = collated(right[index]);
+ if( char_space != ch_r )
+ {
+ *result = char_space < ch_r ? -1 : +1 ;
+ goto done;
+ }
+ index += 1;
+ }
+ }
+ done:
+ return;
+ }
+
+// Precomputed 1024-byte runs of the space character, used by the
+// __gg__compare_string_* routines to compare a string's tail against
+// inferred trailing spaces in 1 KiB chunks via memcmp.
+#define ASCII_16 "                "
+#define ASCII_64 ASCII_16 ASCII_16 ASCII_16 ASCII_16
+#define ASCII_256 ASCII_64 ASCII_64 ASCII_64 ASCII_64
+#define ASCII_1024 ASCII_256 ASCII_256 ASCII_256 ASCII_256
+
+// '@' is 0x40 in these ASCII source bytes, which is the EBCDIC space.
+#define EBCDIC_16 "@@@@@@@@@@@@@@@@"
+#define EBCDIC_64 EBCDIC_16 EBCDIC_16 EBCDIC_16 EBCDIC_16
+#define EBCDIC_256 EBCDIC_64 EBCDIC_64 EBCDIC_64 EBCDIC_64
+#define EBCDIC_1024 EBCDIC_256 EBCDIC_256 EBCDIC_256 EBCDIC_256
+
+// 1025 bytes: the 1024 data bytes plus the literal's NUL terminator.
+static const unsigned char ascii_1024[1025] = ASCII_1024;
+static const unsigned char ebcdic_1024[1025] = EBCDIC_1024;
+
+extern "C"
+void
+__gg__compare_string_1a( int *result,
+ const unsigned char *left,
+ size_t length_left,
+ const unsigned char *right,
+ size_t length_right,
+ cbl_char_t )
+ {
+ // This is the rarely-seen, but simplest routine of all, comparing
+ // single-byte ASCII characters in the same encoding without fear or favor.
+ // The result has memcmp semantics: only the sign of *result is meaningful.
+ // The unnamed cbl_char_t parameter exists solely to keep the signature
+ // uniform with the other __gg__compare_string_* routines; the space
+ // character comes from the static ascii_1024 table instead.
+ // NOTE(review): std::min(1024UL, ...) requires unsigned long == size_t;
+ // on an LLP64 target this will not compile. Consider size_t(1024).
+ *result = 0;
+
+ size_t length = std::min(length_left, length_right);
+ *result = memcmp(left, right, length);
+ if( *result == 0 )
+ {
+ if( length < length_left ) // Right is shorter than Left
+ {
+ // We need to compare the trailing characters of left to inferred spaces
+ // on the right, in 1024-byte chunks against the static space table.
+ left += length;
+ length_left -= length;
+ while( length_left )
+ {
+ size_t this_time = std::min(1024UL, length_left);
+ *result = memcmp(left, ascii_1024, this_time);
+ if( *result )
+ {
+ break;
+ }
+ left += this_time;
+ length_left -= this_time;
+ }
+ }
+ else if( length < length_right ) // Left is shorter than Right
+ {
+ // We need to compare the trailing characters of right to inferred spaces
+ // on the left.
+ right += length;
+ length_right -= length;
+ while( length_right )
+ {
+ size_t this_time = std::min(1024UL, length_right);
+ *result = memcmp(ascii_1024, right, this_time);
+ if( *result )
+ {
+ break;
+ }
+ right += this_time;
+ length_right -= this_time;
+ }
+ }
+ }
+ return;
+ }
+
+extern "C"
+void
+__gg__compare_string_1e( int *result,
+ const unsigned char *left,
+ size_t length_left,
+ const unsigned char *right,
+ size_t length_right,
+ cbl_char_t )
+ {
+ // This is the rarely-seen, but simplest routine of all, comparing
+ // single-byte EBCDIC characters in the same encoding without fear or favor.
+ // The result has memcmp semantics: only the sign of *result is meaningful.
+ // The unnamed cbl_char_t parameter keeps the signature uniform with the
+ // other __gg__compare_string_* routines; the space character (EBCDIC
+ // 0x40) comes from the static ebcdic_1024 table instead.
+ // NOTE(review): std::min(1024UL, ...) requires unsigned long == size_t;
+ // on an LLP64 target this will not compile. Consider size_t(1024).
+ *result = 0;
+
+ size_t length = std::min(length_left, length_right);
+ *result = memcmp(left, right, length);
+ if( *result == 0 )
+ {
+ if( length < length_left ) // Right is shorter than Left
+ {
+ // We need to compare the trailing characters of left to inferred spaces
+ // on the right, in 1024-byte chunks against the static space table.
+ left += length;
+ length_left -= length;
+ while( length_left )
+ {
+ size_t this_time = std::min(1024UL, length_left);
+ *result = memcmp(left, ebcdic_1024, this_time);
+ if( *result )
+ {
+ break;
+ }
+ left += this_time;
+ length_left -= this_time;
+ }
+ }
+ else if( length < length_right ) // Left is shorter than Right
+ {
+ // We need to compare the trailing characters of right to inferred spaces
+ // on the left.
+ right += length;
+ length_right -= length;
+ while( length_right )
+ {
+ size_t this_time = std::min(1024UL, length_right);
+ *result = memcmp(ebcdic_1024, right, this_time);
+ if( *result )
+ {
+ break;
+ }
+ right += this_time;
+ length_right -= this_time;
+ }
+ }
+ }
+ return;
+ }
+
+extern "C"
+void
+__gg__compare_string_2( int *result,
+ const unsigned short *left,
+ size_t length_left,
+ const unsigned short *right,
+ size_t length_right,
+ cbl_char_t char_space_)
+ {
+ // This compares USHORT character strings through the collation table,
+ // setting *result to -1, 0, or +1. The lengths arrive in bytes and are
+ // converted to 2-byte code-unit counts below. The shorter operand is
+ // treated as if padded with char_space_.
+ // NOTE(review): collated() is applied to 16-bit code units here, and
+ // char_space is compared without collation -- confirm the collation
+ // table is defined for values above 255 and that the caller supplies an
+ // already-collated space.
+ *result = 0;
+ unsigned short char_space = char_space_;
+
+ length_left /= 2;
+ length_right /= 2;
+ size_t length = std::min(length_left, length_right);
+ size_t index = 0;
+ while( index < length )
+ {
+ unsigned short ch_l = collated(left[index]);
+ unsigned short ch_r = collated(right[index]);
+ if( ch_l != ch_r )
+ {
+ *result = ch_l < ch_r ? -1 : +1 ;
+ goto done;
+ }
+ index += 1;
+ }
+ if( *result == 0 )
+ {
+ // Left is longer: compare its tail against inferred spaces.
+ while( index < length_left )
+ {
+ unsigned short ch_l = collated(left[index]);
+ if( ch_l != char_space )
+ {
+ *result = ch_l < char_space ? -1 : +1 ;
+ goto done;
+ }
+ index += 1;
+ }
+ // Right is longer: compare inferred spaces against its tail.
+ while( index < length_right )
+ {
+ unsigned short ch_r = collated(right[index]);
+ if( char_space != ch_r )
+ {
+ *result = char_space < ch_r ? -1 : +1 ;
+ goto done;
+ }
+ index += 1;
+ }
+ }
+ done:
+ return;
+ }
+
+extern "C"
+void
+__gg__compare_string_2a( int *result,
+                         const unsigned short *left,
+                         size_t length_left,
+                         const unsigned short *right,
+                         size_t length_right,
+                         cbl_char_t char_space_)
+  {
+  // Compare two strings of 16-bit characters by raw character value (no
+  // collation mapping).  The incoming lengths are in bytes; the shorter
+  // operand is treated as if padded on the right with char_space_.
+  // *result receives -1, 0, or +1.
+  *result = 0;
+  const unsigned short pad = char_space_;
+
+  const size_t nleft   = length_left  / 2;
+  const size_t nright  = length_right / 2;
+  const size_t ncommon = std::min(nleft, nright);
+
+  for( size_t i=0; i<ncommon; i++ )
+    {
+    if( left[i] != right[i] )
+      {
+      *result = left[i] < right[i] ? -1 : +1;
+      return;
+      }
+    }
+  // The shared prefix matched; any leftover characters compare to spaces.
+  // At most one of these two loops does any work.
+  for( size_t i=ncommon; i<nleft; i++ )
+    {
+    if( left[i] != pad )
+      {
+      *result = left[i] < pad ? -1 : +1;
+      return;
+      }
+    }
+  for( size_t i=ncommon; i<nright; i++ )
+    {
+    if( right[i] != pad )
+      {
+      *result = pad < right[i] ? -1 : +1;
+      return;
+      }
+    }
+  }
+
+extern "C"
+void
+__gg__compare_string_4( int *result,
+                        const unsigned long *left,
+                        size_t length_left,
+                        const unsigned long *right,
+                        size_t length_right,
+                        cbl_char_t char_space_)
+  {
+  // Compare two strings of four-byte characters, mapping each character
+  // through the collated() table.  Lengths are in bytes; the shorter
+  // operand is treated as if padded on the right with char_space_.
+  //
+  // The characters are four bytes wide (this routine is dispatched to when
+  // the charmap stride is 4, and the byte lengths are divided by 4 below),
+  // so index the storage as uint32_t: "unsigned long" is eight bytes on
+  // LP64 hosts and indexing through the parameter type would misread the
+  // data.
+  const uint32_t *lchars = reinterpret_cast<const uint32_t *>(left);
+  const uint32_t *rchars = reinterpret_cast<const uint32_t *>(right);
+  uint32_t char_space = char_space_;
+  *result = 0;
+
+  length_left /= 4;
+  length_right /= 4;
+  size_t length = std::min(length_left, length_right);
+  size_t index = 0;
+  while( index < length )
+    {
+    uint32_t ch_l = collated(lchars[index]);
+    uint32_t ch_r = collated(rchars[index]);
+    if( ch_l != ch_r )
+      {
+      *result = ch_l < ch_r ? -1 : +1 ;
+      return;
+      }
+    index += 1;
+    }
+  // The common prefix matched; compare leftover characters against spaces.
+  while( index < length_left )
+    {
+    uint32_t ch_l = collated(lchars[index]);
+    if( ch_l != char_space )
+      {
+      *result = ch_l < char_space ? -1 : +1 ;
+      return;
+      }
+    index += 1;
+    }
+  while( index < length_right )
+    {
+    uint32_t ch_r = collated(rchars[index]);
+    if( char_space != ch_r )
+      {
+      *result = char_space < ch_r ? -1 : +1 ;
+      return;
+      }
+    index += 1;
+    }
+  }
+
+extern "C"
+void
+__gg__compare_string_4a( int *result,
+                         const unsigned long *left,
+                         size_t length_left,
+                         const unsigned long *right,
+                         size_t length_right,
+                         cbl_char_t char_space_)
+  {
+  // Compare two strings of four-byte characters by raw character value (no
+  // collation mapping).  Lengths are in bytes; the shorter operand is
+  // treated as if padded on the right with char_space_.
+  //
+  // The characters are four bytes wide (byte lengths are divided by 4
+  // below), so index the storage as uint32_t: "unsigned long" is eight
+  // bytes on LP64 hosts and indexing through the parameter type would
+  // misread the data.
+  const uint32_t *lchars = reinterpret_cast<const uint32_t *>(left);
+  const uint32_t *rchars = reinterpret_cast<const uint32_t *>(right);
+  uint32_t char_space = char_space_;
+  *result = 0;
+
+  length_left /= 4;
+  length_right /= 4;
+  size_t length = std::min(length_left, length_right);
+  size_t index = 0;
+  while( index < length )
+    {
+    uint32_t ch_l = lchars[index];
+    uint32_t ch_r = rchars[index];
+    if( ch_l != ch_r )
+      {
+      *result = ch_l < ch_r ? -1 : +1 ;
+      return;
+      }
+    index += 1;
+    }
+  // The common prefix matched; compare leftover characters against spaces.
+  while( index < length_left )
+    {
+    uint32_t ch_l = lchars[index];
+    if( ch_l != char_space )
+      {
+      *result = ch_l < char_space ? -1 : +1 ;
+      return;
+      }
+    index += 1;
+    }
+  while( index < length_right )
+    {
+    uint32_t ch_r = rchars[index];
+    if( char_space != ch_r )
+      {
+      *result = char_space < ch_r ? -1 : +1 ;
+      return;
+      }
+    index += 1;
+    }
+  }
+
+extern "C"
+void
+__gg_compare_string_different(int *result,
+                              const unsigned char *left,
+                              size_t length_left,
+                              cbl_encoding_t encoding_left,
+                              const unsigned char *right,
+                              size_t length_right,
+                              cbl_encoding_t encoding_right)
+  {
+  /* This routine converts the right string to the left encoding, and then
+     compares the results. In the case where the left side is the
+     __gg__display_encoding, the `collated` table is used. */
+  // NOTE(review): the name carries a single "__gg_" prefix where the
+  // sibling routines use "__gg__" -- confirm the generated code references
+  // this exact symbol.
+  charmap_t *charmap = __gg__get_charmap(encoding_left);
+  // Character width, in bytes, of the left-hand encoding.
+  int stride = charmap->stride();
+  cbl_char_t space = charmap->mapped_character(ascii_space);
+  size_t nbytes;
+  // Convert RIGHT into the left-hand encoding; nbytes receives the byte
+  // count of the converted text.  Ownership of the returned buffer is not
+  // visible here -- presumably __gg__iconverter returns a buffer the caller
+  // must not free; confirm against its definition.
+  const unsigned char *converted = reinterpret_cast<unsigned char *>(
+                                   __gg__iconverter(encoding_right,
+                                                    encoding_left,
+                                                    right,
+                                                    length_right,
+                                                    &nbytes));
+  // Dispatch on character width.  Lengths are passed in bytes; the two-
+  // and four-byte comparison routines divide by the stride themselves.
+  switch(stride)
+    {
+    case 1:
+      {
+      __gg__compare_string_1a( result,
+                               left,
+                               length_left,
+                               converted,
+                               nbytes,
+                               space);
+      break;
+      }
+
+    case 2:
+      {
+      __gg__compare_string_2(result,
+                             reinterpret_cast<const unsigned short *>(left),
+                             length_left,
+                             reinterpret_cast<const unsigned short *>(converted),
+                             nbytes,
+                             space);
+      break;
+      }
+
+    case 4:
+      {
+      __gg__compare_string_4(result,
+                             reinterpret_cast<const unsigned long *>(left),
+                             length_left,
+                             reinterpret_cast<const unsigned long *>(converted),
+                             nbytes,
+                             space);
+      break;
+      }
+    }
+  // NOTE(review): a stride other than 1, 2, or 4 falls through the switch
+  // and leaves *result unset -- confirm charmap_t::stride() cannot return
+  // any other value.
+  return;
+  }
+
+extern "C"
+void
+__gg__compare_numeric_all(int *result,
+                          __int128 value,
+                          size_t digits,
+                          const unsigned char *string,
+                          size_t length,
+                          cbl_encoding_t encoding )
+  {
+  // Compare the decimal representation of VALUE against STRING repeated
+  // cyclically as needed to cover every digit (COBOL ALL "literal"),
+  // character by character through the collated() table.  *result is set
+  // to -1, 0, or +1.
+  char ach[128];
+  unsigned char *pach = reinterpret_cast<unsigned char *>(ach);
+  if( digits == 0 )
+    {
+    // Go for maximum length.  Without setting digits here the trimming
+    // loop and the comparison loop below would never execute, and every
+    // comparison would come back "equal".
+    digits = 38;
+    __gg__binary_to_string_ascii(ach, 38, value);
+    // Find the first non-zero digit, keeping at least one digit:
+    while(digits > 1)
+      {
+      if( *pach != ascii_zero )
+        {
+        break;
+        }
+      digits -= 1;
+      pach += 1;
+      }
+    }
+  else
+    {
+    __gg__binary_to_string_ascii(ach, digits, value);
+    }
+
+  // we need to convert the ascii numeric string to the same encoding as the
+  // string.  Convert starting at pach, which points past any leading zeroes
+  // stripped above; starting at ach would re-include the stripped zeroes
+  // and drop trailing digits.
+  const charmap_t *charmap = __gg__get_charmap(encoding);
+  size_t nbytes;
+  const char *converted = __gg__iconverter( DEFAULT_SOURCE_ENCODING,
+                                            encoding ,
+                                            reinterpret_cast<char *>(pach),
+                                            digits,
+                                            &nbytes);
+  *result = 0;
+  for(size_t i=0; i<digits; i++)
+    {
+    cbl_char_t chl = charmap->getch(converted, i);
+    // The comparison string repeats as often as needed:
+    cbl_char_t chr = charmap->getch(string, i % length);
+
+    chl = collated(chl);
+    chr = collated(chr);
+    if( chl > chr )
+      {
+      *result = 1;
+      break;
+      }
+    else if( chl < chr )
+      {
+      *result = -1;
+      break;
+      }
+    }
+  return;
+  }
+
+extern "C"
+void
+__gg__compare_binary_to_string( int *result,
+                                __int128 value,
+                                size_t digits,
+                                char *right,
+                                size_t length,
+                                cbl_encoding_t encoding)
+  {
+  // Compare the decimal representation of VALUE (as the left operand)
+  // against the character string RIGHT, dispatching on the encoding's
+  // character width.  *result is set in the manner of memcmp().
+  char ach[128];
+  unsigned char *pach = reinterpret_cast<unsigned char *>(ach);
+  if( digits == 0 )
+    {
+    // Go for maximum length.  Without setting digits here the trimming
+    // loop below would never execute and zero digits would be converted.
+    digits = 38;
+    __gg__binary_to_string_ascii(ach, 38, value);
+    // Find the first non-zero digit, keeping at least one digit:
+    while(digits > 1)
+      {
+      if( *pach != ascii_zero )
+        {
+        break;
+        }
+      digits -= 1;
+      pach += 1;
+      }
+    }
+  else
+    {
+    __gg__binary_to_string_ascii(ach, digits, value);
+    }
+  // pach points at digits characters in DEFAULT_SOURCE_ENCODING, past any
+  // leading zeroes stripped above.
+  // we need to convert the ascii numeric string to the right-side encoding
+  charmap_t *charmap = __gg__get_charmap(encoding);
+  cbl_char_t space = charmap->mapped_character(ascii_space);
+  size_t nbytes;
+  char *converted = __gg__iconverter( DEFAULT_SOURCE_ENCODING,
+                                      encoding ,
+                                      reinterpret_cast<char *>(pach),
+                                      digits,
+                                      &nbytes);
+  // Dispatch on character width; the comparison routines treat the
+  // shorter operand as space-padded.
+  switch( charmap->stride() )
+    {
+    case 1:
+      __gg__compare_string_1(result,
+                             reinterpret_cast<unsigned char *>(converted),
+                             nbytes,
+                             reinterpret_cast<unsigned char *>(right),
+                             length,
+                             space);
+      break;
+    case 2:
+      __gg__compare_string_2(result,
+                             reinterpret_cast<unsigned short *>(converted),
+                             nbytes,
+                             reinterpret_cast<unsigned short *>(right),
+                             length,
+                             space);
+      break;
+    case 4:
+      __gg__compare_string_4(result,
+                             reinterpret_cast<unsigned long *>(converted),
+                             nbytes,
+                             reinterpret_cast<unsigned long *>(right),
+                             length,
+                             space);
+      break;
+    }
+  return;
+  }
+
 __gg__binary_to_string_ascii(char *result, int digits, __int128 value)
   {
   zero_char = ascii_zero;
-
-  // Note that this routine does not terminate the generated string with a
-  // NUL. This routine is sometimes used to generate a NumericDisplay string
-  // of digits in place, with no terminator.
-  __int128 mask = __gg__power_of_ten(digits);
-
-  COMBINED combined;
-  if( value < 0 )
+  bool retval; // True means the value was too big to fit into digits
+  // An __int128 holds at most 39 decimal digits, so a power-of-ten mask
+  // can only be formed for digit counts below 39.
+  if( digits < 39 )
     {
-    value = -value;
-    }
+    // Note that this routine does not terminate the generated string with a
+    // NUL. This routine is sometimes used to generate a NumericDisplay string
+    // of digits in place, with no terminator.
+    __int128 mask = __gg__power_of_ten(digits);
-  // A non-zero retval means the number was too big to fit into the desired
-  // number of digits:
-  bool retval = !!(value / mask);
+    COMBINED combined;
+    if( value < 0 )
+      {
+      value = -value;
+      }
-  // mask off the bottom digits to avoid garbage when value is too large
-  value %= mask;
+    // A non-zero retval means the number was too big to fit into the desired
+    // number of digits:
+    retval = !!(value / mask);
-  combined.start = 0;
-  combined.run = digits;
-  combined.val128 = value;
-  string_from_combined(combined);
-  memcpy(result, combined_string, digits);
+    // mask off the bottom digits to avoid garbage when value is too large
+    value %= mask;
+
+    combined.start = 0;
+    combined.run = digits;
+    combined.val128 = value;
+    string_from_combined(combined);
+    memcpy(result, combined_string, digits);
+    return retval;
+    }
+  else
+    {
+    // We assume that this is a PIC X(16) COMP-X, so the value is always
+    // positive.
+    COMBINED combined;
+    // A non-zero retval means the number was too big to fit into the desired
+    // number of digits:
+    retval = false;
+    // NOTE(review): overflow is not detected on this path; presumably the
+    // caller guarantees the value fits in 'digits' -- confirm.
+
+    combined.start = 0;
+    combined.run = digits;
+    combined.val128 = value;
+    string_from_combined(combined);
+    memcpy(result, combined_string, digits);
+    }
   return retval;
   }