* nocachegetattr
*
* This only gets called from fastgetattr(), in cases where we
- * can't use a cacheoffset and the value is not null.
- *
- * This caches attribute offsets in the attribute descriptor.
- *
- * An alternative way to speed things up would be to cache offsets
- * with the tuple, but that seems more difficult unless you take
- * the storage hit of actually putting those offsets into the
- * tuple you send to disk. Yuck.
- *
- * This scheme will be slightly slower than that, but should
- * perform well for queries which hit large #'s of tuples. After
- * you cache the offsets once, examining all the other tuples using
- * the same attribute descriptor will go much quicker. -cim 5/4/91
+ * can't use the attcacheoff and the value is not null.
*
* NOTE: if you need to change this code, see also heap_deform_tuple.
* Also see nocache_index_getattr, which is the same code for index
int attnum,
TupleDesc tupleDesc)
{
+ CompactAttribute *cattr;
HeapTupleHeader td = tup->t_data;
char *tp; /* ptr to data part of tuple */
bits8 *bp = td->t_bits; /* ptr to null bitmap in tuple */
- bool slow = false; /* do we have to walk attrs? */
int off; /* current offset within data */
+ int startAttr;
+ int firstNullAttr;
+ int i;
+ bool hasnulls = HeapTupleHasNulls(tup);
- /* ----------------
- * Three cases:
- *
- * 1: No nulls and no variable-width attributes.
- * 2: Has a null or a var-width AFTER att.
- * 3: Has nulls or var-widths BEFORE att.
- * ----------------
- */
+ /* Did someone forget to call TupleDescFinalize()? */
+ Assert(tupleDesc->firstNonCachedOffsetAttr >= 0);
attnum--;
- if (!HeapTupleNoNulls(tup))
+ /*
+ * To minimize the number of attributes we need to look at, start walking
+ * the tuple at the attribute with the highest attcacheoff prior to attnum
+ * or the first NULL attribute prior to attnum, whichever comes first.
+ */
+ if (hasnulls)
+ firstNullAttr = first_null_attr(bp, attnum);
+ else
+ firstNullAttr = attnum;
+
+ if (tupleDesc->firstNonCachedOffsetAttr > 0)
{
/*
- * there's a null somewhere in the tuple
- *
- * check to see if any preceding bits are null...
+ * Start at the highest attcacheoff attribute with no NULLs in prior
+ * attributes.
*/
- int byte = attnum >> 3;
- int finalbit = attnum & 0x07;
-
- /* check for nulls "before" final bit of last byte */
- if ((~bp[byte]) & ((1 << finalbit) - 1))
- slow = true;
- else
- {
- /* check for nulls in any "earlier" bytes */
- int i;
-
- for (i = 0; i < byte; i++)
- {
- if (bp[i] != 0xFF)
- {
- slow = true;
- break;
- }
- }
- }
+ startAttr = Min(tupleDesc->firstNonCachedOffsetAttr - 1, firstNullAttr);
+ off = TupleDescCompactAttr(tupleDesc, startAttr)->attcacheoff;
+ }
+ else
+ {
+ /* Otherwise, start at the beginning... */
+ startAttr = 0;
+ off = 0;
}
tp = (char *) td + td->t_hoff;
- if (!slow)
+ /*
+ * Calculate 'off' up to the first NULL attr. We use two cheaper loops
+ * when the tuple has no variable-width columns. When variable-width
+ * columns exist, we use att_addlength_pointer() to move the offset
+ * beyond the current attribute.
+ */
+ if (!HeapTupleHasVarWidth(tup))
{
- CompactAttribute *att;
-
- /*
- * If we get here, there are no nulls up to and including the target
- * attribute. If we have a cached offset, we can use it.
- */
- att = TupleDescCompactAttr(tupleDesc, attnum);
- if (att->attcacheoff >= 0)
- return fetchatt(att, tp + att->attcacheoff);
-
- /*
- * Otherwise, check for non-fixed-length attrs up to and including
- * target. If there aren't any, it's safe to cheaply initialize the
- * cached offsets for these attrs.
- */
- if (HeapTupleHasVarWidth(tup))
+ for (i = startAttr; i < firstNullAttr; i++)
{
- int j;
+ cattr = TupleDescCompactAttr(tupleDesc, i);
- for (j = 0; j <= attnum; j++)
- {
- if (TupleDescCompactAttr(tupleDesc, j)->attlen <= 0)
- {
- slow = true;
- break;
- }
- }
+ off = att_nominal_alignby(off, cattr->attalignby);
+ off += cattr->attlen;
}
- }
-
- if (!slow)
- {
- int natts = tupleDesc->natts;
- int j = 1;
-
- /*
- * If we get here, we have a tuple with no nulls or var-widths up to
- * and including the target attribute, so we can use the cached offset
- * ... only we don't have it yet, or we'd not have got here. Since
- * it's cheap to compute offsets for fixed-width columns, we take the
- * opportunity to initialize the cached offsets for *all* the leading
- * fixed-width columns, in hope of avoiding future visits to this
- * routine.
- */
- TupleDescCompactAttr(tupleDesc, 0)->attcacheoff = 0;
-
- /* we might have set some offsets in the slow path previously */
- while (j < natts && TupleDescCompactAttr(tupleDesc, j)->attcacheoff > 0)
- j++;
-
- off = TupleDescCompactAttr(tupleDesc, j - 1)->attcacheoff +
- TupleDescCompactAttr(tupleDesc, j - 1)->attlen;
- for (; j < natts; j++)
+ for (; i < attnum; i++)
{
- CompactAttribute *att = TupleDescCompactAttr(tupleDesc, j);
+ if (att_isnull(i, bp))
+ continue;
- if (att->attlen <= 0)
- break;
-
- off = att_nominal_alignby(off, att->attalignby);
+ cattr = TupleDescCompactAttr(tupleDesc, i);
- att->attcacheoff = off;
-
- off += att->attlen;
+ off = att_nominal_alignby(off, cattr->attalignby);
+ off += cattr->attlen;
}
-
- Assert(j > attnum);
-
- off = TupleDescCompactAttr(tupleDesc, attnum)->attcacheoff;
}
else
{
- bool usecache = true;
- int i;
-
- /*
- * Now we know that we have to walk the tuple CAREFULLY. But we still
- * might be able to cache some offsets for next time.
- *
- * Note - This loop is a little tricky. For each non-null attribute,
- * we have to first account for alignment padding before the attr,
- * then advance over the attr based on its length. Nulls have no
- * storage and no alignment padding either. We can use/set
- * attcacheoff until we reach either a null or a var-width attribute.
- */
- off = 0;
- for (i = 0;; i++) /* loop exit is at "break" */
+ for (i = startAttr; i < firstNullAttr; i++)
{
- CompactAttribute *att = TupleDescCompactAttr(tupleDesc, i);
+ int attlen;
- if (HeapTupleHasNulls(tup) && att_isnull(i, bp))
- {
- usecache = false;
- continue; /* this cannot be the target att */
- }
+ cattr = TupleDescCompactAttr(tupleDesc, i);
+ attlen = cattr->attlen;
- /* If we know the next offset, we can skip the rest */
- if (usecache && att->attcacheoff >= 0)
- off = att->attcacheoff;
- else if (att->attlen == -1)
- {
- /*
- * We can only cache the offset for a varlena attribute if the
- * offset is already suitably aligned, so that there would be
- * no pad bytes in any case: then the offset will be valid for
- * either an aligned or unaligned value.
- */
- if (usecache &&
- off == att_nominal_alignby(off, att->attalignby))
- att->attcacheoff = off;
- else
- {
- off = att_pointer_alignby(off, att->attalignby, -1,
- tp + off);
- usecache = false;
- }
- }
- else
- {
- /* not varlena, so safe to use att_nominal_alignby */
- off = att_nominal_alignby(off, att->attalignby);
+ /*
+ * cstrings don't exist in heap tuples. Use pg_assume to instruct
+ * the compiler not to emit the cstring-related code in
+ * att_addlength_pointer().
+ */
+ pg_assume(attlen > 0 || attlen == -1);
- if (usecache)
- att->attcacheoff = off;
- }
+ off = att_pointer_alignby(off,
+ cattr->attalignby,
+ attlen,
+ tp + off);
+ off = att_addlength_pointer(off, attlen, tp + off);
+ }
- if (i == attnum)
- break;
+ for (; i < attnum; i++)
+ {
+ int attlen;
- off = att_addlength_pointer(off, att->attlen, tp + off);
+ if (att_isnull(i, bp))
+ continue;
- if (usecache && att->attlen <= 0)
- usecache = false;
+ cattr = TupleDescCompactAttr(tupleDesc, i);
+ attlen = cattr->attlen;
+
+ /* As above, heaptuples have no cstrings */
+ pg_assume(attlen > 0 || attlen == -1);
+
+ off = att_pointer_alignby(off, cattr->attalignby, attlen,
+ tp + off);
+ off = att_addlength_pointer(off, attlen, tp + off);
}
}
- return fetchatt(TupleDescCompactAttr(tupleDesc, attnum), tp + off);
+ cattr = TupleDescCompactAttr(tupleDesc, attnum);
+ off = att_pointer_alignby(off,
+ cattr->attalignby,
+ cattr->attlen,
+ tp + off);
+
+ return fetchatt(cattr, tp + off);
}
/* ----------------
Datum *values, bool *isnull)
{
HeapTupleHeader tup = tuple->t_data;
+ CompactAttribute *cattr;
bool hasnulls = HeapTupleHasNulls(tuple);
int tdesc_natts = tupleDesc->natts;
int natts; /* number of atts to extract */
char *tp; /* ptr to tuple data */
uint32 off; /* offset in tuple data */
bits8 *bp = tup->t_bits; /* ptr to null bitmap in tuple */
- bool slow = false; /* can we use/set attcacheoff? */
+ int firstNonCacheOffsetAttr;
+ int firstNullAttr;
natts = HeapTupleHeaderGetNatts(tup);
+ /* Did someone forget to call TupleDescFinalize()? */
+ Assert(tupleDesc->firstNonCachedOffsetAttr >= 0);
+
/*
* In inheritance situations, it is possible that the given tuple actually
* has more fields than the caller is expecting. Don't run off the end of
* the caller's arrays.
*/
natts = Min(natts, tdesc_natts);
+ firstNonCacheOffsetAttr = Min(tupleDesc->firstNonCachedOffsetAttr, natts);
+
+ if (hasnulls)
+ {
+ firstNullAttr = first_null_attr(bp, natts);
+
+ /*
+ * XXX: it'd be nice to use populate_isnull_array() here, but that
+ * requires that the isnull array's size is rounded up to the next
+ * multiple of 8. Doing that would require adjusting many locations
+ * that allocate the array.
+ */
+ firstNonCacheOffsetAttr = Min(firstNonCacheOffsetAttr, firstNullAttr);
+ }
+ else
+ firstNullAttr = natts;
tp = (char *) tup + tup->t_hoff;
+ attnum = 0;
- off = 0;
+ if (firstNonCacheOffsetAttr > 0)
+ {
+#ifdef USE_ASSERT_CHECKING
+ /* In Assert enabled builds, verify attcacheoff is correct */
+ int offcheck = 0;
+#endif
+ do
+ {
+ isnull[attnum] = false;
+ cattr = TupleDescCompactAttr(tupleDesc, attnum);
+ off = cattr->attcacheoff;
- for (attnum = 0; attnum < natts; attnum++)
+#ifdef USE_ASSERT_CHECKING
+ offcheck = att_nominal_alignby(offcheck, cattr->attalignby);
+ Assert(offcheck == cattr->attcacheoff);
+ offcheck += cattr->attlen;
+#endif
+
+ values[attnum] = fetch_att_noerr(tp + off,
+ cattr->attbyval,
+ cattr->attlen);
+ } while (++attnum < firstNonCacheOffsetAttr);
+ off += cattr->attlen;
+ }
+ else
+ off = 0;
+
+ for (; attnum < firstNullAttr; attnum++)
{
- CompactAttribute *thisatt = TupleDescCompactAttr(tupleDesc, attnum);
+ isnull[attnum] = false;
+ cattr = TupleDescCompactAttr(tupleDesc, attnum);
+ values[attnum] = align_fetch_then_add(tp,
+ &off,
+ cattr->attbyval,
+ cattr->attlen,
+ cattr->attalignby);
+ }
+
+ for (; attnum < natts; attnum++)
+ {
+ Assert(hasnulls);
- if (hasnulls && att_isnull(attnum, bp))
+ if (att_isnull(attnum, bp))
{
values[attnum] = (Datum) 0;
isnull[attnum] = true;
- slow = true; /* can't use attcacheoff anymore */
continue;
}
isnull[attnum] = false;
-
- if (!slow && thisatt->attcacheoff >= 0)
- off = thisatt->attcacheoff;
- else if (thisatt->attlen == -1)
- {
- /*
- * We can only cache the offset for a varlena attribute if the
- * offset is already suitably aligned, so that there would be no
- * pad bytes in any case: then the offset will be valid for either
- * an aligned or unaligned value.
- */
- if (!slow &&
- off == att_nominal_alignby(off, thisatt->attalignby))
- thisatt->attcacheoff = off;
- else
- {
- off = att_pointer_alignby(off, thisatt->attalignby, -1,
- tp + off);
- slow = true;
- }
- }
- else
- {
- /* not varlena, so safe to use att_nominal_alignby */
- off = att_nominal_alignby(off, thisatt->attalignby);
-
- if (!slow)
- thisatt->attcacheoff = off;
- }
-
- values[attnum] = fetchatt(thisatt, tp + off);
-
- off = att_addlength_pointer(off, thisatt->attlen, tp + off);
-
- if (thisatt->attlen <= 0)
- slow = true; /* can't use attcacheoff anymore */
+ cattr = TupleDescCompactAttr(tupleDesc, attnum);
+
+ /* align 'off', fetch the attr's value, and increment off beyond it */
+ values[attnum] = align_fetch_then_add(tp,
+ &off,
+ cattr->attbyval,
+ cattr->attlen,
+ cattr->attalignby);
}
/*
*
* This gets called from index_getattr() macro, and only in cases
* where we can't use cacheoffset and the value is not null.
- *
- * This caches attribute offsets in the attribute descriptor.
- *
- * An alternative way to speed things up would be to cache offsets
- * with the tuple, but that seems more difficult unless you take
- * the storage hit of actually putting those offsets into the
- * tuple you send to disk. Yuck.
- *
- * This scheme will be slightly slower than that, but should
- * perform well for queries which hit large #'s of tuples. After
- * you cache the offsets once, examining all the other tuples using
- * the same attribute descriptor will go much quicker. -cim 5/4/91
* ----------------
*/
Datum
int attnum,
TupleDesc tupleDesc)
{
+ CompactAttribute *cattr;
char *tp; /* ptr to data part of tuple */
bits8 *bp = NULL; /* ptr to null bitmap in tuple */
- bool slow = false; /* do we have to walk attrs? */
int data_off; /* tuple data offset */
int off; /* current offset within data */
+ int startAttr;
+ int firstNullAttr;
+ bool hasnulls = IndexTupleHasNulls(tup);
+ int i;
- /* ----------------
- * Three cases:
- *
- * 1: No nulls and no variable-width attributes.
- * 2: Has a null or a var-width AFTER att.
- * 3: Has nulls or var-widths BEFORE att.
- * ----------------
- */
-
- data_off = IndexInfoFindDataOffset(tup->t_info);
+ /* Did someone forget to call TupleDescFinalize()? */
+ Assert(tupleDesc->firstNonCachedOffsetAttr >= 0);
attnum--;
- if (IndexTupleHasNulls(tup))
- {
- /*
- * there's a null somewhere in the tuple
- *
- * check to see if desired att is null
- */
+ data_off = IndexInfoFindDataOffset(tup->t_info);
+ tp = (char *) tup + data_off;
- /* XXX "knows" t_bits are just after fixed tuple header! */
+ /*
+ * To minimize the number of attributes we need to look at, start walking
+ * the tuple at the attribute with the highest attcacheoff prior to attnum
+ * or the first NULL attribute prior to attnum, whichever comes first.
+ */
+ if (hasnulls)
+ {
bp = (bits8 *) ((char *) tup + sizeof(IndexTupleData));
-
- /*
- * Now check to see if any preceding bits are null...
- */
- {
- int byte = attnum >> 3;
- int finalbit = attnum & 0x07;
-
- /* check for nulls "before" final bit of last byte */
- if ((~bp[byte]) & ((1 << finalbit) - 1))
- slow = true;
- else
- {
- /* check for nulls in any "earlier" bytes */
- int i;
-
- for (i = 0; i < byte; i++)
- {
- if (bp[i] != 0xFF)
- {
- slow = true;
- break;
- }
- }
- }
- }
+ firstNullAttr = first_null_attr(bp, attnum);
}
+ else
+ firstNullAttr = attnum;
- tp = (char *) tup + data_off;
-
- if (!slow)
+ if (tupleDesc->firstNonCachedOffsetAttr > 0)
{
- CompactAttribute *att;
-
- /*
- * If we get here, there are no nulls up to and including the target
- * attribute. If we have a cached offset, we can use it.
- */
- att = TupleDescCompactAttr(tupleDesc, attnum);
- if (att->attcacheoff >= 0)
- return fetchatt(att, tp + att->attcacheoff);
-
/*
- * Otherwise, check for non-fixed-length attrs up to and including
- * target. If there aren't any, it's safe to cheaply initialize the
- * cached offsets for these attrs.
+ * Start at the highest attcacheoff attribute with no NULLs in prior
+ * attributes.
*/
- if (IndexTupleHasVarwidths(tup))
- {
- int j;
-
- for (j = 0; j <= attnum; j++)
- {
- if (TupleDescCompactAttr(tupleDesc, j)->attlen <= 0)
- {
- slow = true;
- break;
- }
- }
- }
+ startAttr = Min(tupleDesc->firstNonCachedOffsetAttr - 1, firstNullAttr);
+ off = TupleDescCompactAttr(tupleDesc, startAttr)->attcacheoff;
}
-
- if (!slow)
+ else
{
- int natts = tupleDesc->natts;
- int j = 1;
-
- /*
- * If we get here, we have a tuple with no nulls or var-widths up to
- * and including the target attribute, so we can use the cached offset
- * ... only we don't have it yet, or we'd not have got here. Since
- * it's cheap to compute offsets for fixed-width columns, we take the
- * opportunity to initialize the cached offsets for *all* the leading
- * fixed-width columns, in hope of avoiding future visits to this
- * routine.
- */
- TupleDescCompactAttr(tupleDesc, 0)->attcacheoff = 0;
+ /* Otherwise, start at the beginning... */
+ startAttr = 0;
+ off = 0;
+ }
- /* we might have set some offsets in the slow path previously */
- while (j < natts && TupleDescCompactAttr(tupleDesc, j)->attcacheoff > 0)
- j++;
+ /*
+ * Calculate 'off' up to the first NULL attr. We use two cheaper loops
+ * when the tuple has no variable-width columns. When variable-width
+ * columns exist, we use att_addlength_pointer() to move the offset
+ * beyond the current attribute.
+ */
+ if (IndexTupleHasVarwidths(tup))
+ {
+ /* Calculate the offset up until the first NULL */
+ for (i = startAttr; i < firstNullAttr; i++)
+ {
+ cattr = TupleDescCompactAttr(tupleDesc, i);
- off = TupleDescCompactAttr(tupleDesc, j - 1)->attcacheoff +
- TupleDescCompactAttr(tupleDesc, j - 1)->attlen;
+ off = att_pointer_alignby(off,
+ cattr->attalignby,
+ cattr->attlen,
+ tp + off);
+ off = att_addlength_pointer(off, cattr->attlen, tp + off);
+ }
- for (; j < natts; j++)
+ /* Calculate the offset for any remaining columns. */
+ for (; i < attnum; i++)
{
- CompactAttribute *att = TupleDescCompactAttr(tupleDesc, j);
+ Assert(hasnulls);
- if (att->attlen <= 0)
- break;
+ if (att_isnull(i, bp))
+ continue;
- off = att_nominal_alignby(off, att->attalignby);
+ cattr = TupleDescCompactAttr(tupleDesc, i);
- att->attcacheoff = off;
-
- off += att->attlen;
+ off = att_pointer_alignby(off,
+ cattr->attalignby,
+ cattr->attlen,
+ tp + off);
+ off = att_addlength_pointer(off, cattr->attlen, tp + off);
}
-
- Assert(j > attnum);
-
- off = TupleDescCompactAttr(tupleDesc, attnum)->attcacheoff;
}
else
{
- bool usecache = true;
- int i;
+ /* Handle tuples with only fixed-width attributes */
- /*
- * Now we know that we have to walk the tuple CAREFULLY. But we still
- * might be able to cache some offsets for next time.
- *
- * Note - This loop is a little tricky. For each non-null attribute,
- * we have to first account for alignment padding before the attr,
- * then advance over the attr based on its length. Nulls have no
- * storage and no alignment padding either. We can use/set
- * attcacheoff until we reach either a null or a var-width attribute.
- */
- off = 0;
- for (i = 0;; i++) /* loop exit is at "break" */
+ /* Calculate the offset up until the first NULL */
+ for (i = startAttr; i < firstNullAttr; i++)
{
- CompactAttribute *att = TupleDescCompactAttr(tupleDesc, i);
+ cattr = TupleDescCompactAttr(tupleDesc, i);
- if (IndexTupleHasNulls(tup) && att_isnull(i, bp))
- {
- usecache = false;
- continue; /* this cannot be the target att */
- }
-
- /* If we know the next offset, we can skip the rest */
- if (usecache && att->attcacheoff >= 0)
- off = att->attcacheoff;
- else if (att->attlen == -1)
- {
- /*
- * We can only cache the offset for a varlena attribute if the
- * offset is already suitably aligned, so that there would be
- * no pad bytes in any case: then the offset will be valid for
- * either an aligned or unaligned value.
- */
- if (usecache &&
- off == att_nominal_alignby(off, att->attalignby))
- att->attcacheoff = off;
- else
- {
- off = att_pointer_alignby(off, att->attalignby, -1,
- tp + off);
- usecache = false;
- }
- }
- else
- {
- /* not varlena, so safe to use att_nominal_alignby */
- off = att_nominal_alignby(off, att->attalignby);
+ Assert(cattr->attlen > 0);
+ off = att_nominal_alignby(off, cattr->attalignby);
+ off += cattr->attlen;
+ }
- if (usecache)
- att->attcacheoff = off;
- }
+ /* Calculate the offset for any remaining columns. */
+ for (; i < attnum; i++)
+ {
+ Assert(hasnulls);
- if (i == attnum)
- break;
+ if (att_isnull(i, bp))
+ continue;
- off = att_addlength_pointer(off, att->attlen, tp + off);
+ cattr = TupleDescCompactAttr(tupleDesc, i);
- if (usecache && att->attlen <= 0)
- usecache = false;
+ Assert(cattr->attlen > 0);
+ off = att_nominal_alignby(off, cattr->attalignby);
+ off += cattr->attlen;
}
}
- return fetchatt(TupleDescCompactAttr(tupleDesc, attnum), tp + off);
+ cattr = TupleDescCompactAttr(tupleDesc, attnum);
+ off = att_pointer_alignby(off, cattr->attalignby,
+ cattr->attlen, tp + off);
+ return fetchatt(cattr, tp + off);
}
/*
Datum *values, bool *isnull,
char *tp, bits8 *bp, int hasnulls)
{
+ CompactAttribute *cattr;
int natts = tupleDescriptor->natts; /* number of atts to extract */
- int attnum;
- int off = 0; /* offset in tuple data */
- bool slow = false; /* can we use/set attcacheoff? */
+ int attnum = 0;
+ uint32 off = 0; /* offset in tuple data */
+ int firstNonCacheOffsetAttr;
+ int firstNullAttr;
/* Assert to protect callers who allocate fixed-size arrays */
Assert(natts <= INDEX_MAX_KEYS);
- for (attnum = 0; attnum < natts; attnum++)
+ /* Did someone forget to call TupleDescFinalize()? */
+ Assert(tupleDescriptor->firstNonCachedOffsetAttr >= 0);
+
+ firstNonCacheOffsetAttr = Min(tupleDescriptor->firstNonCachedOffsetAttr, natts);
+
+ if (hasnulls)
+ {
+ firstNullAttr = first_null_attr(bp, natts);
+ firstNonCacheOffsetAttr = Min(firstNonCacheOffsetAttr, firstNullAttr);
+ }
+ else
+ firstNullAttr = natts;
+
+ if (firstNonCacheOffsetAttr > 0)
{
- CompactAttribute *thisatt = TupleDescCompactAttr(tupleDescriptor, attnum);
+#ifdef USE_ASSERT_CHECKING
+ /* In Assert enabled builds, verify attcacheoff is correct */
+ off = 0;
+#endif
- if (hasnulls && att_isnull(attnum, bp))
+ do
{
- values[attnum] = (Datum) 0;
- isnull[attnum] = true;
- slow = true; /* can't use attcacheoff anymore */
- continue;
- }
+ isnull[attnum] = false;
+ cattr = TupleDescCompactAttr(tupleDescriptor, attnum);
- isnull[attnum] = false;
+#ifdef USE_ASSERT_CHECKING
+ off = att_nominal_alignby(off, cattr->attalignby);
+ Assert(off == cattr->attcacheoff);
+ off += cattr->attlen;
+#endif
- if (!slow && thisatt->attcacheoff >= 0)
- off = thisatt->attcacheoff;
- else if (thisatt->attlen == -1)
- {
- /*
- * We can only cache the offset for a varlena attribute if the
- * offset is already suitably aligned, so that there would be no
- * pad bytes in any case: then the offset will be valid for either
- * an aligned or unaligned value.
- */
- if (!slow &&
- off == att_nominal_alignby(off, thisatt->attalignby))
- thisatt->attcacheoff = off;
- else
- {
- off = att_pointer_alignby(off, thisatt->attalignby, -1,
- tp + off);
- slow = true;
- }
- }
- else
- {
- /* not varlena, so safe to use att_nominal_alignby */
- off = att_nominal_alignby(off, thisatt->attalignby);
+ values[attnum] = fetch_att_noerr(tp + cattr->attcacheoff, cattr->attbyval,
+ cattr->attlen);
+ } while (++attnum < firstNonCacheOffsetAttr);
- if (!slow)
- thisatt->attcacheoff = off;
- }
+ off = cattr->attcacheoff + cattr->attlen;
+ }
- values[attnum] = fetchatt(thisatt, tp + off);
+ for (; attnum < firstNullAttr; attnum++)
+ {
+ isnull[attnum] = false;
+ cattr = TupleDescCompactAttr(tupleDescriptor, attnum);
+
+ /* align 'off', fetch the datum, and increment off beyond the datum */
+ values[attnum] = align_fetch_then_add(tp,
+ &off,
+ cattr->attbyval,
+ cattr->attlen,
+ cattr->attalignby);
+ }
- off = att_addlength_pointer(off, thisatt->attlen, tp + off);
+ for (; attnum < natts; attnum++)
+ {
+ Assert(hasnulls);
- if (thisatt->attlen <= 0)
- slow = true; /* can't use attcacheoff anymore */
+ if (att_isnull(attnum, bp))
+ {
+ values[attnum] = (Datum) 0;
+ isnull[attnum] = true;
+ continue;
+ }
+
+ isnull[attnum] = false;
+ cattr = TupleDescCompactAttr(tupleDescriptor, attnum);
+
+ /* align 'off', fetch the attr's value, and increment off beyond it */
+ values[attnum] = align_fetch_then_add(tp,
+ &off,
+ cattr->attbyval,
+ cattr->attlen,
+ cattr->attalignby);
}
}
desc->tdtypmod = -1;
desc->tdrefcount = -1; /* assume not reference-counted */
+ /* This will be set to the correct value by TupleDescFinalize() */
+ desc->firstNonCachedOffsetAttr = -1;
+ desc->firstNonGuaranteedAttr = -1;
+
return desc;
}
* descriptor to another.
*
* !!! Constraints and defaults are not copied !!!
+ *
+ * The caller must take care of calling TupleDescFinalize() on 'dst' once all
+ * TupleDesc changes have been made.
*/
void
TupleDescCopyEntry(TupleDesc dst, AttrNumber dstAttno,
populate_compact_attribute(dst, dstAttno - 1);
}
+/*
+ * TupleDescFinalize
+ * Finalize the given TupleDesc. This must be called after the
+ * attribute arrays have been populated or adjusted.
+ *
+ * Must be called after populate_compact_attribute() and before
+ * BlessTupleDesc().
+ */
+void
+TupleDescFinalize(TupleDesc tupdesc)
+{
+ int firstNonCachedOffsetAttr = 0;
+ int firstNonGuaranteedAttr = tupdesc->natts;
+ int off = 0;
+
+ for (int i = 0; i < tupdesc->natts; i++)
+ {
+ CompactAttribute *cattr = TupleDescCompactAttr(tupdesc, i);
+
+ /*
+ * Find the highest attnum which is guaranteed to exist in all tuples
+ * in the table. We currently only pay attention to byval attributes
+ * to allow additional optimizations during tuple deformation.
+ */
+ if (firstNonGuaranteedAttr == tupdesc->natts &&
+ (cattr->attnullability != ATTNULLABLE_VALID || !cattr->attbyval ||
+ cattr->atthasmissing || cattr->attisdropped || cattr->attlen <= 0))
+ firstNonGuaranteedAttr = i;
+
+ if (cattr->attlen <= 0)
+ break;
+
+ off = att_nominal_alignby(off, cattr->attalignby);
+
+ cattr->attcacheoff = off;
+
+ off += cattr->attlen;
+ firstNonCachedOffsetAttr = i + 1;
+ }
+
+ tupdesc->firstNonCachedOffsetAttr = firstNonCachedOffsetAttr;
+ tupdesc->firstNonGuaranteedAttr = firstNonGuaranteedAttr;
+}
+
/*
* Free a TupleDesc including all substructure
*/
/* We shouldn't need to bother with making these valid: */
att->attcompression = InvalidCompressionMethod;
att->attcollation = InvalidOid;
- /* In case we changed typlen, we'd better reset following offsets */
- for (int i = spgFirstIncludeColumn; i < outTupDesc->natts; i++)
- TupleDescCompactAttr(outTupDesc, i)->attcacheoff = -1;
populate_compact_attribute(outTupDesc, spgKeyColumn);
TupleDescFinalize(outTupDesc);
*/
if (map != NULL)
slot = execute_attr_map_slot(map, slot,
- MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));
+ MakeTupleTableSlot(tupdesc, &TTSOpsVirtual, 0));
modifiedCols = bms_union(ExecGetInsertedCols(rootrel, estate),
ExecGetUpdatedCols(rootrel, estate));
}
*/
if (map != NULL)
slot = execute_attr_map_slot(map, slot,
- MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));
+ MakeTupleTableSlot(tupdesc, &TTSOpsVirtual, 0));
modifiedCols = bms_union(ExecGetInsertedCols(rootrel, estate),
ExecGetUpdatedCols(rootrel, estate));
rel = rootrel->ri_RelationDesc;
*/
if (map != NULL)
slot = execute_attr_map_slot(map, slot,
- MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));
+ MakeTupleTableSlot(tupdesc, &TTSOpsVirtual, 0));
modifiedCols = bms_union(ExecGetInsertedCols(rootrel, estate),
ExecGetUpdatedCols(rootrel, estate));
rel = rootrel->ri_RelationDesc;
*/
if (map != NULL)
slot = execute_attr_map_slot(map, slot,
- MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));
+ MakeTupleTableSlot(tupdesc, &TTSOpsVirtual, 0));
modifiedCols = bms_union(ExecGetInsertedCols(rootrel, estate),
ExecGetUpdatedCols(rootrel, estate));
}
}
-/*
- * slot_deform_heap_tuple_internal
- * An always inline helper function for use in slot_deform_heap_tuple to
- * allow the compiler to emit specialized versions of this function for
- * various combinations of "slow" and "hasnulls". For example, if a
- * given tuple has no nulls, then we needn't check "hasnulls" for every
- * attribute that we're deforming. The caller can just call this
- * function with hasnulls set to constant-false and have the compiler
- * remove the constant-false branches and emit more optimal code.
- *
- * Returns the next attnum to deform, which can be equal to natts when the
- * function manages to deform all requested attributes. *offp is an input and
- * output parameter which is the byte offset within the tuple to start deforming
- * from which, on return, gets set to the offset where the next attribute
- * should be deformed from. *slowp is set to true when subsequent deforming
- * of this tuple must use a version of this function with "slow" passed as
- * true.
- *
- * Callers cannot assume when we return "attnum" (i.e. all requested
- * attributes have been deformed) that slow mode isn't required for any
- * additional deforming as the final attribute may have caused a switch to
- * slow mode.
- */
-static pg_attribute_always_inline int
-slot_deform_heap_tuple_internal(TupleTableSlot *slot, HeapTuple tuple,
- int attnum, int natts, bool slow,
- bool hasnulls, uint32 *offp, bool *slowp)
-{
- TupleDesc tupleDesc = slot->tts_tupleDescriptor;
- Datum *values = slot->tts_values;
- bool *isnull = slot->tts_isnull;
- HeapTupleHeader tup = tuple->t_data;
- char *tp; /* ptr to tuple data */
- bits8 *bp = tup->t_bits; /* ptr to null bitmap in tuple */
- bool slownext = false;
-
- tp = (char *) tup + tup->t_hoff;
-
- for (; attnum < natts; attnum++)
- {
- CompactAttribute *thisatt = TupleDescCompactAttr(tupleDesc, attnum);
-
- if (hasnulls && att_isnull(attnum, bp))
- {
- values[attnum] = (Datum) 0;
- isnull[attnum] = true;
- if (!slow)
- {
- *slowp = true;
- return attnum + 1;
- }
- else
- continue;
- }
-
- isnull[attnum] = false;
-
- /* calculate the offset of this attribute */
- if (!slow && thisatt->attcacheoff >= 0)
- *offp = thisatt->attcacheoff;
- else if (thisatt->attlen == -1)
- {
- /*
- * We can only cache the offset for a varlena attribute if the
- * offset is already suitably aligned, so that there would be no
- * pad bytes in any case: then the offset will be valid for either
- * an aligned or unaligned value.
- */
- if (!slow && *offp == att_nominal_alignby(*offp, thisatt->attalignby))
- thisatt->attcacheoff = *offp;
- else
- {
- *offp = att_pointer_alignby(*offp,
- thisatt->attalignby,
- -1,
- tp + *offp);
-
- if (!slow)
- slownext = true;
- }
- }
- else
- {
- /* not varlena, so safe to use att_nominal_alignby */
- *offp = att_nominal_alignby(*offp, thisatt->attalignby);
-
- if (!slow)
- thisatt->attcacheoff = *offp;
- }
-
- values[attnum] = fetchatt(thisatt, tp + *offp);
-
- *offp = att_addlength_pointer(*offp, thisatt->attlen, tp + *offp);
-
- /* check if we need to switch to slow mode */
- if (!slow)
- {
- /*
- * We're unable to deform any further if the above code set
- * 'slownext', or if this isn't a fixed-width attribute.
- */
- if (slownext || thisatt->attlen <= 0)
- {
- *slowp = true;
- return attnum + 1;
- }
- }
- }
-
- return natts;
-}
-
/*
* slot_deform_heap_tuple
* Given a TupleTableSlot, extract data from the slot's physical tuple
slot_deform_heap_tuple(TupleTableSlot *slot, HeapTuple tuple, uint32 *offp,
int reqnatts)
{
- bool hasnulls = HeapTupleHasNulls(tuple);
- int attnum;
+ CompactAttribute *cattr;
+ TupleDesc tupleDesc = slot->tts_tupleDescriptor;
+ HeapTupleHeader tup = tuple->t_data;
+ size_t attnum;
+ int firstNonCacheOffsetAttr;
+ int firstNonGuaranteedAttr;
+ int firstNullAttr;
int natts;
+ Datum *values;
+ bool *isnull;
+ char *tp; /* ptr to tuple data */
uint32 off; /* offset in tuple data */
- bool slow; /* can we use/set attcacheoff? */
- /* We can only fetch as many attributes as the tuple has. */
- natts = Min(HeapTupleHeaderGetNatts(tuple->t_data), reqnatts);
+ /* Did someone forget to call TupleDescFinalize()? */
+ Assert(tupleDesc->firstNonCachedOffsetAttr >= 0);
+
+ isnull = slot->tts_isnull;
/*
- * Check whether the first call for this tuple, and initialize or restore
- * loop state.
+ * Some callers may form and deform tuples prior to NOT NULL constraints
+ * being checked. Here we'd like to optimize the case where we only need
+ * to fetch attributes before or up to the point where the attribute is
+ * guaranteed to exist in the tuple. We rely on the slot flag being set
+ * correctly to only enable this optimization when it's valid to do so.
+ * This optimization allows us to save fetching the number of attributes
+ * from the tuple and saves the additional cost of handling non-byval
+ * attrs.
*/
+ firstNonGuaranteedAttr = Min(reqnatts, slot->tts_first_nonguaranteed);
+
+ firstNonCacheOffsetAttr = tupleDesc->firstNonCachedOffsetAttr;
+
+ if (HeapTupleHasNulls(tuple))
+ {
+ natts = HeapTupleHeaderGetNatts(tup);
+ tp = (char *) tup + MAXALIGN(offsetof(HeapTupleHeaderData, t_bits) +
+ BITMAPLEN(natts));
+
+ natts = Min(natts, reqnatts);
+ if (natts > firstNonGuaranteedAttr)
+ {
+ bits8 *bp = tup->t_bits;
+
+ /* Find the first NULL attr */
+ firstNullAttr = first_null_attr(bp, natts);
+
+ /*
+ * And populate the isnull array for all attributes being fetched
+ * from the tuple.
+ */
+ populate_isnull_array(bp, natts, isnull);
+ }
+ else
+ {
+ /* Otherwise all required columns are guaranteed to exist */
+ firstNullAttr = natts;
+ }
+ }
+ else
+ {
+ tp = (char *) tup + MAXALIGN(offsetof(HeapTupleHeaderData, t_bits));
+
+ /*
+ * We only need to look at the tuple's natts if we need more than the
+ * guaranteed number of columns
+ */
+ if (reqnatts > firstNonGuaranteedAttr)
+ natts = Min(HeapTupleHeaderGetNatts(tup), reqnatts);
+ else
+ {
+ /* No need to access the number of attributes in the tuple */
+ natts = reqnatts;
+ }
+
+ /* All attrs can be fetched without checking for NULLs */
+ firstNullAttr = natts;
+ }
+
attnum = slot->tts_nvalid;
+ values = slot->tts_values;
slot->tts_nvalid = reqnatts;
- if (attnum == 0)
+
+ /* Ensure we calculated tp correctly */
+ Assert(tp == (char *) tup + tup->t_hoff);
+
+ if (attnum < firstNonGuaranteedAttr)
{
- /* Start from the first attribute */
- off = 0;
- slow = false;
+ int attlen;
+
+ do
+ {
+ isnull[attnum] = false;
+ cattr = TupleDescCompactAttr(tupleDesc, attnum);
+ attlen = cattr->attlen;
+
+ /* We don't expect any non-byval types */
+ pg_assume(attlen > 0);
+ Assert(cattr->attbyval == true);
+
+ off = cattr->attcacheoff;
+ values[attnum] = fetch_att_noerr(tp + off, true, attlen);
+ attnum++;
+ } while (attnum < firstNonGuaranteedAttr);
+
+ off += attlen;
+
+ if (attnum == reqnatts)
+ goto done;
}
else
{
- /* Restore state from previous execution */
+ /*
+ * We may be incrementally deforming the tuple, so set 'off' to the
+ * previously cached value. This may be 0, if the slot has just
+ * received a new tuple.
+ */
off = *offp;
- slow = TTS_SLOW(slot);
+
+ /* We expect *offp to be set to 0 when attnum == 0 */
+ Assert(off == 0 || attnum > 0);
}
+ /* We can use attcacheoff up until the first NULL */
+ firstNonCacheOffsetAttr = Min(firstNonCacheOffsetAttr, firstNullAttr);
+
/*
- * If 'slow' isn't set, try deforming using deforming code that does not
- * contain any of the extra checks required for non-fixed offset
- * deforming. During deforming, if or when we find a NULL or a variable
- * length attribute, we'll switch to a deforming method which includes the
- * extra code required for non-fixed offset deforming, a.k.a slow mode.
- * Because this is performance critical, we inline
- * slot_deform_heap_tuple_internal passing the 'slow' and 'hasnull'
- * parameters as constants to allow the compiler to emit specialized code
- * with the known-const false comparisons and subsequent branches removed.
+ * Handle the portion of the tuple that we have cached the offset for up
+ * to the first NULL attribute. The offset is effectively fixed for
+ * these, so we can use the CompactAttribute's attcacheoff.
*/
- if (!slow)
+ if (attnum < firstNonCacheOffsetAttr)
{
- /* Tuple without any NULLs? We can skip doing any NULL checking */
- if (!hasnulls)
- attnum = slot_deform_heap_tuple_internal(slot,
- tuple,
- attnum,
- natts,
- false, /* slow */
- false, /* hasnulls */
- &off,
- &slow);
- else
- attnum = slot_deform_heap_tuple_internal(slot,
- tuple,
- attnum,
- natts,
- false, /* slow */
- true, /* hasnulls */
- &off,
- &slow);
+ int attlen;
+
+ do
+ {
+ isnull[attnum] = false;
+ cattr = TupleDescCompactAttr(tupleDesc, attnum);
+ attlen = cattr->attlen;
+
+ off = cattr->attcacheoff;
+ values[attnum] = fetch_att_noerr(tp + off,
+ cattr->attbyval,
+ attlen);
+ attnum++;
+ } while (attnum < firstNonCacheOffsetAttr);
+
+ /*
+ * Point the offset after the end of the last attribute with a cached
+ * offset. We expect the final cached offset attribute to have a
+ * fixed width, so just add the attlen to the attcacheoff
+ */
+ Assert(attlen > 0);
+ off += attlen;
}
- /* If there's still work to do then we must be in slow mode */
- if (attnum < natts)
+ /*
+ * Handle any portion of the tuple that doesn't have a fixed offset up
+ * until the first NULL attribute. This loop only differs from the one
+ * after it by the NULL checks.
+ */
+ for (; attnum < firstNullAttr; attnum++)
{
- /* XXX is it worth adding a separate call when hasnulls is false? */
- attnum = slot_deform_heap_tuple_internal(slot,
- tuple,
- attnum,
- natts,
- true, /* slow */
- hasnulls,
- &off,
- &slow);
+ int attlen;
+
+ isnull[attnum] = false;
+ cattr = TupleDescCompactAttr(tupleDesc, attnum);
+ attlen = cattr->attlen;
+
+ /*
+ * cstrings don't exist in heap tuples. Use pg_assume to instruct the
+ * compiler not to emit the cstring-related code in
+ * align_fetch_then_add().
+ */
+ pg_assume(attlen > 0 || attlen == -1);
+
+ /* align 'off', fetch the datum, and increment off beyond the datum */
+ values[attnum] = align_fetch_then_add(tp,
+ &off,
+ cattr->attbyval,
+ attlen,
+ cattr->attalignby);
}
/*
- * Save state for next execution
+ * Now handle any remaining attributes in the tuple up to the requested
+ * attnum. This time, include NULL checks as we're now at the first NULL
+ * attribute.
*/
- *offp = off;
- if (slow)
- slot->tts_flags |= TTS_FLAG_SLOW;
- else
- slot->tts_flags &= ~TTS_FLAG_SLOW;
+ for (; attnum < natts; attnum++)
+ {
+ int attlen;
+
+ if (isnull[attnum])
+ {
+ values[attnum] = (Datum) 0;
+ continue;
+ }
- /* Fetch any missing attrs and raise an error if reqnatts is invalid. */
+ cattr = TupleDescCompactAttr(tupleDesc, attnum);
+ attlen = cattr->attlen;
+
+ /* As above, we don't expect cstrings */
+ pg_assume(attlen > 0 || attlen == -1);
+
+ /* align 'off', fetch the datum, and increment off beyond the datum */
+ values[attnum] = align_fetch_then_add(tp,
+ &off,
+ cattr->attbyval,
+ attlen,
+ cattr->attalignby);
+ }
+
+ /* Fetch any missing attrs and raise an error if reqnatts is invalid */
if (unlikely(attnum < reqnatts))
+ {
+ /*
+ * Cache the offset before calling the function to allow the compiler
+ * to implement a tail-call optimization
+ */
+ *offp = off;
slot_getmissingattrs(slot, attnum, reqnatts);
+ return;
+ }
+done:
+
+ /* Save current offset for next execution */
+ *offp = off;
}
const TupleTableSlotOps TTSOpsVirtual = {
* Basic routine to make an empty TupleTableSlot of given
* TupleTableSlotType. If tupleDesc is specified the slot's descriptor is
* fixed for its lifetime, gaining some efficiency. If that's
- * undesirable, pass NULL.
+ * undesirable, pass NULL. 'flags' allows any non-TTS_FLAGS_TRANSIENT
+ * flags to be set in tts_flags.
* --------------------------------
*/
TupleTableSlot *
MakeTupleTableSlot(TupleDesc tupleDesc,
- const TupleTableSlotOps *tts_ops)
+ const TupleTableSlotOps *tts_ops, uint16 flags)
{
Size basesz,
allocsz;
basesz = tts_ops->base_slot_size;
+ /* Ensure callers don't have any way to set transient flags permanently */
+ flags &= ~TTS_FLAGS_TRANSIENT;
+
/*
* When a fixed descriptor is specified, we can reduce overhead by
* allocating the entire slot in one go.
+ *
+ * We round the size of tts_isnull up to the next highest multiple of 8.
+ * This is needed as populate_isnull_array() operates on 8 elements at a
+ * time when converting a tuple's NULL bitmap into a boolean array.
*/
if (tupleDesc)
allocsz = MAXALIGN(basesz) +
MAXALIGN(tupleDesc->natts * sizeof(Datum)) +
- MAXALIGN(tupleDesc->natts * sizeof(bool));
+ TYPEALIGN(8, tupleDesc->natts * sizeof(bool));
else
allocsz = basesz;
/* const for optimization purposes, OK to modify at allocation time */
*((const TupleTableSlotOps **) &slot->tts_ops) = tts_ops;
slot->type = T_TupleTableSlot;
- slot->tts_flags |= TTS_FLAG_EMPTY;
+ slot->tts_flags = TTS_FLAG_EMPTY | flags;
if (tupleDesc != NULL)
slot->tts_flags |= TTS_FLAG_FIXED;
slot->tts_tupleDescriptor = tupleDesc;
slot->tts_values = (Datum *)
(((char *) slot)
+ MAXALIGN(basesz));
+
slot->tts_isnull = (bool *)
(((char *) slot)
+ MAXALIGN(basesz)
+ MAXALIGN(tupleDesc->natts * sizeof(Datum)));
PinTupleDesc(tupleDesc);
+
+ /*
+ * Precalculate the maximum guaranteed attribute that has to exist in
+ * every tuple which gets deformed into this slot. When the
+ * TTS_FLAG_OBEYS_NOT_NULL_CONSTRAINTS flag is enabled, we simply take
+ * the precalculated value from the tupleDesc, otherwise the
+ * optimization is disabled, and we set the value to 0.
+ */
+ if ((flags & TTS_FLAG_OBEYS_NOT_NULL_CONSTRAINTS) != 0)
+ slot->tts_first_nonguaranteed = tupleDesc->firstNonGuaranteedAttr;
+ else
+ slot->tts_first_nonguaranteed = 0;
}
/*
*/
TupleTableSlot *
ExecAllocTableSlot(List **tupleTable, TupleDesc desc,
- const TupleTableSlotOps *tts_ops)
+ const TupleTableSlotOps *tts_ops, uint16 flags)
{
- TupleTableSlot *slot = MakeTupleTableSlot(desc, tts_ops);
+ TupleTableSlot *slot = MakeTupleTableSlot(desc, tts_ops, flags);
*tupleTable = lappend(*tupleTable, slot);
MakeSingleTupleTableSlot(TupleDesc tupdesc,
const TupleTableSlotOps *tts_ops)
{
- TupleTableSlot *slot = MakeTupleTableSlot(tupdesc, tts_ops);
+ TupleTableSlot *slot = MakeTupleTableSlot(tupdesc, tts_ops, 0);
return slot;
}
*/
slot->tts_values = (Datum *)
MemoryContextAlloc(slot->tts_mcxt, tupdesc->natts * sizeof(Datum));
+
+ /*
+ * We round the size of tts_isnull up to the next highest multiple of 8.
+ * This is needed as populate_isnull_array() operates on 8 elements at a
+ * time when converting a tuple's NULL bitmap into a boolean array.
+ */
slot->tts_isnull = (bool *)
- MemoryContextAlloc(slot->tts_mcxt, tupdesc->natts * sizeof(bool));
+ MemoryContextAlloc(slot->tts_mcxt, TYPEALIGN(8, tupdesc->natts * sizeof(bool)));
}
/* --------------------------------
TupleTableSlot *slot;
slot = ExecAllocTableSlot(&planstate->state->es_tupleTable,
- planstate->ps_ResultTupleDesc, tts_ops);
+ planstate->ps_ResultTupleDesc, tts_ops, 0);
planstate->ps_ResultTupleSlot = slot;
planstate->resultopsfixed = planstate->ps_ResultTupleDesc != NULL;
*/
void
ExecInitScanTupleSlot(EState *estate, ScanState *scanstate,
- TupleDesc tupledesc, const TupleTableSlotOps *tts_ops)
+ TupleDesc tupledesc, const TupleTableSlotOps *tts_ops,
+ uint16 flags)
{
scanstate->ss_ScanTupleSlot = ExecAllocTableSlot(&estate->es_tupleTable,
- tupledesc, tts_ops);
+ tupledesc, tts_ops, flags);
scanstate->ps.scandesc = tupledesc;
scanstate->ps.scanopsfixed = tupledesc != NULL;
scanstate->ps.scanops = tts_ops;
TupleDesc tupledesc,
const TupleTableSlotOps *tts_ops)
{
- return ExecAllocTableSlot(&estate->es_tupleTable, tupledesc, tts_ops);
+ return ExecAllocTableSlot(&estate->es_tupleTable, tupledesc, tts_ops, 0);
}
/* ----------------
* This happens "for free" if the tupdesc came from a relcache entry, but
* not if we have manufactured a tupdesc for a transient RECORD datatype.
* In that case we have to notify typcache.c of the existence of the type.
+ *
+ * TupleDescFinalize() must be called on the TupleDesc before calling this
+ * function.
*/
TupleDesc
BlessTupleDesc(TupleDesc tupdesc)
{
+ /* Did someone forget to call TupleDescFinalize()? */
+ Assert(tupdesc->firstNonCachedOffsetAttr >= 0);
+
if (tupdesc->tdtypeid == RECORDOID &&
tupdesc->tdtypmod < 0)
assign_record_type_typmod(tupdesc);
outerPlan = outerPlanState(scanstate);
tupDesc = ExecGetResultType(outerPlan);
- ExecInitScanTupleSlot(estate, scanstate, tupDesc, tts_ops);
+ ExecInitScanTupleSlot(estate, scanstate, tupDesc, tts_ops, 0);
}
/* ----------------------------------------------------------------
&perhash->hashfunctions);
perhash->hashslot =
ExecAllocTableSlot(&estate->es_tupleTable, hashDesc,
- &TTSOpsMinimalTuple);
+ &TTSOpsMinimalTuple, 0);
list_free(hashTlist);
bms_free(colnos);
*/
ExecInitScanTupleSlot(estate, &scanstate->ss,
RelationGetDescr(currentRelation),
- table_slot_callbacks(currentRelation));
+ table_slot_callbacks(currentRelation),
+ TTS_FLAG_OBEYS_NOT_NULL_CONSTRAINTS);
/*
* Initialize result type and projection.
*/
ExecInitScanTupleSlot(estate, &scanstate->ss,
ExecGetResultType(scanstate->cteplanstate),
- &TTSOpsMinimalTuple);
+ &TTSOpsMinimalTuple, 0);
/*
* Initialize result type and projection.
TupleDesc scan_tupdesc;
scan_tupdesc = ExecTypeFromTL(cscan->custom_scan_tlist);
- ExecInitScanTupleSlot(estate, &css->ss, scan_tupdesc, slotOps);
+ ExecInitScanTupleSlot(estate, &css->ss, scan_tupdesc, slotOps, 0);
/* Node's targetlist will contain Vars with varno = INDEX_VAR */
tlistvarno = INDEX_VAR;
}
else
{
ExecInitScanTupleSlot(estate, &css->ss, RelationGetDescr(scan_rel),
- slotOps);
+ slotOps, 0);
/* Node's targetlist will contain Vars with varno = scanrelid */
tlistvarno = scanrelid;
}
scan_tupdesc = ExecTypeFromTL(node->fdw_scan_tlist);
ExecInitScanTupleSlot(estate, &scanstate->ss, scan_tupdesc,
- &TTSOpsHeapTuple);
+ &TTSOpsHeapTuple, 0);
/* Node's targetlist will contain Vars with varno = INDEX_VAR */
tlistvarno = INDEX_VAR;
}
/* don't trust FDWs to return tuples fulfilling NOT NULL constraints */
scan_tupdesc = CreateTupleDescCopy(RelationGetDescr(currentRelation));
ExecInitScanTupleSlot(estate, &scanstate->ss, scan_tupdesc,
- &TTSOpsHeapTuple);
+ &TTSOpsHeapTuple, 0);
/* Node's targetlist will contain Vars with varno = scanrelid */
tlistvarno = scanrelid;
}
* Initialize scan slot and type.
*/
ExecInitScanTupleSlot(estate, &scanstate->ss, scan_tupdesc,
- &TTSOpsMinimalTuple);
+ &TTSOpsMinimalTuple, 0);
/*
* Initialize result slot, type and projection.
*/
tupDesc = ExecTypeFromTL(node->indextlist);
ExecInitScanTupleSlot(estate, &indexstate->ss, tupDesc,
- &TTSOpsVirtual);
+ &TTSOpsVirtual,
+ 0);
/*
* We need another slot, in a format that's suitable for the table AM, for
indexstate->ioss_TableSlot =
ExecAllocTableSlot(&estate->es_tupleTable,
RelationGetDescr(currentRelation),
- table_slot_callbacks(currentRelation));
+ table_slot_callbacks(currentRelation), 0);
/*
* Initialize result type and projection info. The node's targetlist will
*/
ExecInitScanTupleSlot(estate, &indexstate->ss,
RelationGetDescr(currentRelation),
- table_slot_callbacks(currentRelation));
+ table_slot_callbacks(currentRelation),
+ TTS_FLAG_OBEYS_NOT_NULL_CONSTRAINTS);
/*
* Initialize result type and projection.
* The scan tuple type is specified for the tuplestore.
*/
ExecInitScanTupleSlot(estate, &scanstate->ss, scanstate->tupdesc,
- &TTSOpsMinimalTuple);
+ &TTSOpsMinimalTuple, 0);
/*
* Initialize result type and projection.
/* and create slot with appropriate rowtype */
ExecInitScanTupleSlot(estate, &scanstate->ss,
RelationGetDescr(scanstate->ss.ss_currentRelation),
- table_slot_callbacks(scanstate->ss.ss_currentRelation));
+ table_slot_callbacks(scanstate->ss.ss_currentRelation),
+ TTS_FLAG_OBEYS_NOT_NULL_CONSTRAINTS);
/*
* Initialize result type and projection.
/* and create slot with the appropriate rowtype */
ExecInitScanTupleSlot(estate, &scanstate->ss,
RelationGetDescr(scanstate->ss.ss_currentRelation),
- table_slot_callbacks(scanstate->ss.ss_currentRelation));
+ table_slot_callbacks(scanstate->ss.ss_currentRelation),
+ TTS_FLAG_OBEYS_NOT_NULL_CONSTRAINTS);
/*
* Initialize result type and projection.
*/
ExecInitScanTupleSlot(estate, &subquerystate->ss,
ExecGetResultType(subquerystate->subplan),
- ExecGetResultSlotOps(subquerystate->subplan, NULL));
+ ExecGetResultSlotOps(subquerystate->subplan, NULL),
+ 0);
/*
* The slot used as the scantuple isn't the slot above (outside of EPQ),
tf->colcollations);
/* and the corresponding scan slot */
ExecInitScanTupleSlot(estate, &scanstate->ss, tupdesc,
- &TTSOpsMinimalTuple);
+ &TTSOpsMinimalTuple, 0);
/*
* Initialize result type and projection.
*/
ExecInitScanTupleSlot(estate, &tidrangestate->ss,
RelationGetDescr(currentRelation),
- table_slot_callbacks(currentRelation));
+ table_slot_callbacks(currentRelation),
+ TTS_FLAG_OBEYS_NOT_NULL_CONSTRAINTS);
/*
* Initialize result type and projection.
*/
ExecInitScanTupleSlot(estate, &tidstate->ss,
RelationGetDescr(currentRelation),
- table_slot_callbacks(currentRelation));
+ table_slot_callbacks(currentRelation),
+ TTS_FLAG_OBEYS_NOT_NULL_CONSTRAINTS);
/*
* Initialize result type and projection.
* Get info about values list, initialize scan slot with it.
*/
tupdesc = ExecTypeFromExprList((List *) linitial(node->values_lists));
- ExecInitScanTupleSlot(estate, &scanstate->ss, tupdesc, &TTSOpsVirtual);
+ ExecInitScanTupleSlot(estate, &scanstate->ss, tupdesc, &TTSOpsVirtual, 0);
/*
* Initialize result type and projection.
scanstate->ss.ps.resultopsset = true;
scanstate->ss.ps.resultopsfixed = false;
- ExecInitScanTupleSlot(estate, &scanstate->ss, NULL, &TTSOpsMinimalTuple);
+ ExecInitScanTupleSlot(estate, &scanstate->ss, NULL, &TTSOpsMinimalTuple, 0);
/*
* initialize child expressions
LLVMValueRef v_tts_values;
LLVMValueRef v_tts_nulls;
LLVMValueRef v_slotoffp;
- LLVMValueRef v_flagsp;
LLVMValueRef v_nvalidp;
LLVMValueRef v_nvalid;
LLVMValueRef v_maxatt;
v_tts_nulls =
l_load_struct_gep(b, StructTupleTableSlot, v_slot, FIELDNO_TUPLETABLESLOT_ISNULL,
"tts_ISNULL");
- v_flagsp = l_struct_gep(b, StructTupleTableSlot, v_slot, FIELDNO_TUPLETABLESLOT_FLAGS, "");
v_nvalidp = l_struct_gep(b, StructTupleTableSlot, v_slot, FIELDNO_TUPLETABLESLOT_NVALID, "");
if (ops == &TTSOpsHeapTuple || ops == &TTSOpsBufferHeapTuple)
{
LLVMValueRef v_off = l_load(b, TypeSizeT, v_offp, "");
- LLVMValueRef v_flags;
LLVMBuildStore(b, l_int16_const(lc, natts), v_nvalidp);
v_off = LLVMBuildTrunc(b, v_off, LLVMInt32TypeInContext(lc), "");
LLVMBuildStore(b, v_off, v_slotoffp);
- v_flags = l_load(b, LLVMInt16TypeInContext(lc), v_flagsp, "tts_flags");
- v_flags = LLVMBuildOr(b, v_flags, l_int16_const(lc, TTS_FLAG_SLOW), "");
- LLVMBuildStore(b, v_flags, v_flagsp);
LLVMBuildRetVoid(b);
}
if (relentry->attrmap)
{
TupleTableSlot *slot = MakeTupleTableSlot(RelationGetDescr(targetrel),
- &TTSOpsVirtual);
+ &TTSOpsVirtual, 0);
old_slot = execute_attr_map_slot(relentry->attrmap, old_slot, slot);
}
if (relentry->attrmap)
{
TupleTableSlot *slot = MakeTupleTableSlot(RelationGetDescr(targetrel),
- &TTSOpsVirtual);
+ &TTSOpsVirtual, 0);
new_slot = execute_attr_map_slot(relentry->attrmap, new_slot, slot);
}
elog(ERROR, "pg_attribute catalog is missing %d attribute(s) for relation OID %u",
need, RelationGetRelid(relation));
- /*
- * We can easily set the attcacheoff value for the first attribute: it
- * must be zero. This eliminates the need for special cases for attnum=1
- * that used to exist in fastgetattr() and index_getattr().
- */
- if (RelationGetNumberOfAttributes(relation) > 0)
- TupleDescCompactAttr(relation->rd_att, 0)->attcacheoff = 0;
-
/*
* Set up constraint/default info
*/
populate_compact_attribute(relation->rd_att, i);
}
- /* initialize first attribute's attcacheoff, cf RelationBuildTupleDesc */
- TupleDescCompactAttr(relation->rd_att, 0)->attcacheoff = 0;
TupleDescFinalize(relation->rd_att);
/* mark not-null status */
populate_compact_attribute(result, i);
}
- /* initialize first attribute's attcacheoff, cf RelationBuildTupleDesc */
- TupleDescCompactAttr(result, 0)->attcacheoff = 0;
TupleDescFinalize(result);
/* Note: we don't bother to set up a TupleConstr entry */
* Any code making changes manually to and fields in the FormData_pg_attribute
* array must subsequently call populate_compact_attribute() to flush the
* changes out to the corresponding 'compact_attrs' element.
+ *
+ * firstNonCachedOffsetAttr stores the index into the compact_attrs array for
+ * the first attribute that we don't have a known attcacheoff for.
+ *
+ * firstNonGuaranteedAttr stores the index into the compact_attrs array for
+ * the first attribute that is either NULLable, missing, or !attbyval. This
+ * can be used as a guarantee that attributes before this one will always
+ * exist in tuples. The !attbyval part isn't required for this, but
+ * including this allows various tuple deforming routines to forego any checks
+ * for !attbyval.
+ *
+ * Once a TupleDesc has been populated, before it is used for any purpose,
+ * TupleDescFinalize() must be called on it.
*/
typedef struct TupleDescData
{
Oid tdtypeid; /* composite type ID for tuple type */
int32 tdtypmod; /* typmod for tuple type */
int tdrefcount; /* reference count, or -1 if not counting */
+ int firstNonCachedOffsetAttr; /* index of first compact_attrs
+ * element without an attcacheoff */
+ int firstNonGuaranteedAttr; /* index of the first nullable,
+ * missing, dropped, or !attbyval
+ * compact_attrs element. */
TupleConstr *constr; /* constraints, or NULL if none */
/* compact_attrs[N] is the compact metadata of Attribute Number N+1 */
CompactAttribute compact_attrs[FLEXIBLE_ARRAY_MEMBER];
extern TupleDesc CreateTupleDescCopyConstr(TupleDesc tupdesc);
-#define TupleDescFinalize(d) ((void) 0)
#define TupleDescSize(src) \
(offsetof(struct TupleDescData, compact_attrs) + \
(src)->natts * sizeof(CompactAttribute) + \
extern void TupleDescCopyEntry(TupleDesc dst, AttrNumber dstAttno,
TupleDesc src, AttrNumber srcAttno);
+extern void TupleDescFinalize(TupleDesc tupdesc);
extern void FreeTupleDesc(TupleDesc tupdesc);
extern void IncrTupleDescRefCount(TupleDesc tupdesc);
#define TUPMACS_H
#include "catalog/pg_type_d.h" /* for TYPALIGN macros */
-
+#include "port/pg_bitutils.h"
+#include "port/pg_bswap.h"
+#include "varatt.h"
/*
* Check a tuple's null bitmap to determine whether the attribute is null.
return !(BITS[ATT >> 3] & (1 << (ATT & 0x07)));
}
+/*
+ * populate_isnull_array
+ * Transform a tuple's null bitmap into a boolean array.
+ *
+ * Caller must ensure that the isnull array is sized so it contains
+ * at least as many elements as there are bits in the 'bits' array.
+ * Callers should be aware that isnull is populated 8 elements at a time,
+ * effectively as if natts is rounded up to the next multiple of 8.
+ */
+static inline void
+populate_isnull_array(const bits8 *bits, int natts, bool *isnull)
+{
+ int nbytes = (natts + 7) >> 3;
+
+ /*
+ * Multiplying the inverted NULL bitmap byte by this value results in the
+ * lowest bit in each byte being set the same as each bit of the inverted
+ * byte. We perform this as 2 32-bit operations rather than a single
+ * 64-bit operation as multiplying by the required value to do this in
+ * 64-bits would result in overflowing a uint64 in some cases.
+ *
+ * XXX if we ever require BMI2 (-march=x86-64-v3), then this could be done
+ * more efficiently on most X86-64 CPUs with the PDEP instruction. Beware
+ * that some chips (e.g. AMD's Zen2) are horribly inefficient at PDEP.
+ */
+#define SPREAD_BITS_MULTIPLIER_32 0x204081U
+
+ for (int i = 0; i < nbytes; i++, isnull += 8)
+ {
+ uint64 isnull_8;
+ bits8 nullbyte = ~bits[i];
+
+ /* Convert the lower 4 bits of the NULL bitmap byte into a 64 bit int */
+ isnull_8 = (nullbyte & 0xf) * SPREAD_BITS_MULTIPLIER_32;
+
+ /*
+ * Convert the upper 4 bits of the NULL bitmap byte into a 64 bit int,
+ * shift into the upper 32 bits and bitwise-OR with the result of the
+ * lower 4 bits.
+ */
+ isnull_8 |= ((uint64) ((nullbyte >> 4) * SPREAD_BITS_MULTIPLIER_32)) << 32;
+
+ /* Mask out all other bits apart from the lowest bit of each byte. */
+ isnull_8 &= UINT64CONST(0x0101010101010101);
+
+#ifdef WORDS_BIGENDIAN
+
+ /*
+ * Fix byte order on big-endian machines before copying to the array.
+ */
+ isnull_8 = pg_bswap64(isnull_8);
+#endif
+ memcpy(isnull, &isnull_8, sizeof(uint64));
+ }
+}
+
#ifndef FRONTEND
/*
* Given an attbyval and an attlen from either a Form_pg_attribute or
else
return PointerGetDatum(T);
}
+
+/*
+ * Same as fetch_att, but no error checking for invalid attlens for byval
+ * types. This is safe to use when attlen comes from CompactAttribute as we
+ * validate the length when populating that struct.
+ */
+static inline Datum
+fetch_att_noerr(const void *T, bool attbyval, int attlen)
+{
+ if (attbyval)
+ {
+ switch (attlen)
+ {
+ case sizeof(int32):
+ return Int32GetDatum(*((const int32 *) T));
+ case sizeof(int16):
+ return Int16GetDatum(*((const int16 *) T));
+ case sizeof(char):
+ return CharGetDatum(*((const char *) T));
+ default:
+ Assert(attlen == sizeof(int64));
+ return Int64GetDatum(*((const int64 *) T));
+ }
+ }
+ else
+ return PointerGetDatum(T);
+}
+
+
+/*
+ * align_fetch_then_add
+ * Applies all the functionality of att_pointer_alignby(),
+ * fetch_att_noerr() and att_addlength_pointer(), leaving *off set to
+ * the (possibly unaligned) byte offset into 'tupptr' just past the
+ * fetched attribute, ready to deform the next attribute.
+ *
+ * tupptr: pointer to the beginning of the tuple, after the header and any
+ * NULL bitmask.
+ * off: offset in bytes for reading tuple data, possibly unaligned.
+ * attbyval, attlen and attalignby are values from CompactAttribute.
+ */
+static inline Datum
+align_fetch_then_add(const char *tupptr, uint32 *off, bool attbyval, int attlen,
+ uint8 attalignby)
+{
+ Datum res;
+
+ if (attlen > 0)
+ {
+ const char *offset_ptr;
+
+ *off = TYPEALIGN(attalignby, *off);
+ offset_ptr = tupptr + *off;
+ *off += attlen;
+ if (attbyval)
+ {
+ switch (attlen)
+ {
+ case sizeof(char):
+ return CharGetDatum(*((const char *) offset_ptr));
+ case sizeof(int16):
+ return Int16GetDatum(*((const int16 *) offset_ptr));
+ case sizeof(int32):
+ return Int32GetDatum(*((const int32 *) offset_ptr));
+ default:
+
+ /*
+ * populate_compact_attribute_internal() should have
+ * checked
+ */
+ Assert(attlen == sizeof(int64));
+ return Int64GetDatum(*((const int64 *) offset_ptr));
+ }
+ }
+ return PointerGetDatum(offset_ptr);
+ }
+ else if (attlen == -1)
+ {
+ if (!VARATT_IS_SHORT(tupptr + *off))
+ *off = TYPEALIGN(attalignby, *off);
+
+ res = PointerGetDatum(tupptr + *off);
+ *off += VARSIZE_ANY(DatumGetPointer(res));
+ return res;
+ }
+ else
+ {
+ Assert(attlen == -2);
+ *off = TYPEALIGN(attalignby, *off);
+ res = PointerGetDatum(tupptr + *off);
+ *off += strlen(tupptr + *off) + 1;
+ return res;
+ }
+}
+
+/*
+ * first_null_attr
+ * Inspect a NULL bitmap from a tuple and return the 0-based attnum of the
+ * first NULL attribute. Returns natts if no NULLs were found.
+ *
+ * This is coded to expect that 'bits' contains at least one 0 bit somewhere
+ * in the array, but not necessarily < natts. Note that natts may be passed
+ * as a value lower than the number of bits physically stored in the tuple's
+ * NULL bitmap, in which case we may not find a NULL and return natts.
+ *
+ * The reason we require at least one 0 bit somewhere in the NULL bitmap is
+ * that the for loop that checks 0xFF bytes would loop to the last byte in
+ * the array if all bytes were 0xFF, and the subsequent code that finds the
+ * right-most 0 bit would access the first byte beyond the bitmap. Provided
+ * we find a 0 bit before then, that won't happen. Since tuples which have no
+ * NULLs don't have a NULL bitmap, this function won't get called for that
+ * case.
+ */
+static inline int
+first_null_attr(const bits8 *bits, int natts)
+{
+ int nattByte = natts >> 3;
+ int bytenum;
+ int res;
+
+#ifdef USE_ASSERT_CHECKING
+ int firstnull_check = natts;
+
+ /* Do it the slow way and check we get the same answer. */
+ for (int i = 0; i < natts; i++)
+ {
+ if (att_isnull(i, bits))
+ {
+ firstnull_check = i;
+ break;
+ }
+ }
+#endif
+
+ /* Process all bytes up to just before the byte for the natts attribute */
+ for (bytenum = 0; bytenum < nattByte; bytenum++)
+ {
+ /* break if there's any NULL attrs (a 0 bit) */
+ if (bits[bytenum] != 0xFF)
+ break;
+ }
+
+ /*
+ * Look for the highest 0-bit in the 'bytenum' element. To do this, we
+ * promote the uint8 to uint32 before performing the bitwise NOT and
+ * looking for the first 1-bit. This works even when the byte is 0xFF, as
+ * the bitwise NOT of 0xFF in 32 bits is 0xFFFFFF00, in which case
+ * pg_rightmost_one_pos32() will return 8. We may end up with a value
+ * higher than natts here, but we'll fix that with the Min() below.
+ */
+ res = bytenum << 3;
+ res += pg_rightmost_one_pos32(~((uint32) bits[bytenum]));
+
+ /*
+ * Since we did no masking to mask out bits beyond the natts'th bit, we
+ * may have found a bit higher than natts, so we must cap res to natts
+ */
+ res = Min(res, natts);
+
+ /* Ensure we got the same answer as the att_isnull() loop got */
+ Assert(res == firstnull_check);
+
+ return res;
+}
#endif /* FRONTEND */
/*
const TupleTableSlotOps *tts_ops);
extern void ExecInitScanTupleSlot(EState *estate, ScanState *scanstate,
TupleDesc tupledesc,
- const TupleTableSlotOps *tts_ops);
+ const TupleTableSlotOps *tts_ops,
+ uint16 flags);
extern TupleTableSlot *ExecInitExtraTupleSlot(EState *estate,
TupleDesc tupledesc,
const TupleTableSlotOps *tts_ops);
* tts_values/tts_isnull are allocated either when the slot is created (when
* the descriptor is provided), or when a descriptor is assigned to the slot;
* they are of length equal to the descriptor's natts.
- *
- * The TTS_FLAG_SLOW flag is saved state for
- * slot_deform_heap_tuple, and should not be touched by any other code.
*----------
*/
#define TTS_FLAG_SHOULDFREE (1 << 2)
#define TTS_SHOULDFREE(slot) (((slot)->tts_flags & TTS_FLAG_SHOULDFREE) != 0)
-/* saved state for slot_deform_heap_tuple */
-#define TTS_FLAG_SLOW (1 << 3)
-#define TTS_SLOW(slot) (((slot)->tts_flags & TTS_FLAG_SLOW) != 0)
+/*
+ * true = slot's formed tuple guaranteed to not have NULLs in NOT NULLable
+ * columns.
+ */
+#define TTS_FLAG_OBEYS_NOT_NULL_CONSTRAINTS (1 << 3)
+#define TTS_OBEYS_NOT_NULL_CONSTRAINTS(slot) \
+ (((slot)->tts_flags & TTS_FLAG_OBEYS_NOT_NULL_CONSTRAINTS) != 0)
/* fixed tuple descriptor */
#define TTS_FLAG_FIXED (1 << 4)
#define TTS_FIXED(slot) (((slot)->tts_flags & TTS_FLAG_FIXED) != 0)
+/*
+ * Defines which of the above flags should never be set in tts_flags when the
+ * TupleTableSlot is created.
+ */
+#define TTS_FLAGS_TRANSIENT (TTS_FLAG_EMPTY|TTS_FLAG_SHOULDFREE)
+
struct TupleTableSlotOps;
typedef struct TupleTableSlotOps TupleTableSlotOps;
#define FIELDNO_TUPLETABLESLOT_VALUES 5
Datum *tts_values; /* current per-attribute values */
#define FIELDNO_TUPLETABLESLOT_ISNULL 6
- bool *tts_isnull; /* current per-attribute isnull flags */
+ bool *tts_isnull; /* current per-attribute isnull flags. Array
+ * size must always be rounded up to the next
+ * multiple of 8 elements. */
+ int tts_first_nonguaranteed; /* The value from the TupleDesc's
+ * firstNonGuaranteedAttr, or 0
+ * when tts_flags does not contain
+ * TTS_FLAG_OBEYS_NOT_NULL_CONSTRAINTS */
+
MemoryContext tts_mcxt; /* slot itself is in this context */
ItemPointerData tts_tid; /* stored tuple's tid */
Oid tts_tableOid; /* table oid of tuple */
/* in executor/execTuples.c */
extern TupleTableSlot *MakeTupleTableSlot(TupleDesc tupleDesc,
- const TupleTableSlotOps *tts_ops);
+ const TupleTableSlotOps *tts_ops,
+ uint16 flags);
extern TupleTableSlot *ExecAllocTableSlot(List **tupleTable, TupleDesc desc,
- const TupleTableSlotOps *tts_ops);
+ const TupleTableSlotOps *tts_ops,
+ uint16 flags);
extern void ExecResetTupleTable(List *tupleTable, bool shouldFree);
extern TupleTableSlot *MakeSingleTupleTableSlot(TupleDesc tupdesc,
const TupleTableSlotOps *tts_ops);