n = cm->ncds * 2;
if (n > MAX_COLOR + 1)
n = MAX_COLOR + 1;
+ /* the MAX_COLOR+1 limit ensures these alloc sizes can't overflow: */
if (cm->cd == cm->cdspace)
{
newCd = (struct colordesc *) MALLOC(n * sizeof(struct colordesc));
CERR(REG_ESPACE);
return 0;
}
- newarray = (color *) REALLOC(cm->hicolormap,
- cm->maxarrayrows * 2 *
- cm->hiarraycols * sizeof(color));
+ newarray = REALLOC_ARRAY(cm->hicolormap, color,
+ cm->maxarrayrows * 2 * cm->hiarraycols);
if (newarray == NULL)
{
CERR(REG_ESPACE);
CERR(REG_ESPACE);
return;
}
- newarray = (color *) REALLOC(cm->hicolormap,
- cm->maxarrayrows *
- cm->hiarraycols * 2 * sizeof(color));
+ newarray = REALLOC_ARRAY(cm->hicolormap, color,
+ cm->maxarrayrows * cm->hiarraycols * 2);
if (newarray == NULL)
{
CERR(REG_ESPACE);
* Potentially, we could need two more colormapranges than we have now, if
* the given chr is in the middle of some existing range.
*/
- newranges = (colormaprange *)
- MALLOC((cm->numcmranges + 2) * sizeof(colormaprange));
+ newranges = MALLOC_ARRAY(colormaprange, cm->numcmranges + 2);
if (newranges == NULL)
{
CERR(REG_ESPACE);
* Potentially, if we have N non-adjacent ranges, we could need as many as
* 2N+1 result ranges (consider case where new range spans 'em all).
*/
- newranges = (colormaprange *)
- MALLOC((cm->numcmranges * 2 + 1) * sizeof(colormaprange));
+ newranges = MALLOC_ARRAY(colormaprange, cm->numcmranges * 2 + 1);
if (newranges == NULL)
{
CERR(REG_ESPACE);
/*
* newcvec - allocate a new cvec
+ *
+ * Note: in current usage, nchrs and nranges are never so large that we risk
+ * integer overflow in these size calculations, even with 32-bit size_t.
*/
static struct cvec *
newcvec(int nchrs, /* to hold this many chrs... */
assert(!NISERR());
+ /*
+ * The REG_MAX_COMPILE_SPACE restriction ensures that integer overflow
+ * can't occur in this loop nor in the allocation requests below.
+ */
nstates = 0;
narcs = 0;
for (s = nfa->states; s != NULL; s = s->next)
case LACON:
assert(s->no != cnfa->pre);
assert(a->co >= 0);
+ /* make sure the modified color number will fit */
+ if (a->co > MAX_COLOR - cnfa->ncolors)
+ {
+ NERR(REG_ECOLORS);
+ return;
+ }
ca->co = (color) (cnfa->ncolors + a->co);
ca->to = a->to->no;
ca++;
assert(wanted > 0 && (size_t) wanted >= v->nsubs);
n = (size_t) wanted * 3 / 2 + 1;
+ /* n is bounded by the number of states, so no chance of overflow here */
if (v->subs == v->sub10)
{
p = (struct subre **) MALLOC(n * sizeof(struct subre *));
else
{
n = v->nlacons;
- newlacons = (struct subre *) REALLOC(v->lacons,
- (n + 1) * sizeof(struct subre));
+ /* better use REALLOC_ARRAY here, as struct subre is big */
+ newlacons = REALLOC_ARRAY(v->lacons, struct subre, n + 1);
}
if (newlacons == NULL)
{
}
else
{
+ /*
+ * Restrict the ranges of nstates and ncolors enough that the arrays
+ * we allocate here have no more than INT_MAX members. This protects
+ * not only the allocation calculations just below, but later indexing
+ * into these arrays.
+ */
+ if (wordsper >= INT_MAX / (nss + WORK) ||
+ cnfa->ncolors >= INT_MAX / nss)
+ {
+ ERR(REG_ETOOBIG);
+ return NULL;
+ }
d = (struct dfa *) MALLOC(sizeof(struct dfa));
if (d == NULL)
{
ERR(REG_ESPACE);
return NULL;
}
- d->ssets = (struct sset *) MALLOC(nss * sizeof(struct sset));
- d->statesarea = (unsigned *) MALLOC((nss + WORK) * wordsper *
- sizeof(unsigned));
+ d->ssets = MALLOC_ARRAY(struct sset, nss);
+ d->statesarea = MALLOC_ARRAY(unsigned, (nss + WORK) * wordsper);
d->work = &d->statesarea[nss * wordsper];
- d->outsarea = (struct sset **) MALLOC(nss * cnfa->ncolors *
- sizeof(struct sset *));
- d->incarea = (struct arcp *) MALLOC(nss * cnfa->ncolors *
- sizeof(struct arcp));
+ d->outsarea = MALLOC_ARRAY(struct sset *, nss * cnfa->ncolors);
+ d->incarea = MALLOC_ARRAY(struct arcp, nss * cnfa->ncolors);
d->ismalloced = true;
d->arraysmalloced = true;
/* now freedfa() will behave sanely */
if (v->nmatch <= LOCALMAT)
v->pmatch = mat;
else
- v->pmatch = (regmatch_t *) MALLOC(v->nmatch * sizeof(regmatch_t));
+ v->pmatch = MALLOC_ARRAY(regmatch_t, v->nmatch);
if (v->pmatch == NULL)
return REG_ESPACE;
zapallsubs(v->pmatch, v->nmatch);
v->subdfas = subdfas;
else
{
+ /* ntree is surely less than the number of states, so this is safe: */
v->subdfas = (struct dfa **) MALLOC(n * sizeof(struct dfa *));
if (v->subdfas == NULL)
{
n = (size_t) v->g->nlacons;
if (n > 0)
{
+ /* nlacons is surely less than the number of arcs, so this is safe: */
v->ladfas = (struct dfa **) MALLOC(n * sizeof(struct dfa *));
if (v->ladfas == NULL)
{
max_matches = t->max;
if (max_matches < min_matches)
max_matches = min_matches;
- endpts = (chr **) MALLOC((max_matches + 1) * sizeof(chr *));
+ endpts = MALLOC_ARRAY(chr *, max_matches + 1);
if (endpts == NULL)
return REG_ESPACE;
endpts[0] = begin;
max_matches = t->max;
if (max_matches < min_matches)
max_matches = min_matches;
- endpts = (chr **) MALLOC((max_matches + 1) * sizeof(chr *));
+ endpts = MALLOC_ARRAY(chr *, max_matches + 1);
if (endpts == NULL)
return REG_ESPACE;
endpts[0] = begin;
#define MALLOC(n) palloc_extended((n), MCXT_ALLOC_NO_OOM)
#define FREE(p) pfree(VS(p))
#define REALLOC(p,n) repalloc_extended(VS(p),(n), MCXT_ALLOC_NO_OOM)
+#define MALLOC_ARRAY(type, n) palloc_array_extended(type, n, MCXT_ALLOC_NO_OOM)
+#define REALLOC_ARRAY(p, type, n) repalloc_array_extended(p, type, n, MCXT_ALLOC_NO_OOM)
#define INTERRUPT(re) CHECK_FOR_INTERRUPTS()
#undef assert
#define assert(x) Assert(x)
#ifndef FREE
#define FREE(p) free(VS(p))
#endif
+#ifndef MALLOC_ARRAY
+/* we don't depend on calloc's zeroing behavior, but we do want its overflow check */
+#define MALLOC_ARRAY(type, n) ((type *) calloc(sizeof(type), n))
+#endif
+#ifndef REALLOC_ARRAY
+/* XXX this definition does not provide the desired overflow check */
+#define REALLOC_ARRAY(p, type, n) ((type *) REALLOC(p, sizeof(type) * (n)))
+#endif
/* interruption */
#ifndef INTERRUPT
* (the compacted NFA and the colormap).
* The scaling here is based on an empirical measurement that very large
* NFAs tend to have about 4 arcs/state.
+ *
+ * Do not raise this so high as to allow more than INT_MAX/8 states or arcs,
+ * or you risk integer overflows in various space allocation requests.
+ * (We could be more defensive in those places, but that's so far beyond the
+ * practical range of NFA sizes that it doesn't seem worth additional code.)
*/
#ifndef REG_MAX_COMPILE_SPACE
#define REG_MAX_COMPILE_SPACE \