/*
* Constants
*
- * A hash table has a top-level "directory", each of whose entries points
- * to a "segment" of ssize bucket headers. The maximum number of hash
- * buckets is thus dsize * ssize (but dsize may be expansible). Of course,
- * the number of records in the table can be larger, but we don't want a
- * whole lot of records per bucket or performance goes down.
+ * A hash table has a top-level "directory", each of whose entries points to a
+ * "segment" of HASH_SEGSIZE bucket headers. The maximum number of hash
+ * buckets is thus dsize * HASH_SEGSIZE (but dsize may be expansible). Of
+ * course, the number of records in the table can be larger, but we don't want
+ * a whole lot of records per bucket or performance goes down.
*
* In a hash table allocated in shared memory, the directory cannot be
* expanded because it must stay at a fixed address. The directory size
* should be selected using hash_select_dirsize (and you'd better have
* a good idea of the maximum number of entries!). For non-shared hash
* tables, the initial directory size can be left at the default.
*/
-#define DEF_SEGSIZE 256
-#define DEF_SEGSIZE_SHIFT 8 /* must be log2(DEF_SEGSIZE) */
+#define HASH_SEGSIZE 256
+#define HASH_SEGSIZE_SHIFT 8 /* must be log2(HASH_SEGSIZE) */
#define DEF_DIRSIZE 256
/* Number of freelists to be used for a partitioned hash table. */
Size entrysize; /* total user element size in bytes */
int64 num_partitions; /* # partitions (must be power of 2), or 0 */
int64 max_dsize; /* 'dsize' limit if directory is fixed size */
- int64 ssize; /* segment size --- must be power of 2 */
- int sshift; /* segment shift = log2(ssize) */
int nelem_alloc; /* number of entries to allocate at once */
bool isfixed; /* if true, don't enlarge */
/* We keep local copies of these fixed values to reduce contention */
Size keysize; /* hash key length in bytes */
- int64 ssize; /* segment size --- must be power of 2 */
- int sshift; /* segment shift = log2(ssize) */
/*
* In a USE_VALGRIND build, non-shared hashtables keep an slist chain of
/* make local copies of some heavily-used values */
hctl = hashp->hctl;
hashp->keysize = hctl->keysize;
- hashp->ssize = hctl->ssize;
- hashp->sshift = hctl->sshift;
return hashp;
}
hctl->num_partitions = info->num_partitions;
}
- if (flags & HASH_SEGMENT)
- {
- hctl->ssize = info->ssize;
- hctl->sshift = my_log2(info->ssize);
- /* ssize had better be a power of 2 */
- Assert(hctl->ssize == (1L << hctl->sshift));
- }
-
/*
* SHM hash tables have fixed directory size passed by the caller.
*/
/* make local copies of heavily-used constant fields */
hashp->keysize = hctl->keysize;
- hashp->ssize = hctl->ssize;
- hashp->sshift = hctl->sshift;
/* Build the hash directory structure */
if (!init_htab(hashp, nelem))
/* table has no fixed maximum size */
hctl->max_dsize = NO_MAX_DSIZE;
- hctl->ssize = DEF_SEGSIZE;
- hctl->sshift = DEF_SEGSIZE_SHIFT;
-
hctl->isfixed = false; /* can be enlarged */
#ifdef HASH_STATISTICS
/*
* Figure number of directory segments needed, round up to a power of 2
*/
- nsegs = (nbuckets - 1) / hctl->ssize + 1;
+ nsegs = (nbuckets - 1) / HASH_SEGSIZE + 1;
nsegs = next_pow2_int(nsegs);
/*
/* estimate number of buckets wanted */
nBuckets = next_pow2_int64(num_entries);
/* # of segments needed for nBuckets */
- nSegments = next_pow2_int64((nBuckets - 1) / DEF_SEGSIZE + 1);
+ nSegments = next_pow2_int64((nBuckets - 1) / HASH_SEGSIZE + 1);
/* directory entries */
nDirEntries = DEF_DIRSIZE;
while (nDirEntries < nSegments)
size = add_size(size, mul_size(nDirEntries, sizeof(HASHSEGMENT)));
/* segments */
size = add_size(size, mul_size(nSegments,
- MAXALIGN(DEF_SEGSIZE * sizeof(HASHBUCKET))));
+ MAXALIGN(HASH_SEGSIZE * sizeof(HASHBUCKET))));
/* elements --- allocated in groups of choose_nelem_alloc() entries */
elementAllocCnt = choose_nelem_alloc(entrysize);
nElementAllocs = (num_entries - 1) / elementAllocCnt + 1;
/* estimate number of buckets wanted */
nBuckets = next_pow2_int64(num_entries);
/* # of segments needed for nBuckets */
- nSegments = next_pow2_int64((nBuckets - 1) / DEF_SEGSIZE + 1);
+ nSegments = next_pow2_int64((nBuckets - 1) / HASH_SEGSIZE + 1);
/* directory entries */
nDirEntries = DEF_DIRSIZE;
while (nDirEntries < nSegments)
HTAB *hashp;
HASHHDR *hctl;
uint32 max_bucket;
- int64 ssize;
int64 segment_num;
int64 segment_ndx;
HASHSEGMENT segp;
curBucket = status->curBucket;
hashp = status->hashp;
hctl = hashp->hctl;
- ssize = hashp->ssize;
max_bucket = hctl->max_bucket;
if (curBucket > max_bucket)
/*
* first find the right segment in the table directory.
*/
- segment_num = curBucket >> hashp->sshift;
- segment_ndx = MOD(curBucket, ssize);
+ segment_num = curBucket >> HASH_SEGSIZE_SHIFT;
+ segment_ndx = MOD(curBucket, HASH_SEGSIZE);
segp = hashp->dir[segment_num];
hash_seq_term(status);
return NULL; /* search is done */
}
- if (++segment_ndx >= ssize)
+ if (++segment_ndx >= HASH_SEGSIZE)
{
segment_num++;
segment_ndx = 0;
#endif
new_bucket = hctl->max_bucket + 1;
- new_segnum = new_bucket >> hashp->sshift;
- new_segndx = MOD(new_bucket, hashp->ssize);
+ new_segnum = new_bucket >> HASH_SEGSIZE_SHIFT;
+ new_segndx = MOD(new_bucket, HASH_SEGSIZE);
if (new_segnum >= hctl->nsegs)
{
* split at this point. With a different way of reducing the hash value,
* that might not be true!
*/
- old_segnum = old_bucket >> hashp->sshift;
- old_segndx = MOD(old_bucket, hashp->ssize);
+ old_segnum = old_bucket >> HASH_SEGSIZE_SHIFT;
+ old_segndx = MOD(old_bucket, HASH_SEGSIZE);
old_seg = hashp->dir[old_segnum];
new_seg = hashp->dir[new_segnum];
HASHSEGMENT segp;
CurrentDynaHashCxt = hashp->hcxt;
- segp = (HASHSEGMENT) hashp->alloc(sizeof(HASHBUCKET) * hashp->ssize);
+ segp = (HASHSEGMENT) hashp->alloc(sizeof(HASHBUCKET) * HASH_SEGSIZE);
if (!segp)
return NULL;
- MemSet(segp, 0, sizeof(HASHBUCKET) * hashp->ssize);
+ MemSet(segp, 0, sizeof(HASHBUCKET) * HASH_SEGSIZE);
return segp;
}
bucket = calc_bucket(hctl, hashvalue);
- segment_num = bucket >> hashp->sshift;
- segment_ndx = MOD(bucket, hashp->ssize);
+ segment_num = bucket >> HASH_SEGSIZE_SHIFT;
+ segment_ndx = MOD(bucket, HASH_SEGSIZE);
segp = hashp->dir[segment_num];