/*- * Copyright (c) 1990, 1993, 1994 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Margo Seltzer. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. ***REMOVED*** - see * ftp://ftp.cs.berkeley.edu/pub/4bsd/README.Impt.License.Change * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE.
*/
#if defined(LIBC_SCCS) && !defined(lint)
/* SCCS version identifier, excluded from lint builds. */
static char sccsid[] = "@(#)hash.c 8.9 (Berkeley) 6/16/94";
#endif /* LIBC_SCCS and not lint */

#ifdef HASH_STATISTICS
/* Global instrumentation counters, maintained only when statistics are enabled. */
int hash_accesses, hash_collisions, hash_expansions, hash_overflows;
#endif
/*
 * A new Lou (montulli@mozilla.com) routine.
 *
 * The database is screwed.
 *
 * This closes the file, flushing buffers as appropriate.
 */
static void
dbm_remove_database(DB *dbp)
{
	HTAB *hashp = (HTAB *)dbp->internal;

	/*
	 * Reaching this function means the database is corrupt; trap
	 * unconditionally in debug builds so the caller gets noticed.
	 */
	assert(0);

	if (!hashp)
		return;

	/* hdestroy() flushes and frees everything hanging off the HTAB. */
	hdestroy(hashp);
	dbp->internal = NULL;
}
extern DB *
dbm_hash_open(constchar *file, int flags, int mode, const HASHINFO *info, int dflags)
{
/*
 * NOTE(review): this region appears truncated/garbled by extraction — tokens
 * are fused ("constchar", "elseif") and whole sections (the allocation of
 * hashp, the open() of the file, the new-table initialization path) are
 * missing between the lines below. Reconstruct from the original
 * Berkeley/mozilla dbm hash.c before trusting this text.
 */
HTAB *hashp = NULL; struct stat statbuf;
DB *dbp; int bpages, hdrsize, new_table, nsegs, save_errno;
/* * Even if user wants write only, we need to be able to read * the actual file, so we need to open it read/write. But, the * field in the hashp structure needs to be accurate so that * we can check accesses.
*/
/*
 * NOTE(review): hashp is still NULL at this point — this store would crash.
 * The allocation of hashp has been lost from this copy.
 */
hashp->flags = flags;
new_table = 0; if (!file || (flags & O_TRUNC) || (stat(file, &statbuf) && (errno == ENOENT))) { if (errno == ENOENT)
errno = 0; /* Just in case someone looks at errno */
new_table = 1;
} elseif (statbuf.st_mtime && statbuf.st_size == 0) { /* check for a zero length file and delete it * if it exists
*/
new_table = 1;
}
hashp->file_size = statbuf.st_size;
/*
 * NOTE(review): header-read path for an existing table; hashp->fp is used
 * below but the open() call that sets it is missing from this copy.
 */
hdrsize = read(hashp->fp, (char *)&hashp->hdr, sizeof(HASHHDR)); if (hdrsize == -1)
RETURN_ERROR(errno, error1); if (hdrsize != sizeof(HASHHDR))
RETURN_ERROR(EFTYPE, error1); #if BYTE_ORDER == LITTLE_ENDIAN
swap_header(hashp); #endif /* Verify file type, versions and hash function */ if (hashp->MAGIC != HASHMAGIC)
RETURN_ERROR(EFTYPE, error1); #define OLDHASHVERSION 1 if (hashp->VERSION != HASHVERSION &&
hashp->VERSION != OLDHASHVERSION)
RETURN_ERROR(EFTYPE, error1); if (hashp->hash(CHARKEY, sizeof(CHARKEY)) != hashp->H_CHARKEY)
RETURN_ERROR(EFTYPE, error1); if (hashp->NKEYS < 0) /* Old bad database. */
RETURN_ERROR(EFTYPE, error1);
/* * Figure out how many segments we need. Max_Bucket is the * maximum bucket number, so the number of buckets is * max_bucket + 1.
*/
nsegs = (hashp->MAX_BUCKET + 1 + hashp->SGSIZE - 1) /
hashp->SGSIZE;
hashp->nsegs = 0; if (alloc_segs(hashp, nsegs)) /* If alloc_segs fails, errno will have been set. */
RETURN_ERROR(errno, error1); /* Read in bitmaps */
bpages = (hashp->SPARES[hashp->OVFL_POINT] +
(hashp->BSIZE << BYTE_SHIFT) - 1) >>
(hashp->BSHIFT + BYTE_SHIFT);
/* new code added by Lou to reduce block * size down below MAX_BSIZE
*/ if (hashp->BSIZE > MAX_BSIZE)
/*
 * NOTE(review): the stray "#endif" below has no matching "#if" in this
 * copy — more evidence of lost lines.
 */
hashp->BSIZE = MAX_BSIZE; #endif
hashp->BSHIFT = dbm_log2((uint32)hashp->BSIZE);
}
/*
 * NOTE(review): the info-override logic below belongs to the new-table path
 * of hash_open; "nelem" is used but never declared in the visible text.
 */
if (info) { if (info->bsize) { /* Round pagesize up to power of 2 */
hashp->BSHIFT = dbm_log2(info->bsize);
hashp->BSIZE = 1 << hashp->BSHIFT; if (hashp->BSIZE > MAX_BSIZE) {
errno = EINVAL; return (NULL);
}
} if (info->ffactor)
hashp->FFACTOR = info->ffactor; if (info->hash)
hashp->hash = info->hash; if (info->nelem)
nelem = info->nelem; if (info->lorder) { if (info->lorder != BIG_ENDIAN &&
info->lorder != LITTLE_ENDIAN) {
errno = EINVAL; return (NULL);
}
hashp->LORDER = info->lorder;
}
} /* init_htab sets errno if it fails */ if (init_htab(hashp, nelem)) return (NULL); else return (hashp);
} /* * This calls alloc_segs which may run out of memory. Alloc_segs will * set errno, so we just pass the error information along. * * Returns 0 on No Error
*/ staticint
init_htab(HTAB *hashp, int nelem)
{ registerint nbuckets, nsegs; int l2;
/*
 * NOTE(review): init_htab is truncated in this copy — the bucket-count
 * computation (the code that would use nbuckets, nsegs and l2) and the
 * closing brace are missing after the line below.
 */
/* * Divide number of elements by the fill factor and determine a * desired number of buckets. Allocate space for the next greater * power of two number of buckets.
*/
nelem = (nelem - 1) / hashp->FFACTOR + 1;
/*
 * Flushes any changes to the file if necessary and destroys the hashp
 * structure, freeing all allocated space.
 *
 * Returns SUCCESS, or DBM_ERROR with errno set to the first error seen
 * while flushing buffers or metadata.
 */
static int
hdestroy(HTAB *hashp)
{
	int i, save_errno;

	/*
	 * Must start at 0: it is only conditionally assigned below, and is
	 * read unconditionally afterwards.
	 */
	save_errno = 0;

#ifdef HASH_STATISTICS
	/* Dump per-spare-page statistics when built with instrumentation. */
	for (i = 0; i < NCACHED; i++)
		(void)fprintf(stderr,
		    "spares[%d] = %d\n", i, hashp->SPARES[i]);
#endif
	/*
	 * Call on buffer manager to free buffers, and if required,
	 * write them to disk.
	 */
	if (dbm_buf_free(hashp, 1, hashp->save_file))
		save_errno = errno;
	if (hashp->dir) {
		free(*hashp->dir);	/* Free initial segments */
		/* Free extra segments */
		while (hashp->exsegs--)
			free(hashp->dir[--hashp->nsegs]);
		free(hashp->dir);
	}
	/* Keep the first error; a later flush error must not mask it. */
	if (flush_meta(hashp) && !save_errno)
		save_errno = errno;
	/* Free Bigmaps */
	for (i = 0; i < hashp->nmaps; i++)
		if (hashp->mapp[i])
			free(hashp->mapp[i]);

	if (hashp->fp != -1)
		(void)close(hashp->fp);

	if (hashp->filename) {
#if defined(_WIN32) || defined(_WINDOWS)
		/* Temporary backing files are removed on Windows builds. */
		if (hashp->is_temp)
			(void)unlink(hashp->filename);
#endif
		free(hashp->filename);
	}
	if (hashp->tmp_buf)
		free(hashp->tmp_buf);
	if (hashp->tmp_key)
		free(hashp->tmp_key);
	free(hashp);

	if (save_errno) {
		errno = save_errno;
		return (DBM_ERROR);
	}
	return (SUCCESS);
}
#ifdefined(_WIN32) || defined(_WINDOWS) /* * Close and reopen file to force file length update on windows. * * Returns: * 0 == OK * -1 DBM_ERROR
*/ staticint
update_EOF(HTAB *hashp)
{ #ifdefined(DBM_REOPEN_ON_FLUSH) char *file = hashp->filename;
/*
 * NOTE(review): the fused token "#ifdefined" will not preprocess; it should
 * read "#if defined(...)". This region is garbled by extraction — verify
 * against the original dbm hash.c.
 */
off_t file_size; int flags; int mode = -1; struct stat statbuf;
memset(&statbuf, 0, sizeof statbuf);
/* make sure we won't lose the file by closing it. */ if (!file || (stat(file, &statbuf) && (errno == ENOENT))) { /* pretend we did it. */ return 0;
}
/*
 * NOTE(review): splice point — the lines below reference "dbp", which is not
 * a parameter of update_EOF; they appear to belong to hash_sync(). The real
 * body of update_EOF (close/reopen of hashp->fp) is missing from this copy.
 */
hashp = (HTAB *)dbp->internal; if (!hashp) return (DBM_ERROR);
if (!hashp->save_file) return (0); if (dbm_buf_free(hashp, 0, 1) || flush_meta(hashp)) return (DBM_ERROR); #ifdefined(_WIN32) || defined(_WINDOWS) if (hashp->updateEOF && hashp->filename && !hashp->is_temp) { int status = update_EOF(hashp);
hashp->updateEOF = 0; if (status) return status;
} #endif
hashp->new_file = 0; return (0);
}
/* * Returns: * 0 == OK * -1 indicates that errno should be set
*/ staticint
flush_meta(HTAB *hashp)
{
/*
 * NOTE(review): flush_meta is truncated in this copy — only the local
 * declarations survive; the header byte-swap/write logic and the closing
 * brace are missing after the lines below.
 */
HASHHDR *whdrp; #if BYTE_ORDER == LITTLE_ENDIAN
HASHHDR whdr; #endif int fp, i, wsize;
/*******************************SEARCH ROUTINES *****************************/ /* * All the access routines return * * Returns: * 0 on SUCCESS * 1 to indicate an external DBM_ERROR (i.e. key not found, etc) * -1 to indicate an internal DBM_ERROR (i.e. out of memory, etc)
*/ staticint
hash_get( const DB *dbp, const DBT *key,
DBT *data,
uint flag)
{
HTAB *hashp; int rv;
hashp = (HTAB *)dbp->internal; if (!hashp) return (DBM_ERROR);
/*
 * NOTE(review): splice point — everything below belongs to the table
 * expansion routine (dbm_expand_table in the original source), not to
 * hash_get: new_segnum, dirsize, spare_ndx, old_bucket and new_bucket are
 * never declared in this function. hash_get's real body (the flag check and
 * the dbm_access call) is missing from this copy.
 */
/* Check if we need a new segment */ if (new_segnum >= hashp->nsegs) { /* Check if we need to expand directory */ if (new_segnum >= hashp->DSIZE) { /* Reallocate directory */
dirsize = hashp->DSIZE * sizeof(SEGMENT *); if (!hash_realloc(&hashp->dir, dirsize, dirsize << 1)) return (-1);
hashp->DSIZE = dirsize << 1;
} if ((hashp->dir[new_segnum] =
(SEGMENT)calloc((size_t)hashp->SGSIZE, sizeof(BUFHEAD *))) == NULL) return (-1);
hashp->exsegs++;
hashp->nsegs++;
} /* * If the split point is increasing (MAX_BUCKET's log base 2 * * increases), we need to copy the current contents of the spare * split bucket to the next bucket.
*/
spare_ndx = dbm_log2((uint32)(hashp->MAX_BUCKET + 1)); if (spare_ndx > hashp->OVFL_POINT) {
hashp->SPARES[spare_ndx] = hashp->SPARES[hashp->OVFL_POINT];
hashp->OVFL_POINT = spare_ndx;
}
if (new_bucket > (uint32)hashp->HIGH_MASK) { /* Starting a new doubling */
hashp->LOW_MASK = hashp->HIGH_MASK;
hashp->HIGH_MASK = new_bucket | hashp->LOW_MASK;
} /* Relocate records to the new bucket */ return (dbm_split_page(hashp, old_bucket, new_bucket));
}
/* * If realloc guarantees that the pointer is not destroyed if the realloc * fails, then this routine can go away.
*/ staticvoid *
hash_realloc(
SEGMENT **p_ptr,
size_t oldsize, size_t newsize)
{ registervoid *p;
/*
 * NOTE(review): hash_realloc is truncated in this copy — the
 * malloc/memcpy/free body and the closing brace are missing.
 */
/*
 * NOTE(review): extraneous text appended to the source file — a German
 * website disclaimer from the page this file was scraped from, not part of
 * the code. English translation: "The information on this website has been
 * compiled carefully to the best of our knowledge. However, neither the
 * completeness, correctness, nor quality of the information provided is
 * guaranteed. Note: the colored syntax highlighting and the measurement are
 * still experimental."
 */