/*-
* See the file LICENSE for redistribution information.
*
* Copyright (c) 2009 Oracle. All rights reserved.
*
*/
using System;
using System.Collections.Generic;
using System.Runtime.InteropServices;
using System.Text;
using BerkeleyDB.Internal;
namespace BerkeleyDB {
/// <summary>
/// A class representing a HashDatabase. The Hash format is an extensible,
/// dynamic hashing scheme.
/// </summary>
public class HashDatabase : Database {
/* User-supplied callbacks, cached so the corresponding properties can
 * return them without a round trip through the native layer. */
private HashFunctionDelegate hashHandler;
private EntryComparisonDelegate compareHandler;
private EntryComparisonDelegate dupCompareHandler;
/*
 * References to the delegates handed to the native library. Holding
 * them in instance fields presumably keeps the marshalled function
 * pointers alive for the lifetime of this object (standard P/Invoke
 * practice) -- NOTE(review): confirm against the interop layer.
 */
private BDB_CompareDelegate doCompareRef;
private BDB_HashDelegate doHashRef;
private BDB_CompareDelegate doDupCompareRef;
#region Constructors
/// <summary>
/// Create an unopened HashDatabase handle in the given environment.
/// </summary>
private HashDatabase(DatabaseEnvironment env, uint flags)
: base(env, flags) { }
/// <summary>
/// Wrap an existing BaseDatabase handle as a HashDatabase.
/// </summary>
internal HashDatabase(BaseDatabase clone) : base(clone) { }
/// <summary>
/// Apply the Hash-specific pieces of <paramref name="cfg"/> to the
/// underlying native handle. Must be called before the database is
/// opened.
/// </summary>
private void Config(HashDatabaseConfig cfg) {
base.Config(cfg);
/*
 * Database.Config calls set_flags, but that doesn't get the Hash
 * specific flags. No harm in calling it again.
 */
db.set_flags(cfg.flags);
if (cfg.HashFunction != null)
HashFunction = cfg.HashFunction;
// The duplicate comparison function cannot change after open.
if (cfg.DuplicateCompare != null)
DupCompare = cfg.DuplicateCompare;
// Fill factor and table size may only be set before open, so they
// are applied here rather than exposed as settable properties.
if (cfg.fillFactorIsSet)
db.set_h_ffactor(cfg.FillFactor);
if (cfg.nelemIsSet)
db.set_h_nelem(cfg.TableSize);
if (cfg.HashComparison != null)
Compare = cfg.HashComparison;
}
/// <summary>
/// Instantiate a new HashDatabase object and open the database
/// represented by <paramref name="Filename"/>.
/// </summary>
/// <remarks>
/// <para>
/// If <paramref name="Filename"/> is null, the database is strictly
/// temporary and cannot be opened by any other thread of control, thus
/// the database can only be accessed by sharing the single database
/// object that created it, in circumstances where doing so is safe.
/// </para>
/// <para>
/// If <see cref="DatabaseConfig.AutoCommit"/> is set, the operation
/// will be implicitly transaction protected. Note that transactionally
/// protected operations on a database object requires the object itself
/// be transactionally protected during its open.
/// </para>
/// </remarks>
/// <param name="Filename">
/// The name of an underlying file that will be used to back the
/// database. In-memory databases never intended to be preserved on disk
/// may be created by setting this parameter to null.
/// </param>
/// <param name="cfg">The database's configuration</param>
/// <returns>A new, open database object</returns>
public static HashDatabase Open(
string Filename, HashDatabaseConfig cfg) {
return Open(Filename, null, cfg, null);
}
/// <summary>
/// Instantiate a new HashDatabase object and open the database
/// represented by <paramref name="Filename"/> and
/// <paramref name="DatabaseName"/>.
/// </summary>
/// <remarks>
/// <para>
/// If both <paramref name="Filename"/> and
/// <paramref name="DatabaseName"/> are null, the database is strictly
/// temporary and cannot be opened by any other thread of control, thus
/// the database can only be accessed by sharing the single database
/// object that created it, in circumstances where doing so is safe. If
/// <paramref name="Filename"/> is null and
/// <paramref name="DatabaseName"/> is non-null, the database can be
/// opened by other threads of control and will be replicated to client
/// sites in any replication group.
/// </para>
/// <para>
/// If <see cref="DatabaseConfig.AutoCommit"/> is set, the operation
/// will be implicitly transaction protected. Note that transactionally
/// protected operations on a database object requires the object itself
/// be transactionally protected during its open.
/// </para>
/// </remarks>
/// <param name="Filename">
/// The name of an underlying file that will be used to back the
/// database. In-memory databases never intended to be preserved on disk
/// may be created by setting this parameter to null.
/// </param>
/// <param name="DatabaseName">
/// This parameter allows applications to have multiple databases in a
/// single file. Although no DatabaseName needs to be specified, it is
/// an error to attempt to open a second database in a file that was not
/// initially created using a database name.
/// </param>
/// <param name="cfg">The database's configuration</param>
/// <returns>A new, open database object</returns>
public static HashDatabase Open(
string Filename, string DatabaseName, HashDatabaseConfig cfg) {
return Open(Filename, DatabaseName, cfg, null);
}
/// <summary>
/// Instantiate a new HashDatabase object and open the database
/// represented by <paramref name="Filename"/>.
/// </summary>
/// <remarks>
/// <para>
/// If <paramref name="Filename"/> is null, the database is strictly
/// temporary and cannot be opened by any other thread of control, thus
/// the database can only be accessed by sharing the single database
/// object that created it, in circumstances where doing so is safe.
/// </para>
/// <para>
/// If <paramref name="txn"/> is null, but
/// <see cref="DatabaseConfig.AutoCommit"/> is set, the operation will
/// be implicitly transaction protected. Note that transactionally
/// protected operations on a database object requires the object itself
/// be transactionally protected during its open. Also note that the
/// transaction must be committed before the object is closed.
/// </para>
/// </remarks>
/// <param name="Filename">
/// The name of an underlying file that will be used to back the
/// database. In-memory databases never intended to be preserved on disk
/// may be created by setting this parameter to null.
/// </param>
/// <param name="cfg">The database's configuration</param>
/// <param name="txn">
/// If the operation is part of an application-specified transaction,
/// <paramref name="txn"/> is a Transaction object returned from
/// <see cref="DatabaseEnvironment.BeginTransaction"/>; if
/// the operation is part of a Berkeley DB Concurrent Data Store group,
/// <paramref name="txn"/> is a handle returned from
/// <see cref="DatabaseEnvironment.BeginCDSGroup"/>; otherwise null.
/// </param>
/// <returns>A new, open database object</returns>
public static HashDatabase Open(
string Filename, HashDatabaseConfig cfg, Transaction txn) {
return Open(Filename, null, cfg, txn);
}
/// <summary>
/// Instantiate a new HashDatabase object and open the database
/// represented by <paramref name="Filename"/> and
/// <paramref name="DatabaseName"/>.
/// </summary>
/// <remarks>
/// <para>
/// If both <paramref name="Filename"/> and
/// <paramref name="DatabaseName"/> are null, the database is strictly
/// temporary and cannot be opened by any other thread of control, thus
/// the database can only be accessed by sharing the single database
/// object that created it, in circumstances where doing so is safe. If
/// <paramref name="Filename"/> is null and
/// <paramref name="DatabaseName"/> is non-null, the database can be
/// opened by other threads of control and will be replicated to client
/// sites in any replication group.
/// </para>
/// <para>
/// If <paramref name="txn"/> is null, but
/// <see cref="DatabaseConfig.AutoCommit"/> is set, the operation will
/// be implicitly transaction protected. Note that transactionally
/// protected operations on a database object requires the object itself
/// be transactionally protected during its open. Also note that the
/// transaction must be committed before the object is closed.
/// </para>
/// </remarks>
/// <param name="Filename">
/// The name of an underlying file that will be used to back the
/// database. In-memory databases never intended to be preserved on disk
/// may be created by setting this parameter to null.
/// </param>
/// <param name="DatabaseName">
/// This parameter allows applications to have multiple databases in a
/// single file. Although no DatabaseName needs to be specified, it is
/// an error to attempt to open a second database in a file that was not
/// initially created using a database name.
/// </param>
/// <param name="cfg">The database's configuration</param>
/// <param name="txn">
/// If the operation is part of an application-specified transaction,
/// <paramref name="txn"/> is a Transaction object returned from
/// <see cref="DatabaseEnvironment.BeginTransaction"/>; if
/// the operation is part of a Berkeley DB Concurrent Data Store group,
/// <paramref name="txn"/> is a handle returned from
/// <see cref="DatabaseEnvironment.BeginCDSGroup"/>; otherwise null.
/// </param>
/// <returns>A new, open database object</returns>
public static HashDatabase Open(string Filename,
string DatabaseName, HashDatabaseConfig cfg, Transaction txn) {
// Configure the handle first; Hash tuning parameters (fill factor,
// table size, callbacks) must be set before the native open call.
HashDatabase ret = new HashDatabase(cfg.Env, 0);
ret.Config(cfg);
ret.db.open(Transaction.getDB_TXN(txn),
Filename, DatabaseName, DBTYPE.DB_HASH, cfg.openFlags, 0);
ret.isOpen = true;
return ret;
}
#endregion Constructors
#region Callbacks
/*
 * Static trampolines invoked by the native library. Each wraps the raw
 * pointers in managed views and dispatches to the per-instance managed
 * delegate recovered via db.api_internal.
 */
// Native duplicate-comparison trampoline: forwards to DupCompare.
private static int doDupCompare(
IntPtr dbp, IntPtr dbt1p, IntPtr dbt2p) {
// false: wrap the native handles without taking ownership.
DB db = new DB(dbp, false);
DBT dbt1 = new DBT(dbt1p, false);
DBT dbt2 = new DBT(dbt2p, false);
return ((HashDatabase)(db.api_internal)).DupCompare(
DatabaseEntry.fromDBT(dbt1), DatabaseEntry.fromDBT(dbt2));
}
// Native hash trampoline: copies the key bytes into a managed array
// and forwards them to the user's HashFunctionDelegate.
private static uint doHash(IntPtr dbp, IntPtr datap, uint len) {
DB db = new DB(dbp, false);
byte[] t_data = new byte[len];
Marshal.Copy(datap, t_data, 0, (int)len);
return ((HashDatabase)(db.api_internal)).hashHandler(t_data);
}
// Native key-comparison trampoline: forwards to the user's comparison
// delegate stored in compareHandler.
private static int doCompare(IntPtr dbp, IntPtr dbtp1, IntPtr dbtp2) {
DB db = new DB(dbp, false);
DBT dbt1 = new DBT(dbtp1, false);
DBT dbt2 = new DBT(dbtp2, false);
return ((HashDatabase)(db.api_internal)).compareHandler(
DatabaseEntry.fromDBT(dbt1), DatabaseEntry.fromDBT(dbt2));
}
#endregion Callbacks
#region Properties
/// <summary>
/// The Hash key comparison function. The comparison function is called
/// whenever it is necessary to compare a key specified by the
/// application with a key currently stored in the database.
/// </summary>
public EntryComparisonDelegate Compare {
get { return compareHandler; }
private set {
if (value == null)
db.set_h_compare(null);
else if (compareHandler == null) {
// First registration: install the static trampoline once;
// subsequent assignments only swap the managed delegate.
if (doCompareRef == null)
doCompareRef = new BDB_CompareDelegate(doCompare);
db.set_h_compare(doCompareRef);
}
compareHandler = value;
}
}
/// <summary>
/// The duplicate data item comparison function.
/// </summary>
public EntryComparisonDelegate DupCompare {
get { return dupCompareHandler; }
private set {
/* Cannot be called after open. */
if (value == null)
db.set_dup_compare(null);
else if (dupCompareHandler == null) {
// Install the native trampoline only on first registration.
if (doDupCompareRef == null)
doDupCompareRef = new BDB_CompareDelegate(doDupCompare);
db.set_dup_compare(doDupCompareRef);
}
dupCompareHandler = value;
}
}
/// <summary>
/// Whether the insertion of duplicate data items in the database is
/// permitted, and whether duplicates items are sorted.
/// </summary>
public DuplicatesPolicy Duplicates {
get {
uint flags = 0;
db.get_flags(ref flags);
// DB_DUPSORT implies DB_DUP, so test for SORTED first.
if ((flags & DbConstants.DB_DUPSORT) != 0)
return DuplicatesPolicy.SORTED;
else if ((flags & DbConstants.DB_DUP) != 0)
return DuplicatesPolicy.UNSORTED;
else
return DuplicatesPolicy.NONE;
}
}
/// <summary>
/// The desired density within the hash table.
/// </summary>
public uint FillFactor {
get {
uint ret = 0;
db.get_h_ffactor(ref ret);
return ret;
}
}
/// <summary>
/// A user-defined hash function; if no hash function is specified, a
/// default hash function is used.
/// </summary>
public HashFunctionDelegate HashFunction {
get { return hashHandler; }
private set {
if (value == null)
db.set_h_hash(null);
else if (hashHandler == null) {
// Install the native trampoline only on first registration.
if (doHashRef == null)
doHashRef = new BDB_HashDelegate(doHash);
db.set_h_hash(doHashRef);
}
hashHandler = value;
}
}
/// <summary>
/// An estimate of the final size of the hash table.
/// </summary>
public uint TableSize {
get {
uint ret = 0;
db.get_h_nelem(ref ret);
return ret;
}
}
#endregion Properties
#region Methods
/// <summary>
/// Create a database cursor.
/// </summary>
/// <returns>A newly created cursor</returns>
public new HashCursor Cursor() {
return Cursor(new CursorConfig(), null);
}
/// <summary>
/// Create a database cursor with the given configuration.
/// </summary>
/// <param name="cfg">
/// The configuration properties for the cursor.
/// </param>
/// <returns>A newly created cursor</returns>
public new HashCursor Cursor(CursorConfig cfg) {
return Cursor(cfg, null);
}
/// <summary>
/// Create a transactionally protected database cursor.
/// </summary>
/// <param name="txn">
/// The transaction context in which the cursor may be used.
/// </param>
/// <returns>A newly created cursor</returns>
public new HashCursor Cursor(Transaction txn) {
return Cursor(new CursorConfig(), txn);
}
/// <summary>
/// Create a transactionally protected database cursor with the given
/// configuration.
/// </summary>
/// <param name="cfg">
/// The configuration properties for the cursor.
/// </param>
/// <param name="txn">
/// The transaction context in which the cursor may be used.
/// </param>
/// <returns>A newly created cursor</returns>
public new HashCursor Cursor(CursorConfig cfg, Transaction txn) {
return new HashCursor(
db.cursor(Transaction.getDB_TXN(txn), cfg.flags), Pagesize);
}
/// <summary>
/// Return the database statistical information which does not require
/// traversal of the database.
/// </summary>
/// <returns>
/// The database statistical information which does not require
/// traversal of the database.
/// </returns>
public HashStats FastStats() {
return Stats(null, true, Isolation.DEGREE_THREE);
}
/// <summary>
/// Return the database statistical information which does not require
/// traversal of the database.
/// </summary>
/// <param name="txn">
/// If the operation is part of an application-specified transaction,
/// <paramref name="txn"/> is a Transaction object returned from
/// <see cref="DatabaseEnvironment.BeginTransaction"/>; if
/// the operation is part of a Berkeley DB Concurrent Data Store group,
/// <paramref name="txn"/> is a handle returned from
/// <see cref="DatabaseEnvironment.BeginCDSGroup"/>; otherwise null.
/// </param>
/// <returns>
/// The database statistical information which does not require
/// traversal of the database.
/// </returns>
public HashStats FastStats(Transaction txn) {
return Stats(txn, true, Isolation.DEGREE_THREE);
}
/// <summary>
/// Return the database statistical information which does not require
/// traversal of the database.
/// </summary>
/// <remarks>
/// <para>
/// Among other things, this method makes it possible for applications
/// to request key and record counts without incurring the performance
/// penalty of traversing the entire database.
/// </para>
/// <para>
/// The statistical information is described by the
/// <see cref="BTreeStats"/>, <see cref="HashStats"/>,
/// <see cref="QueueStats"/>, and <see cref="RecnoStats"/> classes.
/// </para>
/// </remarks>
/// <param name="txn">
/// If the operation is part of an application-specified transaction,
/// <paramref name="txn"/> is a Transaction object returned from
/// <see cref="DatabaseEnvironment.BeginTransaction"/>; if
/// the operation is part of a Berkeley DB Concurrent Data Store group,
/// <paramref name="txn"/> is a handle returned from
/// <see cref="DatabaseEnvironment.BeginCDSGroup"/>; otherwise null.
/// </param>
/// <param name="isoDegree">
/// The level of isolation for database reads.
/// <see cref="Isolation.DEGREE_ONE"/> will be silently ignored for
/// databases which did not specify
/// <see cref="DatabaseConfig.ReadUncommitted"/>.
/// </param>
/// <returns>
/// The database statistical information which does not require
/// traversal of the database.
/// </returns>
public HashStats FastStats(Transaction txn, Isolation isoDegree) {
return Stats(txn, true, isoDegree);
}
/// <summary>
/// Return pages to the filesystem that are already free and at the end
/// of the file.
/// </summary>
/// <returns>
/// The number of database pages returned to the filesystem
/// </returns>
public uint TruncateUnusedPages() {
return TruncateUnusedPages(null);
}
/// <summary>
/// Return pages to the filesystem that are already free and at the end
/// of the file.
/// </summary>
/// <param name="txn">
/// If the operation is part of an application-specified transaction,
/// <paramref name="txn"/> is a Transaction object returned from
/// <see cref="DatabaseEnvironment.BeginTransaction"/>; if
/// the operation is part of a Berkeley DB Concurrent Data Store group,
/// <paramref name="txn"/> is a handle returned from
/// <see cref="DatabaseEnvironment.BeginCDSGroup"/>; otherwise null.
/// </param>
/// <returns>
/// The number of database pages returned to the filesystem
/// </returns>
public uint TruncateUnusedPages(Transaction txn) {
DB_COMPACT cdata = new DB_COMPACT();
// DB_FREELIST_ONLY: release free pages at the end of the file
// without relocating any records (no compaction pass).
db.compact(Transaction.getDB_TXN(txn),
null, null, cdata, DbConstants.DB_FREELIST_ONLY, null);
return cdata.compact_pages_truncated;
}
/// <summary>
/// Store the key/data pair in the database only if it does not already
/// appear in the database.
/// </summary>
/// <param name="key">The key to store in the database</param>
/// <param name="data">The data item to store in the database</param>
public void PutNoDuplicate(DatabaseEntry key, DatabaseEntry data) {
PutNoDuplicate(key, data, null);
}
/// <summary>
/// Store the key/data pair in the database only if it does not already
/// appear in the database.
/// </summary>
/// <param name="key">The key to store in the database</param>
/// <param name="data">The data item to store in the database</param>
/// <param name="txn">
/// If the operation is part of an application-specified transaction,
/// <paramref name="txn"/> is a Transaction object returned from
/// <see cref="DatabaseEnvironment.BeginTransaction"/>; if
/// the operation is part of a Berkeley DB Concurrent Data Store group,
/// <paramref name="txn"/> is a handle returned from
/// <see cref="DatabaseEnvironment.BeginCDSGroup"/>; otherwise null.
/// </param>
public void PutNoDuplicate(
DatabaseEntry key, DatabaseEntry data, Transaction txn) {
Put(key, data, txn, DbConstants.DB_NODUPDATA);
}
/// <summary>
/// Return the database statistical information for this database.
/// </summary>
/// <returns>Database statistical information.</returns>
public HashStats Stats() {
return Stats(null, false, Isolation.DEGREE_THREE);
}
/// <summary>
/// Return the database statistical information for this database.
/// </summary>
/// <param name="txn">
/// If the operation is part of an application-specified transaction,
/// <paramref name="txn"/> is a Transaction object returned from
/// <see cref="DatabaseEnvironment.BeginTransaction"/>; if
/// the operation is part of a Berkeley DB Concurrent Data Store group,
/// <paramref name="txn"/> is a handle returned from
/// <see cref="DatabaseEnvironment.BeginCDSGroup"/>; otherwise null.
/// </param>
/// <returns>Database statistical information.</returns>
public HashStats Stats(Transaction txn) {
return Stats(txn, false, Isolation.DEGREE_THREE);
}
/// <summary>
/// Return the database statistical information for this database.
/// </summary>
/// <remarks>
/// The statistical information is described by
/// <see cref="HashStats"/>.
/// </remarks>
/// <param name="txn">
/// If the operation is part of an application-specified transaction,
/// <paramref name="txn"/> is a Transaction object returned from
/// <see cref="DatabaseEnvironment.BeginTransaction"/>; if
/// the operation is part of a Berkeley DB Concurrent Data Store group,
/// <paramref name="txn"/> is a handle returned from
/// <see cref="DatabaseEnvironment.BeginCDSGroup"/>; otherwise null.
/// </param>
/// <param name="isoDegree">
/// The level of isolation for database reads.
/// <see cref="Isolation.DEGREE_ONE"/> will be silently ignored for
/// databases which did not specify
/// <see cref="DatabaseConfig.ReadUncommitted"/>.
/// </param>
/// <returns>Database statistical information.</returns>
public HashStats Stats(Transaction txn, Isolation isoDegree) {
return Stats(txn, false, isoDegree);
}
/// <summary>
/// Shared implementation behind Stats/FastStats: translate the managed
/// options into native stat flags and fetch the statistics structure.
/// </summary>
private HashStats Stats(
Transaction txn, bool fast, Isolation isoDegree) {
uint flags = 0;
flags |= fast ? DbConstants.DB_FAST_STAT : 0;
switch (isoDegree) {
case Isolation.DEGREE_ONE:
flags |= DbConstants.DB_READ_UNCOMMITTED;
break;
case Isolation.DEGREE_TWO:
flags |= DbConstants.DB_READ_COMMITTED;
break;
// DEGREE_THREE: no extra flag; default (serializable) reads.
}
HashStatStruct st = db.stat_hash(Transaction.getDB_TXN(txn), flags);
return new HashStats(st);
}
#endregion Methods
}
}