use crate::{
column_family::AsColumnFamilyRef,
column_family::BoundColumnFamily,
column_family::UnboundColumnFamily,
db_options::OptionsMustOutliveDB,
ffi,
ffi_util::{from_cstr, opt_bytes_to_ptr, raw_data, to_cpath, CStrLike},
ColumnFamily, ColumnFamilyDescriptor, CompactOptions, DBIteratorWithThreadMode,
DBPinnableSlice, DBRawIteratorWithThreadMode, DBWALIterator, Direction, Error, FlushOptions,
IngestExternalFileOptions, IteratorMode, Options, ReadOptions, SnapshotWithThreadMode,
WriteBatch, WriteOptions, DEFAULT_COLUMN_FAMILY_NAME,
};
use crate::ffi_util::CSlice;
use libc::{self, c_char, c_int, c_uchar, c_void, size_t};
use std::collections::BTreeMap;
use std::ffi::{CStr, CString};
use std::fmt;
use std::fs;
use std::iter;
use std::path::{Path, PathBuf};
use std::ptr;
use std::slice;
use std::str;
use std::sync::Arc;
use std::sync::RwLock;
use std::time::Duration;
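/// Describes how column family handles are stored, and therefore whether
/// column families can be created and dropped through a shared (`&self`) or
/// only an exclusive (`&mut self`) reference. See [`SingleThreaded`] and
/// [`MultiThreaded`].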
pub trait ThreadMode {
fn new_cf_map_internal(
cf_map: BTreeMap<String, *mut ffi::rocksdb_column_family_handle_t>,
) -> Self;
fn drop_all_cfs_internal(&mut self);
}
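/// Column family handles are kept in a plain map; creating or dropping a
/// column family requires `&mut` access to the database.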
pub struct SingleThreaded {
pub(crate) cfs: BTreeMap<String, ColumnFamily>,
}
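/// Column family handles are kept behind a `RwLock`, so column families can
/// be created and dropped concurrently through a shared reference.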
pub struct MultiThreaded {
pub(crate) cfs: RwLock<BTreeMap<String, Arc<UnboundColumnFamily>>>,
}
impl ThreadMode for SingleThreaded {
fn new_cf_map_internal(
cfs: BTreeMap<String, *mut ffi::rocksdb_column_family_handle_t>,
) -> Self {
Self {
cfs: cfs
.into_iter()
.map(|(n, c)| (n, ColumnFamily { inner: c }))
.collect(),
}
}
fn drop_all_cfs_internal(&mut self) {
self.cfs.clear();
}
}
impl ThreadMode for MultiThreaded {
fn new_cf_map_internal(
cfs: BTreeMap<String, *mut ffi::rocksdb_column_family_handle_t>,
) -> Self {
Self {
cfs: RwLock::new(
cfs.into_iter()
.map(|(n, c)| (n, Arc::new(UnboundColumnFamily { inner: c })))
.collect(),
),
}
}
fn drop_all_cfs_internal(&mut self) {
self.cfs.write().unwrap().clear();
}
}
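/// Minimal abstraction over the raw `rocksdb_t` handle used by the database
/// wrappers built on [`DBCommon`].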
pub trait DBInner {
fn inner(&self) -> *mut ffi::rocksdb_t;
}
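/// Common implementation shared by the RocksDB database wrappers. `T` selects
/// the column family threading mode and `D` supplies the underlying raw
/// database handle.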
pub struct DBCommon<T: ThreadMode, D: DBInner> {
pub(crate) inner: D,
    cfs: T,
    path: PathBuf,
_outlive: Vec<OptionsMustOutliveDB>,
}
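/// Lower-level read-access interface used by iterators and snapshots so that
/// they can work with any database wrapper implementing it.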
pub trait DBAccess {
unsafe fn create_snapshot(&self) -> *const ffi::rocksdb_snapshot_t;
unsafe fn release_snapshot(&self, snapshot: *const ffi::rocksdb_snapshot_t);
unsafe fn create_iterator(&self, readopts: &ReadOptions) -> *mut ffi::rocksdb_iterator_t;
unsafe fn create_iterator_cf(
&self,
cf_handle: *mut ffi::rocksdb_column_family_handle_t,
readopts: &ReadOptions,
) -> *mut ffi::rocksdb_iterator_t;
fn get_opt<K: AsRef<[u8]>>(
&self,
key: K,
readopts: &ReadOptions,
) -> Result<Option<Vec<u8>>, Error>;
fn get_cf_opt<K: AsRef<[u8]>>(
&self,
cf: &impl AsColumnFamilyRef,
key: K,
readopts: &ReadOptions,
) -> Result<Option<Vec<u8>>, Error>;
fn get_pinned_opt<K: AsRef<[u8]>>(
&self,
key: K,
readopts: &ReadOptions,
) -> Result<Option<DBPinnableSlice>, Error>;
fn get_pinned_cf_opt<K: AsRef<[u8]>>(
&self,
cf: &impl AsColumnFamilyRef,
key: K,
readopts: &ReadOptions,
) -> Result<Option<DBPinnableSlice>, Error>;
fn multi_get_opt<K, I>(
&self,
keys: I,
readopts: &ReadOptions,
) -> Vec<Result<Option<Vec<u8>>, Error>>
where
K: AsRef<[u8]>,
I: IntoIterator<Item = K>;
fn multi_get_cf_opt<'b, K, I, W>(
&self,
keys_cf: I,
readopts: &ReadOptions,
) -> Vec<Result<Option<Vec<u8>>, Error>>
where
K: AsRef<[u8]>,
I: IntoIterator<Item = (&'b W, K)>,
W: AsColumnFamilyRef + 'b;
}
impl<T: ThreadMode, D: DBInner> DBAccess for DBCommon<T, D> {
unsafe fn create_snapshot(&self) -> *const ffi::rocksdb_snapshot_t {
ffi::rocksdb_create_snapshot(self.inner.inner())
}
unsafe fn release_snapshot(&self, snapshot: *const ffi::rocksdb_snapshot_t) {
ffi::rocksdb_release_snapshot(self.inner.inner(), snapshot);
}
unsafe fn create_iterator(&self, readopts: &ReadOptions) -> *mut ffi::rocksdb_iterator_t {
ffi::rocksdb_create_iterator(self.inner.inner(), readopts.inner)
}
unsafe fn create_iterator_cf(
&self,
cf_handle: *mut ffi::rocksdb_column_family_handle_t,
readopts: &ReadOptions,
) -> *mut ffi::rocksdb_iterator_t {
ffi::rocksdb_create_iterator_cf(self.inner.inner(), readopts.inner, cf_handle)
}
fn get_opt<K: AsRef<[u8]>>(
&self,
key: K,
readopts: &ReadOptions,
) -> Result<Option<Vec<u8>>, Error> {
self.get_opt(key, readopts)
}
fn get_cf_opt<K: AsRef<[u8]>>(
&self,
cf: &impl AsColumnFamilyRef,
key: K,
readopts: &ReadOptions,
) -> Result<Option<Vec<u8>>, Error> {
self.get_cf_opt(cf, key, readopts)
}
fn get_pinned_opt<K: AsRef<[u8]>>(
&self,
key: K,
readopts: &ReadOptions,
) -> Result<Option<DBPinnableSlice>, Error> {
self.get_pinned_opt(key, readopts)
}
fn get_pinned_cf_opt<K: AsRef<[u8]>>(
&self,
cf: &impl AsColumnFamilyRef,
key: K,
readopts: &ReadOptions,
) -> Result<Option<DBPinnableSlice>, Error> {
self.get_pinned_cf_opt(cf, key, readopts)
}
fn multi_get_opt<K, Iter>(
&self,
keys: Iter,
readopts: &ReadOptions,
) -> Vec<Result<Option<Vec<u8>>, Error>>
where
K: AsRef<[u8]>,
Iter: IntoIterator<Item = K>,
{
self.multi_get_opt(keys, readopts)
}
fn multi_get_cf_opt<'b, K, Iter, W>(
&self,
keys_cf: Iter,
readopts: &ReadOptions,
) -> Vec<Result<Option<Vec<u8>>, Error>>
where
K: AsRef<[u8]>,
Iter: IntoIterator<Item = (&'b W, K)>,
W: AsColumnFamilyRef + 'b,
{
self.multi_get_cf_opt(keys_cf, readopts)
}
}
pub struct DBWithThreadModeInner {
inner: *mut ffi::rocksdb_t,
}
impl DBInner for DBWithThreadModeInner {
fn inner(&self) -> *mut ffi::rocksdb_t {
self.inner
}
}
impl Drop for DBWithThreadModeInner {
fn drop(&mut self) {
unsafe {
ffi::rocksdb_close(self.inner);
}
}
}
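/// A RocksDB database handle parameterised over its column family threading
/// mode. The `DB` alias below picks [`SingleThreaded`] by default and
/// [`MultiThreaded`] when the `multi-threaded-cf` feature is enabled.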
pub type DBWithThreadMode<T> = DBCommon<T, DBWithThreadModeInner>;
#[cfg(not(feature = "multi-threaded-cf"))]
pub type DB = DBWithThreadMode<SingleThreaded>;
#[cfg(feature = "multi-threaded-cf")]
pub type DB = DBWithThreadMode<MultiThreaded>;
unsafe impl<T: ThreadMode + Send, I: DBInner> Send for DBCommon<T, I> {}
unsafe impl<T: ThreadMode, I: DBInner> Sync for DBCommon<T, I> {}
enum AccessType<'a> {
ReadWrite,
ReadOnly { error_if_log_file_exist: bool },
Secondary { secondary_path: &'a Path },
WithTTL { ttl: Duration },
}
impl<T: ThreadMode> DBWithThreadMode<T> {
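    /// Opens a database with default options, creating it if it is missing.
    ///
    /// A minimal usage sketch (the path is illustrative):
    ///
    /// ```no_run
    /// use rocksdb::{DB, Options};
    ///
    /// let path = "_illustrative_rocksdb_path";
    /// {
    ///     let db = DB::open_default(path).unwrap();
    ///     db.put(b"key", b"value").unwrap();
    ///     assert_eq!(db.get(b"key").unwrap().unwrap(), b"value");
    /// }
    /// let _ = DB::destroy(&Options::default(), path);
    /// ```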
pub fn open_default<P: AsRef<Path>>(path: P) -> Result<Self, Error> {
let mut opts = Options::default();
opts.create_if_missing(true);
Self::open(&opts, path)
}
pub fn open<P: AsRef<Path>>(opts: &Options, path: P) -> Result<Self, Error> {
Self::open_cf(opts, path, None::<&str>)
}
pub fn open_for_read_only<P: AsRef<Path>>(
opts: &Options,
path: P,
error_if_log_file_exist: bool,
) -> Result<Self, Error> {
Self::open_cf_for_read_only(opts, path, None::<&str>, error_if_log_file_exist)
}
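    /// Opens the database at `primary_path` as a secondary instance. The
    /// secondary keeps its own files under `secondary_path` and can be brought
    /// up to date with the primary via `try_catch_up_with_primary`.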
pub fn open_as_secondary<P: AsRef<Path>>(
opts: &Options,
primary_path: P,
secondary_path: P,
) -> Result<Self, Error> {
Self::open_cf_as_secondary(opts, primary_path, secondary_path, None::<&str>)
}
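    /// Opens the database with a Time to Live compaction filter: entries older
    /// than `ttl` become candidates for removal during compaction.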
pub fn open_with_ttl<P: AsRef<Path>>(
opts: &Options,
path: P,
ttl: Duration,
) -> Result<Self, Error> {
Self::open_cf_descriptors_with_ttl(opts, path, std::iter::empty(), ttl)
}
pub fn open_cf_with_ttl<P, I, N>(
opts: &Options,
path: P,
cfs: I,
ttl: Duration,
) -> Result<Self, Error>
where
P: AsRef<Path>,
I: IntoIterator<Item = N>,
N: AsRef<str>,
{
let cfs = cfs
.into_iter()
.map(|name| ColumnFamilyDescriptor::new(name.as_ref(), Options::default()));
Self::open_cf_descriptors_with_ttl(opts, path, cfs, ttl)
}
pub fn open_cf_descriptors_with_ttl<P, I>(
opts: &Options,
path: P,
cfs: I,
ttl: Duration,
) -> Result<Self, Error>
where
P: AsRef<Path>,
I: IntoIterator<Item = ColumnFamilyDescriptor>,
{
Self::open_cf_descriptors_internal(opts, path, cfs, &AccessType::WithTTL { ttl })
}
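    /// Opens a database with the given options and column family names, using
    /// default options for each column family.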
pub fn open_cf<P, I, N>(opts: &Options, path: P, cfs: I) -> Result<Self, Error>
where
P: AsRef<Path>,
I: IntoIterator<Item = N>,
N: AsRef<str>,
{
let cfs = cfs
.into_iter()
.map(|name| ColumnFamilyDescriptor::new(name.as_ref(), Options::default()));
Self::open_cf_descriptors_internal(opts, path, cfs, &AccessType::ReadWrite)
}
pub fn open_cf_with_opts<P, I, N>(opts: &Options, path: P, cfs: I) -> Result<Self, Error>
where
P: AsRef<Path>,
I: IntoIterator<Item = (N, Options)>,
N: AsRef<str>,
{
let cfs = cfs
.into_iter()
.map(|(name, opts)| ColumnFamilyDescriptor::new(name.as_ref(), opts));
Self::open_cf_descriptors(opts, path, cfs)
}
pub fn open_cf_for_read_only<P, I, N>(
opts: &Options,
path: P,
cfs: I,
error_if_log_file_exist: bool,
) -> Result<Self, Error>
where
P: AsRef<Path>,
I: IntoIterator<Item = N>,
N: AsRef<str>,
{
let cfs = cfs
.into_iter()
.map(|name| ColumnFamilyDescriptor::new(name.as_ref(), Options::default()));
Self::open_cf_descriptors_internal(
opts,
path,
cfs,
&AccessType::ReadOnly {
error_if_log_file_exist,
},
)
}
pub fn open_cf_with_opts_for_read_only<P, I, N>(
db_opts: &Options,
path: P,
cfs: I,
error_if_log_file_exist: bool,
) -> Result<Self, Error>
where
P: AsRef<Path>,
I: IntoIterator<Item = (N, Options)>,
N: AsRef<str>,
{
let cfs = cfs
.into_iter()
.map(|(name, cf_opts)| ColumnFamilyDescriptor::new(name.as_ref(), cf_opts));
Self::open_cf_descriptors_internal(
db_opts,
path,
cfs,
&AccessType::ReadOnly {
error_if_log_file_exist,
},
)
}
pub fn open_cf_descriptors_read_only<P, I>(
opts: &Options,
path: P,
cfs: I,
error_if_log_file_exist: bool,
) -> Result<Self, Error>
where
P: AsRef<Path>,
I: IntoIterator<Item = ColumnFamilyDescriptor>,
{
Self::open_cf_descriptors_internal(
opts,
path,
cfs,
&AccessType::ReadOnly {
error_if_log_file_exist,
},
)
}
pub fn open_cf_as_secondary<P, I, N>(
opts: &Options,
primary_path: P,
secondary_path: P,
cfs: I,
) -> Result<Self, Error>
where
P: AsRef<Path>,
I: IntoIterator<Item = N>,
N: AsRef<str>,
{
let cfs = cfs
.into_iter()
.map(|name| ColumnFamilyDescriptor::new(name.as_ref(), Options::default()));
Self::open_cf_descriptors_internal(
opts,
primary_path,
cfs,
&AccessType::Secondary {
secondary_path: secondary_path.as_ref(),
},
)
}
pub fn open_cf_descriptors_as_secondary<P, I>(
opts: &Options,
path: P,
secondary_path: P,
cfs: I,
) -> Result<Self, Error>
where
P: AsRef<Path>,
I: IntoIterator<Item = ColumnFamilyDescriptor>,
{
Self::open_cf_descriptors_internal(
opts,
path,
cfs,
&AccessType::Secondary {
secondary_path: secondary_path.as_ref(),
},
)
}
pub fn open_cf_descriptors<P, I>(opts: &Options, path: P, cfs: I) -> Result<Self, Error>
where
P: AsRef<Path>,
I: IntoIterator<Item = ColumnFamilyDescriptor>,
{
Self::open_cf_descriptors_internal(opts, path, cfs, &AccessType::ReadWrite)
}
fn open_cf_descriptors_internal<P, I>(
opts: &Options,
path: P,
cfs: I,
access_type: &AccessType,
) -> Result<Self, Error>
where
P: AsRef<Path>,
I: IntoIterator<Item = ColumnFamilyDescriptor>,
{
let cfs: Vec<_> = cfs.into_iter().collect();
let outlive = iter::once(opts.outlive.clone())
.chain(cfs.iter().map(|cf| cf.options.outlive.clone()))
.collect();
let cpath = to_cpath(&path)?;
if let Err(e) = fs::create_dir_all(&path) {
return Err(Error::new(format!(
"Failed to create RocksDB directory: `{e:?}`."
)));
}
let db: *mut ffi::rocksdb_t;
let mut cf_map = BTreeMap::new();
if cfs.is_empty() {
db = Self::open_raw(opts, &cpath, access_type)?;
} else {
let mut cfs_v = cfs;
if !cfs_v.iter().any(|cf| cf.name == DEFAULT_COLUMN_FAMILY_NAME) {
cfs_v.push(ColumnFamilyDescriptor {
name: String::from(DEFAULT_COLUMN_FAMILY_NAME),
options: Options::default(),
});
}
let c_cfs: Vec<CString> = cfs_v
.iter()
.map(|cf| CString::new(cf.name.as_bytes()).unwrap())
.collect();
let cfnames: Vec<_> = c_cfs.iter().map(|cf| cf.as_ptr()).collect();
let mut cfhandles: Vec<_> = cfs_v.iter().map(|_| ptr::null_mut()).collect();
let cfopts: Vec<_> = cfs_v
.iter()
.map(|cf| cf.options.inner as *const _)
.collect();
db = Self::open_cf_raw(
opts,
&cpath,
&cfs_v,
&cfnames,
&cfopts,
&mut cfhandles,
access_type,
)?;
for handle in &cfhandles {
if handle.is_null() {
return Err(Error::new(
"Received null column family handle from DB.".to_owned(),
));
}
}
for (cf_desc, inner) in cfs_v.iter().zip(cfhandles) {
cf_map.insert(cf_desc.name.clone(), inner);
}
}
if db.is_null() {
return Err(Error::new("Could not initialize database.".to_owned()));
}
Ok(Self {
inner: DBWithThreadModeInner { inner: db },
path: path.as_ref().to_path_buf(),
cfs: T::new_cf_map_internal(cf_map),
_outlive: outlive,
})
}
fn open_raw(
opts: &Options,
cpath: &CString,
access_type: &AccessType,
) -> Result<*mut ffi::rocksdb_t, Error> {
let db = unsafe {
match *access_type {
AccessType::ReadOnly {
error_if_log_file_exist,
} => ffi_try!(ffi::rocksdb_open_for_read_only(
opts.inner,
cpath.as_ptr(),
c_uchar::from(error_if_log_file_exist),
)),
AccessType::ReadWrite => {
ffi_try!(ffi::rocksdb_open(opts.inner, cpath.as_ptr()))
}
AccessType::Secondary { secondary_path } => {
ffi_try!(ffi::rocksdb_open_as_secondary(
opts.inner,
cpath.as_ptr(),
to_cpath(secondary_path)?.as_ptr(),
))
}
AccessType::WithTTL { ttl } => ffi_try!(ffi::rocksdb_open_with_ttl(
opts.inner,
cpath.as_ptr(),
ttl.as_secs() as c_int,
)),
}
};
Ok(db)
}
#[allow(clippy::pedantic)]
fn open_cf_raw(
opts: &Options,
cpath: &CString,
cfs_v: &[ColumnFamilyDescriptor],
cfnames: &[*const c_char],
cfopts: &[*const ffi::rocksdb_options_t],
cfhandles: &mut [*mut ffi::rocksdb_column_family_handle_t],
access_type: &AccessType,
) -> Result<*mut ffi::rocksdb_t, Error> {
let db = unsafe {
match *access_type {
AccessType::ReadOnly {
error_if_log_file_exist,
} => ffi_try!(ffi::rocksdb_open_for_read_only_column_families(
opts.inner,
cpath.as_ptr(),
cfs_v.len() as c_int,
cfnames.as_ptr(),
cfopts.as_ptr(),
cfhandles.as_mut_ptr(),
c_uchar::from(error_if_log_file_exist),
)),
AccessType::ReadWrite => ffi_try!(ffi::rocksdb_open_column_families(
opts.inner,
cpath.as_ptr(),
cfs_v.len() as c_int,
cfnames.as_ptr(),
cfopts.as_ptr(),
cfhandles.as_mut_ptr(),
)),
AccessType::Secondary { secondary_path } => {
ffi_try!(ffi::rocksdb_open_as_secondary_column_families(
opts.inner,
cpath.as_ptr(),
to_cpath(secondary_path)?.as_ptr(),
cfs_v.len() as c_int,
cfnames.as_ptr(),
cfopts.as_ptr(),
cfhandles.as_mut_ptr(),
))
}
AccessType::WithTTL { ttl } => {
let ttls_v = vec![ttl.as_secs() as c_int; cfs_v.len()];
ffi_try!(ffi::rocksdb_open_column_families_with_ttl(
opts.inner,
cpath.as_ptr(),
cfs_v.len() as c_int,
cfnames.as_ptr(),
cfopts.as_ptr(),
cfhandles.as_mut_ptr(),
ttls_v.as_ptr(),
))
}
}
};
Ok(db)
}
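    /// Removes the database entries in the range `[from, to)` from the given
    /// column family, using the provided write options.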
pub fn delete_range_cf_opt<K: AsRef<[u8]>>(
&self,
cf: &impl AsColumnFamilyRef,
from: K,
to: K,
writeopts: &WriteOptions,
) -> Result<(), Error> {
let from = from.as_ref();
let to = to.as_ref();
unsafe {
ffi_try!(ffi::rocksdb_delete_range_cf(
self.inner.inner(),
writeopts.inner,
cf.inner(),
from.as_ptr() as *const c_char,
from.len() as size_t,
to.as_ptr() as *const c_char,
to.len() as size_t,
));
Ok(())
}
}
pub fn delete_range_cf<K: AsRef<[u8]>>(
&self,
cf: &impl AsColumnFamilyRef,
from: K,
to: K,
) -> Result<(), Error> {
self.delete_range_cf_opt(cf, from, to, &WriteOptions::default())
}
pub fn write_opt(&self, batch: WriteBatch, writeopts: &WriteOptions) -> Result<(), Error> {
unsafe {
ffi_try!(ffi::rocksdb_write(
self.inner.inner(),
writeopts.inner,
batch.inner
));
}
Ok(())
}
pub fn write(&self, batch: WriteBatch) -> Result<(), Error> {
self.write_opt(batch, &WriteOptions::default())
}
pub fn write_without_wal(&self, batch: WriteBatch) -> Result<(), Error> {
let mut wo = WriteOptions::new();
wo.disable_wal(true);
self.write_opt(batch, &wo)
}
}
impl<T: ThreadMode, D: DBInner> DBCommon<T, D> {
pub(crate) fn new(inner: D, cfs: T, path: PathBuf, outlive: Vec<OptionsMustOutliveDB>) -> Self {
Self {
inner,
cfs,
path,
_outlive: outlive,
}
}
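    /// Lists the column families present in the database at `path` without
    /// opening it.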
pub fn list_cf<P: AsRef<Path>>(opts: &Options, path: P) -> Result<Vec<String>, Error> {
let cpath = to_cpath(path)?;
let mut length = 0;
unsafe {
let ptr = ffi_try!(ffi::rocksdb_list_column_families(
opts.inner,
cpath.as_ptr(),
&mut length,
));
let vec = slice::from_raw_parts(ptr, length)
.iter()
.map(|ptr| CStr::from_ptr(*ptr).to_string_lossy().into_owned())
.collect();
ffi::rocksdb_list_column_families_destroy(ptr, length);
Ok(vec)
}
}
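    /// Destroys the database files at `path`. The database should not be open
    /// while this is called.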
pub fn destroy<P: AsRef<Path>>(opts: &Options, path: P) -> Result<(), Error> {
let cpath = to_cpath(path)?;
unsafe {
ffi_try!(ffi::rocksdb_destroy_db(opts.inner, cpath.as_ptr()));
}
Ok(())
}
pub fn repair<P: AsRef<Path>>(opts: &Options, path: P) -> Result<(), Error> {
let cpath = to_cpath(path)?;
unsafe {
ffi_try!(ffi::rocksdb_repair_db(opts.inner, cpath.as_ptr()));
}
Ok(())
}
pub fn path(&self) -> &Path {
self.path.as_path()
}
pub fn flush_wal(&self, sync: bool) -> Result<(), Error> {
unsafe {
ffi_try!(ffi::rocksdb_flush_wal(
self.inner.inner(),
c_uchar::from(sync)
));
}
Ok(())
}
pub fn flush_opt(&self, flushopts: &FlushOptions) -> Result<(), Error> {
unsafe {
ffi_try!(ffi::rocksdb_flush(self.inner.inner(), flushopts.inner));
}
Ok(())
}
pub fn flush(&self) -> Result<(), Error> {
self.flush_opt(&FlushOptions::default())
}
pub fn flush_cf_opt(
&self,
cf: &impl AsColumnFamilyRef,
flushopts: &FlushOptions,
) -> Result<(), Error> {
unsafe {
ffi_try!(ffi::rocksdb_flush_cf(
self.inner.inner(),
flushopts.inner,
cf.inner()
));
}
Ok(())
}
pub fn flush_cf(&self, cf: &impl AsColumnFamilyRef) -> Result<(), Error> {
self.flush_cf_opt(cf, &FlushOptions::default())
}
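    /// Returns the value for `key` using the given read options, copied into a
    /// `Vec<u8>`. Use `get_pinned_opt` to avoid the copy.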
pub fn get_opt<K: AsRef<[u8]>>(
&self,
key: K,
readopts: &ReadOptions,
) -> Result<Option<Vec<u8>>, Error> {
self.get_pinned_opt(key, readopts)
.map(|x| x.map(|v| v.as_ref().to_vec()))
}
pub fn get<K: AsRef<[u8]>>(&self, key: K) -> Result<Option<Vec<u8>>, Error> {
self.get_opt(key.as_ref(), &ReadOptions::default())
}
pub fn get_cf_opt<K: AsRef<[u8]>>(
&self,
cf: &impl AsColumnFamilyRef,
key: K,
readopts: &ReadOptions,
) -> Result<Option<Vec<u8>>, Error> {
self.get_pinned_cf_opt(cf, key, readopts)
.map(|x| x.map(|v| v.as_ref().to_vec()))
}
pub fn get_cf<K: AsRef<[u8]>>(
&self,
cf: &impl AsColumnFamilyRef,
key: K,
) -> Result<Option<Vec<u8>>, Error> {
self.get_cf_opt(cf, key.as_ref(), &ReadOptions::default())
}
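    /// Returns the value for `key` as a pinned slice, avoiding a copy of the
    /// data while the returned handle is alive.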
pub fn get_pinned_opt<K: AsRef<[u8]>>(
&self,
key: K,
readopts: &ReadOptions,
) -> Result<Option<DBPinnableSlice>, Error> {
if readopts.inner.is_null() {
return Err(Error::new(
"Unable to create RocksDB read options. This is a fairly trivial call, and its \
failure may be indicative of a mis-compiled or mis-loaded RocksDB library."
.to_owned(),
));
}
let key = key.as_ref();
unsafe {
let val = ffi_try!(ffi::rocksdb_get_pinned(
self.inner.inner(),
readopts.inner,
key.as_ptr() as *const c_char,
key.len() as size_t,
));
if val.is_null() {
Ok(None)
} else {
Ok(Some(DBPinnableSlice::from_c(val)))
}
}
}
pub fn get_pinned<K: AsRef<[u8]>>(&self, key: K) -> Result<Option<DBPinnableSlice>, Error> {
self.get_pinned_opt(key, &ReadOptions::default())
}
pub fn get_pinned_cf_opt<K: AsRef<[u8]>>(
&self,
cf: &impl AsColumnFamilyRef,
key: K,
readopts: &ReadOptions,
) -> Result<Option<DBPinnableSlice>, Error> {
if readopts.inner.is_null() {
return Err(Error::new(
"Unable to create RocksDB read options. This is a fairly trivial call, and its \
failure may be indicative of a mis-compiled or mis-loaded RocksDB library."
.to_owned(),
));
}
let key = key.as_ref();
unsafe {
let val = ffi_try!(ffi::rocksdb_get_pinned_cf(
self.inner.inner(),
readopts.inner,
cf.inner(),
key.as_ptr() as *const c_char,
key.len() as size_t,
));
if val.is_null() {
Ok(None)
} else {
Ok(Some(DBPinnableSlice::from_c(val)))
}
}
}
pub fn get_pinned_cf<K: AsRef<[u8]>>(
&self,
cf: &impl AsColumnFamilyRef,
key: K,
) -> Result<Option<DBPinnableSlice>, Error> {
self.get_pinned_cf_opt(cf, key, &ReadOptions::default())
}
pub fn multi_get<K, I>(&self, keys: I) -> Vec<Result<Option<Vec<u8>>, Error>>
where
K: AsRef<[u8]>,
I: IntoIterator<Item = K>,
{
self.multi_get_opt(keys, &ReadOptions::default())
}
pub fn multi_get_opt<K, I>(
&self,
keys: I,
readopts: &ReadOptions,
) -> Vec<Result<Option<Vec<u8>>, Error>>
where
K: AsRef<[u8]>,
I: IntoIterator<Item = K>,
{
let (keys, keys_sizes): (Vec<Box<[u8]>>, Vec<_>) = keys
.into_iter()
.map(|k| (Box::from(k.as_ref()), k.as_ref().len()))
.unzip();
let ptr_keys: Vec<_> = keys.iter().map(|k| k.as_ptr() as *const c_char).collect();
let mut values = vec![ptr::null_mut(); keys.len()];
let mut values_sizes = vec![0_usize; keys.len()];
let mut errors = vec![ptr::null_mut(); keys.len()];
unsafe {
ffi::rocksdb_multi_get(
self.inner.inner(),
readopts.inner,
ptr_keys.len(),
ptr_keys.as_ptr(),
keys_sizes.as_ptr(),
values.as_mut_ptr(),
values_sizes.as_mut_ptr(),
errors.as_mut_ptr(),
);
}
convert_values(values, values_sizes, errors)
}
pub fn multi_get_cf<'a, 'b: 'a, K, I, W>(
&'a self,
keys: I,
) -> Vec<Result<Option<Vec<u8>>, Error>>
where
K: AsRef<[u8]>,
I: IntoIterator<Item = (&'b W, K)>,
W: 'b + AsColumnFamilyRef,
{
self.multi_get_cf_opt(keys, &ReadOptions::default())
}
pub fn multi_get_cf_opt<'a, 'b: 'a, K, I, W>(
&'a self,
keys: I,
readopts: &ReadOptions,
) -> Vec<Result<Option<Vec<u8>>, Error>>
where
K: AsRef<[u8]>,
I: IntoIterator<Item = (&'b W, K)>,
W: 'b + AsColumnFamilyRef,
{
let (cfs_and_keys, keys_sizes): (Vec<(_, Box<[u8]>)>, Vec<_>) = keys
.into_iter()
.map(|(cf, key)| ((cf, Box::from(key.as_ref())), key.as_ref().len()))
.unzip();
let ptr_keys: Vec<_> = cfs_and_keys
.iter()
.map(|(_, k)| k.as_ptr() as *const c_char)
.collect();
let ptr_cfs: Vec<_> = cfs_and_keys
.iter()
.map(|(c, _)| c.inner() as *const _)
.collect();
let mut values = vec![ptr::null_mut(); ptr_keys.len()];
let mut values_sizes = vec![0_usize; ptr_keys.len()];
let mut errors = vec![ptr::null_mut(); ptr_keys.len()];
unsafe {
ffi::rocksdb_multi_get_cf(
self.inner.inner(),
readopts.inner,
ptr_cfs.as_ptr(),
ptr_keys.len(),
ptr_keys.as_ptr(),
keys_sizes.as_ptr(),
values.as_mut_ptr(),
values_sizes.as_mut_ptr(),
errors.as_mut_ptr(),
);
}
convert_values(values, values_sizes, errors)
}
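    /// Fetches several keys from one column family in a single call, returning
    /// pinned slices. `sorted_input` may be set to true when the keys are
    /// already ordered by the column family comparator, which lets RocksDB use
    /// a faster path.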
pub fn batched_multi_get_cf<K, I>(
&self,
cf: &impl AsColumnFamilyRef,
keys: I,
sorted_input: bool,
) -> Vec<Result<Option<DBPinnableSlice>, Error>>
where
K: AsRef<[u8]>,
I: IntoIterator<Item = K>,
{
self.batched_multi_get_cf_opt(cf, keys, sorted_input, &ReadOptions::default())
}
pub fn batched_multi_get_cf_opt<K, I>(
&self,
cf: &impl AsColumnFamilyRef,
keys: I,
sorted_input: bool,
readopts: &ReadOptions,
) -> Vec<Result<Option<DBPinnableSlice>, Error>>
where
K: AsRef<[u8]>,
I: IntoIterator<Item = K>,
{
let (keys, keys_sizes): (Vec<Box<[u8]>>, Vec<_>) = keys
.into_iter()
.map(|k| (Box::from(k.as_ref()), k.as_ref().len()))
.unzip();
let ptr_keys: Vec<_> = keys.iter().map(|k| k.as_ptr() as *const c_char).collect();
let mut pinned_values = vec![ptr::null_mut(); ptr_keys.len()];
let mut errors = vec![ptr::null_mut(); ptr_keys.len()];
unsafe {
ffi::rocksdb_batched_multi_get_cf(
self.inner.inner(),
readopts.inner,
cf.inner(),
ptr_keys.len(),
ptr_keys.as_ptr(),
keys_sizes.as_ptr(),
pinned_values.as_mut_ptr(),
errors.as_mut_ptr(),
sorted_input,
);
pinned_values
.into_iter()
                .zip(errors)
.map(|(v, e)| {
if e.is_null() {
if v.is_null() {
Ok(None)
} else {
Ok(Some(DBPinnableSlice::from_c(v)))
}
} else {
Err(Error::new(crate::ffi_util::error_message(e)))
}
})
.collect()
}
}
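    /// Cheaply checks whether `key` may exist. A `false` result means the key
    /// is definitely absent; a `true` result may still be a false positive.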
pub fn key_may_exist<K: AsRef<[u8]>>(&self, key: K) -> bool {
self.key_may_exist_opt(key, &ReadOptions::default())
}
pub fn key_may_exist_opt<K: AsRef<[u8]>>(&self, key: K, readopts: &ReadOptions) -> bool {
let key = key.as_ref();
unsafe {
0 != ffi::rocksdb_key_may_exist(
self.inner.inner(),
readopts.inner,
key.as_ptr() as *const c_char,
key.len() as size_t,
                ptr::null_mut(), // value out-param (value not requested)
                ptr::null_mut(), // value length out-param
                ptr::null(),     // timestamp (unused)
                0,               // timestamp length
                ptr::null_mut(), // value_found out-param
            )
}
}
pub fn key_may_exist_cf<K: AsRef<[u8]>>(&self, cf: &impl AsColumnFamilyRef, key: K) -> bool {
self.key_may_exist_cf_opt(cf, key, &ReadOptions::default())
}
pub fn key_may_exist_cf_opt<K: AsRef<[u8]>>(
&self,
cf: &impl AsColumnFamilyRef,
key: K,
readopts: &ReadOptions,
) -> bool {
let key = key.as_ref();
0 != unsafe {
ffi::rocksdb_key_may_exist_cf(
self.inner.inner(),
readopts.inner,
cf.inner(),
key.as_ptr() as *const c_char,
key.len() as size_t,
                ptr::null_mut(), // value out-param (value not requested)
                ptr::null_mut(), // value length out-param
                ptr::null(),     // timestamp (unused)
                0,               // timestamp length
                ptr::null_mut(), // value_found out-param
            )
}
}
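    /// Like `key_may_exist_cf_opt`, but also returns the value when RocksDB
    /// was able to retrieve it cheaply while answering the existence check.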
pub fn key_may_exist_cf_opt_value<K: AsRef<[u8]>>(
&self,
cf: &impl AsColumnFamilyRef,
key: K,
readopts: &ReadOptions,
) -> (bool, Option<CSlice>) {
let key = key.as_ref();
let mut val: *mut c_char = ptr::null_mut();
let mut val_len: usize = 0;
let mut value_found: c_uchar = 0;
let may_exists = 0
!= unsafe {
ffi::rocksdb_key_may_exist_cf(
self.inner.inner(),
readopts.inner,
cf.inner(),
key.as_ptr() as *const c_char,
key.len() as size_t,
                    &mut val,         // value out-param
                    &mut val_len,     // value length out-param
                    ptr::null(),      // timestamp (unused)
                    0,                // timestamp length
                    &mut value_found, // set when a value was retrieved
                )
};
if may_exists && value_found != 0 {
(
may_exists,
Some(unsafe { CSlice::from_raw_parts(val, val_len) }),
)
} else {
(may_exists, None)
}
}
fn create_inner_cf_handle(
&self,
name: impl CStrLike,
opts: &Options,
) -> Result<*mut ffi::rocksdb_column_family_handle_t, Error> {
let cf_name = name.bake().map_err(|err| {
Error::new(format!(
"Failed to convert path to CString when creating cf: {err}"
))
})?;
Ok(unsafe {
ffi_try!(ffi::rocksdb_create_column_family(
self.inner.inner(),
opts.inner,
cf_name.as_ptr(),
))
})
}
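    /// Iterates over the whole database with default read options; `mode`
    /// selects the starting position and direction.
    ///
    /// An illustrative sketch (error handling elided):
    ///
    /// ```no_run
    /// use rocksdb::{DB, IteratorMode};
    ///
    /// let db = DB::open_default("_illustrative_rocksdb_path").unwrap();
    /// for item in db.iterator(IteratorMode::Start) {
    ///     let (key, value) = item.unwrap();
    ///     // ... use key and value
    /// }
    /// ```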
pub fn iterator<'a: 'b, 'b>(
&'a self,
mode: IteratorMode,
) -> DBIteratorWithThreadMode<'b, Self> {
let readopts = ReadOptions::default();
self.iterator_opt(mode, readopts)
}
pub fn iterator_opt<'a: 'b, 'b>(
&'a self,
mode: IteratorMode,
readopts: ReadOptions,
) -> DBIteratorWithThreadMode<'b, Self> {
DBIteratorWithThreadMode::new(self, readopts, mode)
}
pub fn iterator_cf_opt<'a: 'b, 'b>(
&'a self,
cf_handle: &impl AsColumnFamilyRef,
readopts: ReadOptions,
mode: IteratorMode,
) -> DBIteratorWithThreadMode<'b, Self> {
DBIteratorWithThreadMode::new_cf(self, cf_handle.inner(), readopts, mode)
}
pub fn full_iterator<'a: 'b, 'b>(
&'a self,
mode: IteratorMode,
) -> DBIteratorWithThreadMode<'b, Self> {
let mut opts = ReadOptions::default();
opts.set_total_order_seek(true);
DBIteratorWithThreadMode::new(self, opts, mode)
}
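    /// Iterates over keys that share `prefix`. This relies on a prefix
    /// extractor having been configured on the database options and sets
    /// `prefix_same_as_start` on the read options.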
pub fn prefix_iterator<'a: 'b, 'b, P: AsRef<[u8]>>(
&'a self,
prefix: P,
) -> DBIteratorWithThreadMode<'b, Self> {
let mut opts = ReadOptions::default();
opts.set_prefix_same_as_start(true);
DBIteratorWithThreadMode::new(
self,
opts,
IteratorMode::From(prefix.as_ref(), Direction::Forward),
)
}
pub fn iterator_cf<'a: 'b, 'b>(
&'a self,
cf_handle: &impl AsColumnFamilyRef,
mode: IteratorMode,
) -> DBIteratorWithThreadMode<'b, Self> {
let opts = ReadOptions::default();
DBIteratorWithThreadMode::new_cf(self, cf_handle.inner(), opts, mode)
}
pub fn full_iterator_cf<'a: 'b, 'b>(
&'a self,
cf_handle: &impl AsColumnFamilyRef,
mode: IteratorMode,
) -> DBIteratorWithThreadMode<'b, Self> {
let mut opts = ReadOptions::default();
opts.set_total_order_seek(true);
DBIteratorWithThreadMode::new_cf(self, cf_handle.inner(), opts, mode)
}
pub fn prefix_iterator_cf<'a, P: AsRef<[u8]>>(
&'a self,
cf_handle: &impl AsColumnFamilyRef,
prefix: P,
) -> DBIteratorWithThreadMode<'a, Self> {
let mut opts = ReadOptions::default();
opts.set_prefix_same_as_start(true);
DBIteratorWithThreadMode::<'a, Self>::new_cf(
self,
cf_handle.inner(),
opts,
IteratorMode::From(prefix.as_ref(), Direction::Forward),
)
}
pub fn raw_iterator<'a: 'b, 'b>(&'a self) -> DBRawIteratorWithThreadMode<'b, Self> {
let opts = ReadOptions::default();
DBRawIteratorWithThreadMode::new(self, opts)
}
pub fn raw_iterator_cf<'a: 'b, 'b>(
&'a self,
cf_handle: &impl AsColumnFamilyRef,
) -> DBRawIteratorWithThreadMode<'b, Self> {
let opts = ReadOptions::default();
DBRawIteratorWithThreadMode::new_cf(self, cf_handle.inner(), opts)
}
pub fn raw_iterator_opt<'a: 'b, 'b>(
&'a self,
readopts: ReadOptions,
) -> DBRawIteratorWithThreadMode<'b, Self> {
DBRawIteratorWithThreadMode::new(self, readopts)
}
pub fn raw_iterator_cf_opt<'a: 'b, 'b>(
&'a self,
cf_handle: &impl AsColumnFamilyRef,
readopts: ReadOptions,
) -> DBRawIteratorWithThreadMode<'b, Self> {
DBRawIteratorWithThreadMode::new_cf(self, cf_handle.inner(), readopts)
}
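    /// Returns a snapshot giving a consistent, read-only view of the database
    /// as of the moment of the call.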
pub fn snapshot(&self) -> SnapshotWithThreadMode<Self> {
SnapshotWithThreadMode::<Self>::new(self)
}
pub fn put_opt<K, V>(&self, key: K, value: V, writeopts: &WriteOptions) -> Result<(), Error>
where
K: AsRef<[u8]>,
V: AsRef<[u8]>,
{
let key = key.as_ref();
let value = value.as_ref();
unsafe {
ffi_try!(ffi::rocksdb_put(
self.inner.inner(),
writeopts.inner,
key.as_ptr() as *const c_char,
key.len() as size_t,
value.as_ptr() as *const c_char,
value.len() as size_t,
));
Ok(())
}
}
pub fn put_cf_opt<K, V>(
&self,
cf: &impl AsColumnFamilyRef,
key: K,
value: V,
writeopts: &WriteOptions,
) -> Result<(), Error>
where
K: AsRef<[u8]>,
V: AsRef<[u8]>,
{
let key = key.as_ref();
let value = value.as_ref();
unsafe {
ffi_try!(ffi::rocksdb_put_cf(
self.inner.inner(),
writeopts.inner,
cf.inner(),
key.as_ptr() as *const c_char,
key.len() as size_t,
value.as_ptr() as *const c_char,
value.len() as size_t,
));
Ok(())
}
}
pub fn merge_opt<K, V>(&self, key: K, value: V, writeopts: &WriteOptions) -> Result<(), Error>
where
K: AsRef<[u8]>,
V: AsRef<[u8]>,
{
let key = key.as_ref();
let value = value.as_ref();
unsafe {
ffi_try!(ffi::rocksdb_merge(
self.inner.inner(),
writeopts.inner,
key.as_ptr() as *const c_char,
key.len() as size_t,
value.as_ptr() as *const c_char,
value.len() as size_t,
));
Ok(())
}
}
pub fn merge_cf_opt<K, V>(
&self,
cf: &impl AsColumnFamilyRef,
key: K,
value: V,
writeopts: &WriteOptions,
) -> Result<(), Error>
where
K: AsRef<[u8]>,
V: AsRef<[u8]>,
{
let key = key.as_ref();
let value = value.as_ref();
unsafe {
ffi_try!(ffi::rocksdb_merge_cf(
self.inner.inner(),
writeopts.inner,
cf.inner(),
key.as_ptr() as *const c_char,
key.len() as size_t,
value.as_ptr() as *const c_char,
value.len() as size_t,
));
Ok(())
}
}
pub fn delete_opt<K: AsRef<[u8]>>(
&self,
key: K,
writeopts: &WriteOptions,
) -> Result<(), Error> {
let key = key.as_ref();
unsafe {
ffi_try!(ffi::rocksdb_delete(
self.inner.inner(),
writeopts.inner,
key.as_ptr() as *const c_char,
key.len() as size_t,
));
Ok(())
}
}
pub fn delete_cf_opt<K: AsRef<[u8]>>(
&self,
cf: &impl AsColumnFamilyRef,
key: K,
writeopts: &WriteOptions,
) -> Result<(), Error> {
let key = key.as_ref();
unsafe {
ffi_try!(ffi::rocksdb_delete_cf(
self.inner.inner(),
writeopts.inner,
cf.inner(),
key.as_ptr() as *const c_char,
key.len() as size_t,
));
Ok(())
}
}
pub fn put<K, V>(&self, key: K, value: V) -> Result<(), Error>
where
K: AsRef<[u8]>,
V: AsRef<[u8]>,
{
self.put_opt(key.as_ref(), value.as_ref(), &WriteOptions::default())
}
pub fn put_cf<K, V>(&self, cf: &impl AsColumnFamilyRef, key: K, value: V) -> Result<(), Error>
where
K: AsRef<[u8]>,
V: AsRef<[u8]>,
{
self.put_cf_opt(cf, key.as_ref(), value.as_ref(), &WriteOptions::default())
}
pub fn merge<K, V>(&self, key: K, value: V) -> Result<(), Error>
where
K: AsRef<[u8]>,
V: AsRef<[u8]>,
{
self.merge_opt(key.as_ref(), value.as_ref(), &WriteOptions::default())
}
pub fn merge_cf<K, V>(&self, cf: &impl AsColumnFamilyRef, key: K, value: V) -> Result<(), Error>
where
K: AsRef<[u8]>,
V: AsRef<[u8]>,
{
self.merge_cf_opt(cf, key.as_ref(), value.as_ref(), &WriteOptions::default())
}
pub fn delete<K: AsRef<[u8]>>(&self, key: K) -> Result<(), Error> {
self.delete_opt(key.as_ref(), &WriteOptions::default())
}
pub fn delete_cf<K: AsRef<[u8]>>(
&self,
cf: &impl AsColumnFamilyRef,
key: K,
) -> Result<(), Error> {
self.delete_cf_opt(cf, key.as_ref(), &WriteOptions::default())
}
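    /// Runs a manual compaction on the key range between `start` and `end`;
    /// `None` leaves that side of the range unbounded.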
pub fn compact_range<S: AsRef<[u8]>, E: AsRef<[u8]>>(&self, start: Option<S>, end: Option<E>) {
unsafe {
let start = start.as_ref().map(AsRef::as_ref);
let end = end.as_ref().map(AsRef::as_ref);
ffi::rocksdb_compact_range(
self.inner.inner(),
opt_bytes_to_ptr(start),
start.map_or(0, <[u8]>::len) as size_t,
opt_bytes_to_ptr(end),
end.map_or(0, <[u8]>::len) as size_t,
);
}
}
pub fn compact_range_opt<S: AsRef<[u8]>, E: AsRef<[u8]>>(
&self,
start: Option<S>,
end: Option<E>,
opts: &CompactOptions,
) {
unsafe {
let start = start.as_ref().map(AsRef::as_ref);
let end = end.as_ref().map(AsRef::as_ref);
ffi::rocksdb_compact_range_opt(
self.inner.inner(),
opts.inner,
opt_bytes_to_ptr(start),
start.map_or(0, <[u8]>::len) as size_t,
opt_bytes_to_ptr(end),
end.map_or(0, <[u8]>::len) as size_t,
);
}
}
pub fn compact_range_cf<S: AsRef<[u8]>, E: AsRef<[u8]>>(
&self,
cf: &impl AsColumnFamilyRef,
start: Option<S>,
end: Option<E>,
) {
unsafe {
let start = start.as_ref().map(AsRef::as_ref);
let end = end.as_ref().map(AsRef::as_ref);
ffi::rocksdb_compact_range_cf(
self.inner.inner(),
cf.inner(),
opt_bytes_to_ptr(start),
start.map_or(0, <[u8]>::len) as size_t,
opt_bytes_to_ptr(end),
end.map_or(0, <[u8]>::len) as size_t,
);
}
}
pub fn compact_range_cf_opt<S: AsRef<[u8]>, E: AsRef<[u8]>>(
&self,
cf: &impl AsColumnFamilyRef,
start: Option<S>,
end: Option<E>,
opts: &CompactOptions,
) {
unsafe {
let start = start.as_ref().map(AsRef::as_ref);
let end = end.as_ref().map(AsRef::as_ref);
ffi::rocksdb_compact_range_cf_opt(
self.inner.inner(),
cf.inner(),
opts.inner,
opt_bytes_to_ptr(start),
start.map_or(0, <[u8]>::len) as size_t,
opt_bytes_to_ptr(end),
end.map_or(0, <[u8]>::len) as size_t,
);
}
}
pub fn set_options(&self, opts: &[(&str, &str)]) -> Result<(), Error> {
let copts = convert_options(opts)?;
let cnames: Vec<*const c_char> = copts.iter().map(|opt| opt.0.as_ptr()).collect();
let cvalues: Vec<*const c_char> = copts.iter().map(|opt| opt.1.as_ptr()).collect();
let count = opts.len() as i32;
unsafe {
ffi_try!(ffi::rocksdb_set_options(
self.inner.inner(),
count,
cnames.as_ptr(),
cvalues.as_ptr(),
));
}
Ok(())
}
pub fn set_options_cf(
&self,
cf: &impl AsColumnFamilyRef,
opts: &[(&str, &str)],
) -> Result<(), Error> {
let copts = convert_options(opts)?;
let cnames: Vec<*const c_char> = copts.iter().map(|opt| opt.0.as_ptr()).collect();
let cvalues: Vec<*const c_char> = copts.iter().map(|opt| opt.1.as_ptr()).collect();
let count = opts.len() as i32;
unsafe {
ffi_try!(ffi::rocksdb_set_options_cf(
self.inner.inner(),
cf.inner(),
count,
cnames.as_ptr(),
cvalues.as_ptr(),
));
}
Ok(())
}
fn property_value_impl<R>(
name: impl CStrLike,
get_property: impl FnOnce(*const c_char) -> *mut c_char,
parse: impl FnOnce(&str) -> Result<R, Error>,
) -> Result<Option<R>, Error> {
let value = match name.bake() {
Ok(prop_name) => get_property(prop_name.as_ptr()),
Err(e) => {
return Err(Error::new(format!(
"Failed to convert property name to CString: {e}"
)));
}
};
if value.is_null() {
return Ok(None);
}
let result = match unsafe { CStr::from_ptr(value) }.to_str() {
Ok(s) => parse(s).map(|value| Some(value)),
Err(e) => Err(Error::new(format!(
"Failed to convert property value to string: {e}"
))),
};
unsafe {
libc::free(value as *mut c_void);
}
result
}
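    /// Retrieves a database property by name, for example `"rocksdb.stats"`,
    /// returning `None` if the property is not known.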
pub fn property_value(&self, name: impl CStrLike) -> Result<Option<String>, Error> {
Self::property_value_impl(
name,
|prop_name| unsafe { ffi::rocksdb_property_value(self.inner.inner(), prop_name) },
|str_value| Ok(str_value.to_owned()),
)
}
pub fn property_value_cf(
&self,
cf: &impl AsColumnFamilyRef,
name: impl CStrLike,
) -> Result<Option<String>, Error> {
Self::property_value_impl(
name,
|prop_name| unsafe {
ffi::rocksdb_property_value_cf(self.inner.inner(), cf.inner(), prop_name)
},
|str_value| Ok(str_value.to_owned()),
)
}
fn parse_property_int_value(value: &str) -> Result<u64, Error> {
value.parse::<u64>().map_err(|err| {
Error::new(format!(
"Failed to convert property value {value} to int: {err}"
))
})
}
pub fn property_int_value(&self, name: impl CStrLike) -> Result<Option<u64>, Error> {
Self::property_value_impl(
name,
|prop_name| unsafe { ffi::rocksdb_property_value(self.inner.inner(), prop_name) },
Self::parse_property_int_value,
)
}
pub fn property_int_value_cf(
&self,
cf: &impl AsColumnFamilyRef,
name: impl CStrLike,
) -> Result<Option<u64>, Error> {
Self::property_value_impl(
name,
|prop_name| unsafe {
ffi::rocksdb_property_value_cf(self.inner.inner(), cf.inner(), prop_name)
},
Self::parse_property_int_value,
)
}
pub fn latest_sequence_number(&self) -> u64 {
unsafe { ffi::rocksdb_get_latest_sequence_number(self.inner.inner()) }
}
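    /// Returns an iterator over the write batches recorded in the WAL starting
    /// from sequence number `seq_number`.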
pub fn get_updates_since(&self, seq_number: u64) -> Result<DBWALIterator, Error> {
unsafe {
            // `rocksdb_wal_readoptions_t` does not appear to have create/destroy
            // functions in the C bindings, so a null pointer is passed here to
            // get the default behaviour.
            let opts: *const ffi::rocksdb_wal_readoptions_t = ptr::null();
let iter = ffi_try!(ffi::rocksdb_get_updates_since(
self.inner.inner(),
seq_number,
opts
));
Ok(DBWALIterator {
inner: iter,
start_seq_number: seq_number,
})
}
}
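    /// For a database opened as a secondary instance, replays the primary's
    /// newer MANIFEST and WAL updates so that the secondary catches up.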
pub fn try_catch_up_with_primary(&self) -> Result<(), Error> {
unsafe {
ffi_try!(ffi::rocksdb_try_catch_up_with_primary(self.inner.inner()));
}
Ok(())
}
pub fn ingest_external_file<P: AsRef<Path>>(&self, paths: Vec<P>) -> Result<(), Error> {
let opts = IngestExternalFileOptions::default();
self.ingest_external_file_opts(&opts, paths)
}
pub fn ingest_external_file_opts<P: AsRef<Path>>(
&self,
opts: &IngestExternalFileOptions,
paths: Vec<P>,
) -> Result<(), Error> {
let paths_v: Vec<CString> = paths.iter().map(to_cpath).collect::<Result<Vec<_>, _>>()?;
let cpaths: Vec<_> = paths_v.iter().map(|path| path.as_ptr()).collect();
self.ingest_external_file_raw(opts, &paths_v, &cpaths)
}
pub fn ingest_external_file_cf<P: AsRef<Path>>(
&self,
cf: &impl AsColumnFamilyRef,
paths: Vec<P>,
) -> Result<(), Error> {
let opts = IngestExternalFileOptions::default();
self.ingest_external_file_cf_opts(cf, &opts, paths)
}
pub fn ingest_external_file_cf_opts<P: AsRef<Path>>(
&self,
cf: &impl AsColumnFamilyRef,
opts: &IngestExternalFileOptions,
paths: Vec<P>,
) -> Result<(), Error> {
let paths_v: Vec<CString> = paths.iter().map(to_cpath).collect::<Result<Vec<_>, _>>()?;
let cpaths: Vec<_> = paths_v.iter().map(|path| path.as_ptr()).collect();
self.ingest_external_file_raw_cf(cf, opts, &paths_v, &cpaths)
}
fn ingest_external_file_raw(
&self,
opts: &IngestExternalFileOptions,
paths_v: &[CString],
cpaths: &[*const c_char],
) -> Result<(), Error> {
unsafe {
ffi_try!(ffi::rocksdb_ingest_external_file(
self.inner.inner(),
cpaths.as_ptr(),
paths_v.len(),
opts.inner as *const _
));
Ok(())
}
}
fn ingest_external_file_raw_cf(
&self,
cf: &impl AsColumnFamilyRef,
opts: &IngestExternalFileOptions,
paths_v: &[CString],
cpaths: &[*const c_char],
) -> Result<(), Error> {
unsafe {
ffi_try!(ffi::rocksdb_ingest_external_file_cf(
self.inner.inner(),
cf.inner(),
cpaths.as_ptr(),
paths_v.len(),
opts.inner as *const _
));
Ok(())
}
}
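    /// Returns metadata about the SST files currently live in the database;
    /// see [`LiveFile`].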
pub fn live_files(&self) -> Result<Vec<LiveFile>, Error> {
unsafe {
let files = ffi::rocksdb_livefiles(self.inner.inner());
if files.is_null() {
Err(Error::new("Could not get live files".to_owned()))
} else {
let n = ffi::rocksdb_livefiles_count(files);
let mut livefiles = Vec::with_capacity(n as usize);
let mut key_size: usize = 0;
for i in 0..n {
let column_family_name =
from_cstr(ffi::rocksdb_livefiles_column_family_name(files, i));
let name = from_cstr(ffi::rocksdb_livefiles_name(files, i));
let size = ffi::rocksdb_livefiles_size(files, i);
let level = ffi::rocksdb_livefiles_level(files, i);
let smallest_key = ffi::rocksdb_livefiles_smallestkey(files, i, &mut key_size);
let smallest_key = raw_data(smallest_key, key_size);
let largest_key = ffi::rocksdb_livefiles_largestkey(files, i, &mut key_size);
let largest_key = raw_data(largest_key, key_size);
livefiles.push(LiveFile {
column_family_name,
name,
size,
level,
start_key: smallest_key,
end_key: largest_key,
num_entries: ffi::rocksdb_livefiles_entries(files, i),
num_deletions: ffi::rocksdb_livefiles_deletions(files, i),
});
}
ffi::rocksdb_livefiles_destroy(files);
Ok(livefiles)
}
}
}
pub fn delete_file_in_range<K: AsRef<[u8]>>(&self, from: K, to: K) -> Result<(), Error> {
let from = from.as_ref();
let to = to.as_ref();
unsafe {
ffi_try!(ffi::rocksdb_delete_file_in_range(
self.inner.inner(),
from.as_ptr() as *const c_char,
from.len() as size_t,
to.as_ptr() as *const c_char,
to.len() as size_t,
));
Ok(())
}
}
pub fn delete_file_in_range_cf<K: AsRef<[u8]>>(
&self,
cf: &impl AsColumnFamilyRef,
from: K,
to: K,
) -> Result<(), Error> {
let from = from.as_ref();
let to = to.as_ref();
unsafe {
ffi_try!(ffi::rocksdb_delete_file_in_range_cf(
self.inner.inner(),
cf.inner(),
from.as_ptr() as *const c_char,
from.len() as size_t,
to.as_ptr() as *const c_char,
to.len() as size_t,
));
Ok(())
}
}
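    /// Requests that background work (flushes and compactions) be cancelled;
    /// when `wait` is true, the call blocks until the background jobs have
    /// stopped.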
pub fn cancel_all_background_work(&self, wait: bool) {
unsafe {
ffi::rocksdb_cancel_all_background_work(self.inner.inner(), c_uchar::from(wait));
}
}
fn drop_column_family<C>(
&self,
cf_inner: *mut ffi::rocksdb_column_family_handle_t,
cf: C,
) -> Result<(), Error> {
unsafe {
ffi_try!(ffi::rocksdb_drop_column_family(
self.inner.inner(),
cf_inner
));
}
drop(cf);
Ok(())
}
}
impl<I: DBInner> DBCommon<SingleThreaded, I> {
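    /// Creates a new column family; in `SingleThreaded` mode this requires
    /// exclusive (`&mut`) access to the database.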
pub fn create_cf<N: AsRef<str>>(&mut self, name: N, opts: &Options) -> Result<(), Error> {
let inner = self.create_inner_cf_handle(name.as_ref(), opts)?;
self.cfs
.cfs
.insert(name.as_ref().to_string(), ColumnFamily { inner });
Ok(())
}
pub fn drop_cf(&mut self, name: &str) -> Result<(), Error> {
if let Some(cf) = self.cfs.cfs.remove(name) {
self.drop_column_family(cf.inner, cf)
} else {
Err(Error::new(format!("Invalid column family: {name}")))
}
}
pub fn cf_handle(&self, name: &str) -> Option<&ColumnFamily> {
self.cfs.cfs.get(name)
}
}
impl<I: DBInner> DBCommon<MultiThreaded, I> {
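    /// Creates a new column family through a shared reference; the column
    /// family map is synchronised with an internal `RwLock`.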
pub fn create_cf<N: AsRef<str>>(&self, name: N, opts: &Options) -> Result<(), Error> {
let inner = self.create_inner_cf_handle(name.as_ref(), opts)?;
self.cfs.cfs.write().unwrap().insert(
name.as_ref().to_string(),
Arc::new(UnboundColumnFamily { inner }),
);
Ok(())
}
pub fn drop_cf(&self, name: &str) -> Result<(), Error> {
if let Some(cf) = self.cfs.cfs.write().unwrap().remove(name) {
self.drop_column_family(cf.inner, cf)
} else {
Err(Error::new(format!("Invalid column family: {name}")))
}
}
pub fn cf_handle(&self, name: &str) -> Option<Arc<BoundColumnFamily>> {
self.cfs
.cfs
.read()
.unwrap()
.get(name)
.cloned()
.map(UnboundColumnFamily::bound_column_family)
}
}
impl<T: ThreadMode, I: DBInner> Drop for DBCommon<T, I> {
fn drop(&mut self) {
self.cfs.drop_all_cfs_internal();
}
}
impl<T: ThreadMode, I: DBInner> fmt::Debug for DBCommon<T, I> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "RocksDB {{ path: {:?} }}", self.path())
}
}
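/// Metadata about one live SST file, as returned by `live_files`: the column
/// family and file name, on-disk size, LSM level, smallest and largest keys,
/// and entry/deletion counts.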
#[derive(Debug, Clone)]
pub struct LiveFile {
pub column_family_name: String,
pub name: String,
pub size: usize,
pub level: i32,
pub start_key: Option<Vec<u8>>,
pub end_key: Option<Vec<u8>>,
pub num_entries: u64,
pub num_deletions: u64,
}
fn convert_options(opts: &[(&str, &str)]) -> Result<Vec<(CString, CString)>, Error> {
opts.iter()
.map(|(name, value)| {
let cname = match CString::new(name.as_bytes()) {
Ok(cname) => cname,
Err(e) => return Err(Error::new(format!("Invalid option name `{e}`"))),
};
let cvalue = match CString::new(value.as_bytes()) {
Ok(cvalue) => cvalue,
Err(e) => return Err(Error::new(format!("Invalid option value: `{e}`"))),
};
Ok((cname, cvalue))
})
.collect()
}
pub(crate) fn convert_values(
values: Vec<*mut c_char>,
values_sizes: Vec<usize>,
errors: Vec<*mut c_char>,
) -> Vec<Result<Option<Vec<u8>>, Error>> {
values
.into_iter()
        .zip(values_sizes)
        .zip(errors)
.map(|((v, s), e)| {
if e.is_null() {
let value = unsafe { crate::ffi_util::raw_data(v, s) };
unsafe {
ffi::rocksdb_free(v as *mut c_void);
}
Ok(value)
} else {
Err(Error::new(crate::ffi_util::error_message(e)))
}
})
.collect()
}