update some Rust dependencies

I didn't go to quite the latest version of everything, in an effort to
minimize duplicates in the cargo tree.
Scott Lamb 2024-01-06 10:43:20 -08:00
parent 2bcee02ea6
commit 86816e862a
25 changed files with 695 additions and 597 deletions

server/Cargo.lock (generated; 1089 changed lines): diff suppressed because it is too large.


@@ -25,8 +25,11 @@ members = ["base", "db"]
[workspace.dependencies]
base64 = "0.21.0"
h264-reader = "0.7.0"
nix = "0.26.1"
itertools = "0.12.0"
nix = "0.27.0"
tracing = { version = "0.1", features = ["log"] }
tracing-log = "0.2"
ring = "0.17.0"
rusqlite = "0.30.0"
[dependencies]
@@ -40,22 +43,21 @@ chrono = "0.4.23"
cursive = { version = "0.20.0", default-features = false, features = ["termion-backend"] }
db = { package = "moonfire-db", path = "db" }
futures = "0.3"
fnv = "1.0"
h264-reader = { workspace = true }
http = "0.2.3"
http-serve = { version = "0.3.1", features = ["dir"] }
hyper = { version = "0.14.2", features = ["http1", "server", "stream", "tcp"] }
itertools = "0.10.0"
itertools = { workspace = true }
libc = "0.2"
log = { version = "0.4" }
memchr = "2.0.2"
nix = { workspace = true}
nix = { workspace = true, features = ["time", "user"] }
nom = "7.0.0"
password-hash = "0.4.2"
password-hash = "0.5.0"
protobuf = "3.0"
reffers = "0.7.0"
retina = "0.4.0"
ring = "0.16.2"
ring = { workspace = true }
rusqlite = { workspace = true }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
@@ -65,12 +67,12 @@ time = "0.1"
tokio = { version = "1.24", features = ["macros", "rt-multi-thread", "signal", "sync", "time"] }
tokio-stream = "0.1.5"
tokio-tungstenite = "0.20.0"
toml = "0.5"
toml = "0.6"
tracing = { workspace = true }
tracing-subscriber = { version = "0.3.16", features = ["env-filter", "json"] }
tracing-core = "0.1.30"
tracing-futures = { version = "0.2.5", features = ["futures-03", "std-future"] }
tracing-log = "0.1.3"
tracing-log = { workspace = true }
ulid = "1.0.0"
url = "2.1.1"
uuid = { version = "1.1.2", features = ["serde", "std", "v4"] }
@@ -78,11 +80,11 @@ flate2 = "1.0.26"
git-version = "0.3.5"
[target.'cfg(target_os = "linux")'.dependencies]
libsystemd = "0.6.0"
libsystemd = "0.7.0"
[build-dependencies]
ahash = "0.8"
blake3 = "1.0.0"
fnv = "1.0"
walkdir = "2.3.3"
[dev-dependencies]


@@ -14,6 +14,7 @@ nightly = []
path = "lib.rs"
[dependencies]
ahash = "0.8"
chrono = "0.4.23"
coded = { git = "https://github.com/scottlamb/coded", rev = "2c97994974a73243d5dd12134831814f42cdb0e8"}
futures = "0.3"
@@ -27,5 +28,5 @@ slab = "0.4"
time = "0.1"
tracing = { workspace = true }
tracing-core = "0.1.30"
tracing-log = "0.1.3"
tracing-log = { workspace = true }
tracing-subscriber = { version = "0.3.16", features = ["env-filter", "json"] }


@@ -10,3 +10,7 @@ pub mod time;
pub mod tracing_setup;
pub use crate::error::{Error, ErrorBuilder, ErrorKind, ResultExt};
pub use ahash::RandomState;
pub type FastHashMap<K, V> = std::collections::HashMap<K, V, ahash::RandomState>;
pub type FastHashSet<K> = std::collections::HashSet<K, ahash::RandomState>;
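The `FastHashMap`/`FastHashSet` aliases added above are what the rest of this commit migrates to: each `fnv::FnvHashMap`/`FnvHashSet` in the tree becomes a `std` map keyed with `ahash`'s randomized state, so the `fnv` dependency can be dropped. A minimal sketch of why the migration in the later hunks is almost purely mechanical (assuming only `std` and the `ahash` crate):

```rust
use std::collections::HashMap;

// Same shape as the alias above: a std HashMap with ahash's hasher
// instead of the default SipHash or the old fnv hasher.
type FastHashMap<K, V> = HashMap<K, V, ahash::RandomState>;

fn main() {
    // `ahash::RandomState` implements `Default`, so former
    // `FnvHashMap::default()` call sites only swap the type name...
    let mut m: FastHashMap<i32, &'static str> = FastHashMap::default();
    m.insert(1, "one");
    assert_eq!(m.get(&1), Some(&"one"));

    // ...and capacity-based construction keeps the same two-argument form
    // seen in the `with_capacity_and_hasher` calls in the hunks below.
    let mut sized: FastHashMap<i32, &'static str> =
        FastHashMap::with_capacity_and_hasher(16, Default::default());
    sized.insert(2, "two");
}
```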


@@ -56,7 +56,7 @@ impl FileEncoding {
/// `favicons/blah.ico` rather than `../../ui/dist/favicons/blah.ico.gz`.
///
/// The best representation is gzipped if available, uncompressed otherwise.
type FileMap = fnv::FnvHashMap<String, File>;
type FileMap = std::collections::HashMap<String, File, ahash::RandomState>;
fn stringify_files(files: &FileMap) -> Result<String, std::fmt::Error> {
let mut buf = String::new();


@@ -21,23 +21,21 @@ blake3 = "1.0.0"
byteorder = "1.0"
cstr = "0.2.5"
diff = "0.1.12"
fnv = "1.0"
futures = "0.3"
h264-reader = { workspace = true }
hashlink = "0.8.1"
itertools = "0.10.0"
itertools = { workspace = true }
libc = "0.2"
nix = "0.26.1"
nix = { workspace = true, features = ["dir", "feature", "fs", "mman"] }
num-rational = { version = "0.4.0", default-features = false, features = ["std"] }
odds = { version = "0.4.0", features = ["std-vec"] }
pretty-hex = "0.3.0"
pretty-hex = "0.4.0"
protobuf = "3.0"
ring = "0.16.2"
ring = { workspace = true }
rusqlite = { workspace = true }
scrypt = "0.10.0"
scrypt = "0.11.0"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
#similar = "2.1.0"
smallvec = "1.0"
tempfile = "3.2.0"
time = "0.1"


@@ -6,9 +6,9 @@
use crate::json::UserConfig;
use crate::schema::Permissions;
use base::FastHashMap;
use base::{bail, err, strutil, Error, ErrorKind, ResultExt as _};
use base64::{engine::general_purpose::STANDARD_NO_PAD, Engine as _};
use fnv::FnvHashMap;
use protobuf::Message;
use ring::rand::{SecureRandom, SystemRandom};
use rusqlite::{named_params, params, Connection, Transaction};
@@ -42,7 +42,8 @@ fn params() -> &'static Params {
/// For testing only: use fast but insecure hashes.
/// Call via `testutil::init()`.
pub(crate) fn set_test_config() {
let test_params = scrypt::Params::new(8, 8, 1).expect("test params should be valid");
let test_params = scrypt::Params::new(8, 8, 1, scrypt::Params::RECOMMENDED_LEN)
.expect("test params should be valid");
if let Err(existing_params) = PARAMS.set(Params {
actual: test_params,
is_test: true,
@@ -386,7 +387,7 @@ pub(crate) struct State {
/// TODO: Add eviction of clean sessions. Keep a linked hash set of clean session hashes and
/// evict the oldest when its size exceeds a threshold. Or just evict everything on every flush
/// (and accept more frequent database accesses).
sessions: FnvHashMap<SessionHash, Session>,
sessions: FastHashMap<SessionHash, Session>,
rand: SystemRandom,
}
@@ -396,7 +397,7 @@ impl State {
let mut state = State {
users_by_id: BTreeMap::new(),
users_by_name: BTreeMap::new(),
sessions: FnvHashMap::default(),
sessions: FastHashMap::default(),
rand: ring::rand::SystemRandom::new(),
};
let mut stmt = conn.prepare(
@@ -657,7 +658,7 @@ impl State {
domain: Option<Vec<u8>>,
creation_password_id: Option<i32>,
flags: i32,
sessions: &'s mut FnvHashMap<SessionHash, Session>,
sessions: &'s mut FastHashMap<SessionHash, Session>,
permissions: Permissions,
) -> Result<(RawSessionId, &'s Session), base::Error> {
let mut session_id = RawSessionId([0u8; 48]);
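The one non-mechanical change in this part of the diff is the scrypt 0.10 → 0.11 bump: `Params::new` now takes a fourth argument, the desired derived-key length in bytes. A small sketch of the migrated call, using the test values from the hunk above; `Params::RECOMMENDED_LEN` is the crate-provided default length:

```rust
// scrypt 0.10: Params::new(log_n, r, p)
// scrypt 0.11: Params::new(log_n, r, p, len)
fn fast_insecure_test_params() -> scrypt::Params {
    scrypt::Params::new(8, 8, 1, scrypt::Params::RECOMMENDED_LEN)
        .expect("test params should be valid")
}
```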


@@ -12,7 +12,7 @@ use crate::raw;
use crate::recording;
use crate::schema;
use base::{err, Error};
use fnv::{FnvHashMap, FnvHashSet};
use base::{FastHashMap, FastHashSet};
use nix::fcntl::AtFlags;
use rusqlite::params;
use std::os::unix::io::AsRawFd;
@@ -27,8 +27,8 @@ pub struct Options {
#[derive(Default)]
pub struct Context {
rows_to_delete: FnvHashSet<CompositeId>,
files_to_trash: FnvHashSet<(i32, CompositeId)>, // (dir_id, composite_id)
rows_to_delete: FastHashSet<CompositeId>,
files_to_trash: FastHashSet<(i32, CompositeId)>, // (dir_id, composite_id)
}
pub fn run(conn: &mut rusqlite::Connection, opts: &Options) -> Result<i32, Error> {
@@ -79,7 +79,7 @@ pub fn run(conn: &mut rusqlite::Connection, opts: &Options) -> Result<i32, Error
let (db_uuid, _config) = raw::read_meta(conn)?;
// Scan directories.
let mut dirs_by_id: FnvHashMap<i32, Dir> = FnvHashMap::default();
let mut dirs_by_id: FastHashMap<i32, Dir> = FastHashMap::default();
{
let mut dir_stmt = conn.prepare(
r#"
@@ -229,11 +229,11 @@ struct Recording {
#[derive(Default)]
struct Stream {
recordings: FnvHashMap<i32, Recording>,
recordings: FastHashMap<i32, Recording>,
cum_recordings: Option<i32>,
}
type Dir = FnvHashMap<i32, Stream>;
type Dir = FastHashMap<i32, Stream>;
fn summarize_index(video_index: &[u8]) -> Result<RecordingSummary, Error> {
let mut it = recording::SampleIndexIterator::default();


@@ -37,8 +37,7 @@ use crate::signal;
use base::clock::{self, Clocks};
use base::strutil::encode_size;
use base::{bail, err, Error};
// use failure::{bail, err, Error, ResultExt};
use fnv::{FnvHashMap, FnvHashSet};
use base::{FastHashMap, FastHashSet};
use hashlink::LinkedHashMap;
use itertools::Itertools;
use rusqlite::{named_params, params};
@@ -325,7 +324,7 @@ pub struct SampleFileDir {
/// ids which are in the `garbage` database table (rather than `recording`) as of last commit
/// but may still exist on disk. These can't be safely removed from the database yet.
pub(crate) garbage_needs_unlink: FnvHashSet<CompositeId>,
pub(crate) garbage_needs_unlink: FastHashSet<CompositeId>,
/// ids which are in the `garbage` database table and are guaranteed to no longer exist on
/// disk (have been unlinked and the dir has been synced). These may be removed from the
@@ -620,7 +619,7 @@ pub struct LockedDatabase {
streams_by_id: BTreeMap<i32, Stream>,
cameras_by_uuid: BTreeMap<Uuid, i32>, // values are ids.
video_sample_entries_by_id: BTreeMap<i32, Arc<VideoSampleEntry>>,
video_index_cache: RefCell<LinkedHashMap<i64, Box<[u8]>, fnv::FnvBuildHasher>>,
video_index_cache: RefCell<LinkedHashMap<i64, Box<[u8]>, base::RandomState>>,
on_flush: Vec<Box<dyn Fn() + Send>>,
}
@@ -1010,7 +1009,7 @@ impl LockedDatabase {
};
let tx = self.conn.transaction()?;
let mut new_ranges =
FnvHashMap::with_capacity_and_hasher(self.streams_by_id.len(), Default::default());
FastHashMap::with_capacity_and_hasher(self.streams_by_id.len(), Default::default());
{
let mut stmt = tx.prepare_cached(UPDATE_STREAM_COUNTERS_SQL)?;
for (&stream_id, s) in &self.streams_by_id {
@@ -1100,7 +1099,7 @@ impl LockedDatabase {
added_bytes: i64,
deleted_bytes: i64,
}
let mut dir_logs: FnvHashMap<i32, DirLog> = FnvHashMap::default();
let mut dir_logs: FastHashMap<i32, DirLog> = FastHashMap::default();
// Process delete_garbage.
for (&id, dir) in &mut self.sample_file_dirs_by_id {
@@ -1214,7 +1213,7 @@ impl LockedDatabase {
/// Currently this only happens at startup (or during configuration), so this isn't a problem
/// in practice.
pub fn open_sample_file_dirs(&mut self, ids: &[i32]) -> Result<(), Error> {
let mut in_progress = FnvHashMap::with_capacity_and_hasher(ids.len(), Default::default());
let mut in_progress = FastHashMap::with_capacity_and_hasher(ids.len(), Default::default());
for &id in ids {
let e = in_progress.entry(id);
use ::std::collections::hash_map::Entry;
@@ -1837,7 +1836,7 @@ impl LockedDatabase {
uuid,
dir: Some(dir),
last_complete_open: Some(*o),
garbage_needs_unlink: FnvHashSet::default(),
garbage_needs_unlink: FastHashSet::default(),
garbage_unlinked: Vec::new(),
}),
Entry::Occupied(_) => bail!(Internal, msg("duplicate sample file dir id {id}")),
@@ -2161,7 +2160,7 @@ impl LockedDatabase {
pub fn signals_by_id(&self) -> &BTreeMap<u32, signal::Signal> {
self.signal.signals_by_id()
}
pub fn signal_types_by_uuid(&self) -> &FnvHashMap<Uuid, signal::Type> {
pub fn signal_types_by_uuid(&self) -> &FastHashMap<Uuid, signal::Type> {
self.signal.types_by_uuid()
}
pub fn list_changes_by_time(


@@ -25,6 +25,7 @@ use std::ffi::CStr;
use std::fs;
use std::io::{Read, Write};
use std::ops::Range;
use std::os::fd::{AsFd, BorrowedFd};
use std::os::unix::io::{AsRawFd, RawFd};
use std::path::Path;
use std::sync::Arc;
@@ -87,9 +88,9 @@ impl NixPath for CompositeIdPath {
#[derive(Debug)]
pub struct Fd(std::os::unix::io::RawFd);
impl std::os::unix::io::AsRawFd for Fd {
fn as_raw_fd(&self) -> std::os::unix::io::RawFd {
self.0
impl AsFd for Fd {
fn as_fd(&self) -> std::os::unix::prelude::BorrowedFd<'_> {
unsafe { BorrowedFd::borrow_raw(self.0) }
}
}
@@ -316,7 +317,7 @@ impl SampleFileDir {
pub(crate) fn opendir(&self) -> Result<nix::dir::Dir, nix::Error> {
nix::dir::Dir::openat(
self.fd.as_raw_fd(),
self.fd.as_fd().as_raw_fd(),
".",
OFlag::O_DIRECTORY | OFlag::O_RDONLY,
Mode::empty(),


@@ -22,7 +22,6 @@
use std::convert::TryFrom;
use std::future::Future;
use std::os::unix::prelude::AsRawFd;
use std::path::Path;
use std::{
ops::Range,
@@ -352,7 +351,7 @@ impl ReaderInt {
map_len,
nix::sys::mman::ProtFlags::PROT_READ,
nix::sys::mman::MapFlags::MAP_SHARED,
file.as_raw_fd(),
Some(&file),
offset,
)
}
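Both of the preceding changes come from nix 0.27's adoption of Rust's I/O-safety types: APIs now take `impl AsFd`/`BorrowedFd` (and `mmap` takes `Some(&file)`) rather than a bare `RawFd`. A sketch of the wrapper pattern, assuming the `Fd` newtype owns its descriptor for its whole lifetime — the invariant `BorrowedFd::borrow_raw` requires:

```rust
use std::os::fd::{AsFd, AsRawFd, BorrowedFd, RawFd};

/// A wrapper owning a raw descriptor, mirroring the `Fd` type above.
struct Fd(RawFd);

impl AsFd for Fd {
    fn as_fd(&self) -> BorrowedFd<'_> {
        // SAFETY: the descriptor stays open for as long as `self` lives,
        // which is exactly what `borrow_raw` asks of its caller.
        unsafe { BorrowedFd::borrow_raw(self.0) }
    }
}

fn bridge(fd: &Fd) -> RawFd {
    // For calls that still want a RawFd, the diff routes through the
    // I/O-safe handle first: `as_fd().as_raw_fd()`.
    fd.as_fd().as_raw_fd()
}
```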


@@ -7,8 +7,8 @@
use crate::db::{self, CompositeId, SqlUuid};
use crate::json::GlobalConfig;
use crate::recording;
use base::FastHashSet;
use base::{bail, err, Error, ErrorKind, ResultExt as _};
use fnv::FnvHashSet;
use rusqlite::{named_params, params};
use std::ops::Range;
use uuid::Uuid;
@@ -422,8 +422,8 @@ pub(crate) fn get_range(
pub(crate) fn list_garbage(
conn: &rusqlite::Connection,
dir_id: i32,
) -> Result<FnvHashSet<CompositeId>, Error> {
let mut garbage = FnvHashSet::default();
) -> Result<FastHashSet<CompositeId>, Error> {
let mut garbage = FastHashSet::default();
let mut stmt =
conn.prepare_cached("select composite_id from garbage where sample_file_dir_id = ?")?;
let mut rows = stmt.query([&dir_id])?;


@@ -8,8 +8,8 @@
use crate::json::{SignalConfig, SignalTypeConfig};
use crate::{coding, days};
use crate::{recording, SqlUuid};
use base::FastHashMap;
use base::{bail, err, Error};
use fnv::FnvHashMap;
use rusqlite::{params, Connection, Transaction};
use std::collections::btree_map::Entry;
use std::collections::{BTreeMap, BTreeSet};
@@ -25,7 +25,7 @@ pub(crate) struct State {
/// All types with known states. Note that currently there's no requirement an entry here
/// exists for every `type_` specified in a `Signal`, and there's an implied `0` (unknown)
/// state for every `Type`.
types_by_uuid: FnvHashMap<Uuid, Type>,
types_by_uuid: FastHashMap<Uuid, Type>,
/// All points in time.
/// Invariants, checked by `State::debug_assert_point_invariants`:
@@ -691,8 +691,8 @@ impl State {
Ok(signals)
}
fn init_types(conn: &Connection) -> Result<FnvHashMap<Uuid, Type>, Error> {
let mut types = FnvHashMap::default();
fn init_types(conn: &Connection) -> Result<FastHashMap<Uuid, Type>, Error> {
let mut types = FastHashMap::default();
let mut stmt = conn.prepare(
r#"
select
@@ -790,7 +790,7 @@ impl State {
pub fn signals_by_id(&self) -> &BTreeMap<u32, Signal> {
&self.signals_by_id
}
pub fn types_by_uuid(&self) -> &FnvHashMap<Uuid, Type> {
pub fn types_by_uuid(&self) -> &FastHashMap<Uuid, Type> {
&self.types_by_uuid
}


@@ -9,7 +9,7 @@ use crate::db;
use crate::dir;
use crate::writer;
use base::clock::Clocks;
use fnv::FnvHashMap;
use base::FastHashMap;
use std::env;
use std::sync::Arc;
use std::thread;
@@ -47,7 +47,7 @@ pub fn init() {
pub struct TestDb<C: Clocks + Clone> {
pub db: Arc<db::Database<C>>,
pub dirs_by_stream_id: Arc<FnvHashMap<i32, Arc<dir::SampleFileDir>>>,
pub dirs_by_stream_id: Arc<FastHashMap<i32, Arc<dir::SampleFileDir>>>,
pub shutdown_tx: base::shutdown::Sender,
pub shutdown_rx: base::shutdown::Receiver,
pub syncer_channel: writer::SyncerChannel<::std::fs::File>,
@@ -116,7 +116,7 @@ impl<C: Clocks + Clone> TestDb<C> {
.get()
.unwrap();
}
let mut dirs_by_stream_id = FnvHashMap::default();
let mut dirs_by_stream_id = FastHashMap::default();
dirs_by_stream_id.insert(TEST_STREAM_ID, dir);
let (shutdown_tx, shutdown_rx) = base::shutdown::channel();
let (syncer_channel, syncer_join) =


@@ -166,7 +166,7 @@ mod tests {
use crate::compare;
use crate::testutil;
use base::err;
use fnv::FnvHashMap;
use base::FastHashMap;
const BAD_ANAMORPHIC_VIDEO_SAMPLE_ENTRY: &[u8] = b"\x00\x00\x00\x84\x61\x76\x63\x31\x00\x00\
\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
@@ -344,7 +344,7 @@
"#,
)?;
let mut rows = stmt.query(params![])?;
let mut pasp_by_id = FnvHashMap::default();
let mut pasp_by_id = FastHashMap::default();
while let Some(row) = rows.next()? {
let id: i32 = row.get(0)?;
let pasp_h_spacing: i32 = row.get(1)?;


@@ -308,7 +308,7 @@ fn verify_dir_contents(
params![],
|r| r.get(0),
)?;
let mut files = ::fnv::FnvHashSet::with_capacity_and_hasher(n as usize, Default::default());
let mut files = ::base::FastHashSet::with_capacity_and_hasher(n as usize, Default::default());
for e in dir.iter() {
let e = e?;
let f = e.file_name();


@@ -10,6 +10,7 @@ use crate::dir;
use crate::schema;
use base::Error;
use rusqlite::params;
use std::os::fd::AsFd as _;
use std::os::unix::io::AsRawFd;
use std::path::PathBuf;
use std::sync::Arc;
@@ -71,9 +72,9 @@ pub fn run(_args: &super::Args, tx: &rusqlite::Transaction) -> Result<(), Error>
let from_path = super::UuidPath::from(sample_file_uuid.0);
let to_path = crate::dir::CompositeIdPath::from(id);
if let Err(e) = nix::fcntl::renameat(
Some(d.fd.as_raw_fd()),
Some(d.fd.as_fd().as_raw_fd()),
&from_path,
Some(d.fd.as_raw_fd()),
Some(d.fd.as_fd().as_raw_fd()),
&to_path,
) {
if e == nix::Error::ENOENT {


@@ -15,6 +15,7 @@ use nix::sys::stat::Mode;
use protobuf::Message;
use rusqlite::params;
use std::io::{Read, Write};
use std::os::fd::AsFd as _;
use std::os::unix::io::AsRawFd;
use tracing::info;
use uuid::Uuid;
@@ -25,7 +26,12 @@ const FIXED_DIR_META_LEN: usize = 512;
fn maybe_upgrade_meta(dir: &dir::Fd, db_meta: &schema::DirMeta) -> Result<bool, Error> {
let tmp_path = cstr!("meta.tmp");
let meta_path = cstr!("meta");
let mut f = crate::fs::openat(dir.as_raw_fd(), meta_path, OFlag::O_RDONLY, Mode::empty())?;
let mut f = crate::fs::openat(
dir.as_fd().as_raw_fd(),
meta_path,
OFlag::O_RDONLY,
Mode::empty(),
)?;
let mut data = Vec::new();
f.read_to_end(&mut data)?;
if data.len() == FIXED_DIR_META_LEN {
@@ -49,7 +55,7 @@ fn maybe_upgrade_meta(dir: &dir::Fd, db_meta: &schema::DirMeta) -> Result<bool,
);
}
let mut f = crate::fs::openat(
dir.as_raw_fd(),
dir.as_fd().as_raw_fd(),
tmp_path,
OFlag::O_CREAT | OFlag::O_TRUNC | OFlag::O_WRONLY,
Mode::S_IRUSR | Mode::S_IWUSR,
@@ -72,9 +78,9 @@ fn maybe_upgrade_meta(dir: &dir::Fd, db_meta: &schema::DirMeta) -> Result<bool,
f.sync_all()?;
nix::fcntl::renameat(
Some(dir.as_raw_fd()),
Some(dir.as_fd().as_raw_fd()),
tmp_path,
Some(dir.as_raw_fd()),
Some(dir.as_fd().as_raw_fd()),
meta_path,
)?;
Ok(true)
@@ -89,7 +95,7 @@ fn maybe_upgrade_meta(dir: &dir::Fd, db_meta: &schema::DirMeta) -> Result<bool,
fn maybe_cleanup_garbage_uuids(dir: &dir::Fd) -> Result<bool, Error> {
let mut need_sync = false;
let mut dir2 = nix::dir::Dir::openat(
dir.as_raw_fd(),
dir.as_fd().as_raw_fd(),
".",
OFlag::O_DIRECTORY | OFlag::O_RDONLY,
Mode::empty(),
@@ -105,7 +111,7 @@ fn maybe_cleanup_garbage_uuids(dir: &dir::Fd) -> Result<bool, Error> {
if Uuid::parse_str(f_str).is_ok() {
info!("removing leftover garbage file {}", f_str);
nix::unistd::unlinkat(
Some(dir.as_raw_fd()),
Some(dir.as_fd().as_raw_fd()),
f,
nix::unistd::UnlinkatFlags::NoRemoveDir,
)?;


@@ -2,9 +2,9 @@
// Copyright (C) 2021 The Moonfire NVR Authors; see AUTHORS and LICENSE.txt.
// SPDX-License-Identifier: GPL-v3.0-or-later WITH GPL-3.0-linking-exception
use base::FastHashMap;
/// Upgrades a version 6 schema to a version 7 schema.
use base::{err, Error};
use fnv::FnvHashMap;
use rusqlite::{named_params, params};
use std::{convert::TryFrom, path::PathBuf};
use tracing::debug;
@@ -133,7 +133,7 @@ fn copy_users(tx: &rusqlite::Transaction) -> Result<(), Error> {
}
fn copy_signal_types(tx: &rusqlite::Transaction) -> Result<(), Error> {
let mut types_ = FnvHashMap::default();
let mut types_ = FastHashMap::default();
let mut stmt = tx.prepare("select type_uuid, value, name from signal_type_enum")?;
let mut rows = stmt.query(params![])?;
while let Some(row) = rows.next()? {
@@ -164,7 +164,7 @@ struct Signal {
}
fn copy_signals(tx: &rusqlite::Transaction) -> Result<(), Error> {
let mut signals = FnvHashMap::default();
let mut signals = FastHashMap::default();
// Read from signal table.
{


@@ -9,8 +9,8 @@ use crate::dir;
use crate::recording::{self, MAX_RECORDING_WALL_DURATION};
use base::clock::{self, Clocks};
use base::shutdown::ShutdownError;
use base::FastHashMap;
use base::{bail, err, Error};
use fnv::FnvHashMap;
use std::cmp::{self, Ordering};
use std::convert::TryFrom;
use std::io;
@@ -294,7 +294,7 @@ impl<F: FileWriter> SyncerChannel<F> {
/// on opening.
fn list_files_to_abandon(
dir: &dir::SampleFileDir,
streams_to_next: FnvHashMap<i32, i32>,
streams_to_next: FastHashMap<i32, i32>,
) -> Result<Vec<CompositeId>, Error> {
let mut v = Vec::new();
let mut d = dir.opendir()?;
@@ -330,7 +330,7 @@ impl<C: Clocks + Clone> Syncer<C, Arc<dir::SampleFileDir>> {
// Abandon files.
// First, get a list of the streams in question.
let streams_to_next: FnvHashMap<_, _> = l
let streams_to_next: FastHashMap<_, _> = l
.streams_by_id()
.iter()
.filter_map(|(&k, v)| {


@@ -4,14 +4,14 @@
//! UI bundled (compiled/linked) into the executable for single-file deployment.
use fnv::FnvHashMap;
use base::FastHashMap;
use http::{header, HeaderMap, HeaderValue};
use std::io::Read;
use std::sync::OnceLock;
use crate::body::{BoxedError, Chunk};
pub struct Ui(FnvHashMap<&'static str, FileSet>);
pub struct Ui(FastHashMap<&'static str, FileSet>);
/// A file as passed in from `build.rs`.
struct BuildFile {


@@ -7,10 +7,10 @@ use crate::web;
use crate::web::accept::Listener;
use base::clock;
use base::err;
use base::FastHashMap;
use base::{bail, Error};
use bpaf::Bpaf;
use db::{dir, writer};
use fnv::FnvHashMap;
use hyper::service::{make_service_fn, service_fn};
use itertools::Itertools;
use retina::client::SessionGroup;
@@ -134,7 +134,7 @@ struct Syncer {
}
#[cfg(target_os = "linux")]
fn get_preopened_sockets() -> Result<FnvHashMap<String, Listener>, Error> {
fn get_preopened_sockets() -> Result<FastHashMap<String, Listener>, Error> {
use libsystemd::activation::IsType as _;
use std::os::fd::{FromRawFd, IntoRawFd};
@@ -142,7 +142,7 @@ fn get_preopened_sockets() -> Result<FnvHashMap<String, Listener>, Error> {
// activation.
if std::env::var_os("LISTEN_FDS").is_none() {
info!("no LISTEN_FDs");
return Ok(FnvHashMap::default());
return Ok(FastHashMap::default());
}
let sockets = libsystemd::activation::receive_descriptors_with_names(false)
@@ -176,13 +176,14 @@ fn get_preopened_sockets() -> Result<FnvHashMap<String, Listener>, Error> {
}
#[cfg(not(target_os = "linux"))]
fn get_preopened_sockets() -> Result<FnvHashMap<String, Listener>, Error> {
Ok(FnvHashMap::default())
fn get_preopened_sockets() -> Result<FastHashMap<String, Listener>, Error> {
Ok(FastHashMap::default())
}
fn read_config(path: &Path) -> Result<ConfigFile, Error> {
let config = std::fs::read(path)?;
let config = toml::from_slice(&config).map_err(|e| err!(InvalidArgument, source(e)))?;
let config = std::str::from_utf8(&config).map_err(|e| err!(InvalidArgument, source(e)))?;
let config = toml::from_str(&config).map_err(|e| err!(InvalidArgument, source(e)))?;
Ok(config)
}
@@ -267,7 +268,7 @@ fn prepare_unix_socket(p: &Path) {
fn make_listener(
addr: &config::AddressConfig,
#[cfg_attr(not(target_os = "linux"), allow(unused))] preopened: &mut FnvHashMap<
#[cfg_attr(not(target_os = "linux"), allow(unused))] preopened: &mut FastHashMap<
String,
Listener,
>,
@@ -341,11 +342,11 @@ async fn inner(
// Start a streamer for each stream.
let mut streamers = Vec::new();
let mut session_groups_by_camera: FnvHashMap<i32, Arc<retina::client::SessionGroup>> =
FnvHashMap::default();
let mut session_groups_by_camera: FastHashMap<i32, Arc<retina::client::SessionGroup>> =
FastHashMap::default();
let syncers = if !read_only {
let l = db.lock();
let mut dirs = FnvHashMap::with_capacity_and_hasher(
let mut dirs = FastHashMap::with_capacity_and_hasher(
l.sample_file_dirs_by_id().len(),
Default::default(),
);
@@ -377,7 +378,7 @@ async fn inner(
// Then, with the lock dropped, create syncers.
drop(l);
let mut syncers = FnvHashMap::with_capacity_and_hasher(dirs.len(), Default::default());
let mut syncers = FastHashMap::with_capacity_and_hasher(dirs.len(), Default::default());
for (id, dir) in dirs.drain() {
let (channel, join) = writer::start_syncer(db.clone(), shutdown_rx.clone(), id)?;
syncers.insert(id, Syncer { dir, channel, join });
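Besides the `FnvHashMap` renames, `read_config` changes shape because toml 0.6 dropped `toml::from_slice`: only `from_str` remains, so the file's bytes get an explicit UTF-8 check first. A self-contained sketch of the new flow; the `ConfigFile` field here is hypothetical, just enough for the example to deserialize:

```rust
use serde::Deserialize;

#[derive(Deserialize)]
struct ConfigFile {
    // Hypothetical field for illustration; the real struct is defined elsewhere.
    #[serde(default)]
    db_dir: Option<std::path::PathBuf>,
}

fn read_config(path: &std::path::Path) -> Result<ConfigFile, Box<dyn std::error::Error>> {
    let raw = std::fs::read(path)?;
    // toml 0.6 only parses &str, so the UTF-8 validation is now explicit.
    let text = std::str::from_utf8(&raw)?;
    Ok(toml::from_str(text)?)
}
```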


@@ -985,7 +985,7 @@ impl FileBuilder {
pub fn build(
mut self,
db: Arc<db::Database>,
dirs_by_stream_id: Arc<::fnv::FnvHashMap<i32, Arc<dir::SampleFileDir>>>,
dirs_by_stream_id: Arc<::base::FastHashMap<i32, Arc<dir::SampleFileDir>>>,
) -> Result<File, Error> {
let mut max_end = None;
let mut etag = blake3::Hasher::new();
@@ -1777,7 +1777,7 @@ impl BodyState {
struct FileInner {
db: Arc<db::Database>,
dirs_by_stream_id: Arc<::fnv::FnvHashMap<i32, Arc<dir::SampleFileDir>>>,
dirs_by_stream_id: Arc<::base::FastHashMap<i32, Arc<dir::SampleFileDir>>>,
segments: Vec<Segment>,
slices: Slices<Slice>,
buf: Vec<u8>,


@@ -20,13 +20,13 @@ use crate::mp4;
use crate::web::static_file::Ui;
use base::err;
use base::Error;
use base::FastHashMap;
use base::ResultExt;
use base::{bail, clock::Clocks, ErrorKind};
use core::borrow::Borrow;
use core::str::FromStr;
use db::dir::SampleFileDir;
use db::{auth, recording};
use fnv::FnvHashMap;
use http::header::{self, HeaderValue};
use http::{status::StatusCode, Request, Response};
use hyper::body::Bytes;
@@ -172,7 +172,7 @@ pub struct Config<'a> {
pub struct Service {
db: Arc<db::Database>,
ui: Ui,
dirs_by_stream_id: Arc<FnvHashMap<i32, Arc<SampleFileDir>>>,
dirs_by_stream_id: Arc<FastHashMap<i32, Arc<SampleFileDir>>>,
time_zone_name: String,
allow_unauthenticated_permissions: Option<db::Permissions>,
trust_forward_hdrs: bool,
@@ -199,7 +199,7 @@ impl Service {
let dirs_by_stream_id = {
let l = config.db.lock();
let mut d =
FnvHashMap::with_capacity_and_hasher(l.streams_by_id().len(), Default::default());
FastHashMap::with_capacity_and_hasher(l.streams_by_id().len(), Default::default());
for (&id, s) in l.streams_by_id().iter() {
let dir_id = match s.sample_file_dir_id {
Some(d) => d,


@@ -144,8 +144,8 @@ fn encode_sid(sid: db::RawSessionId, flags: i32) -> String {
#[cfg(test)]
mod tests {
use base::FastHashMap;
use db::testutil;
use fnv::FnvHashMap;
use tracing::info;
use crate::web::tests::Server;
@@ -163,7 +163,7 @@ mod tests {
let resp = cli.post(&login_url).send().await.unwrap();
assert_eq!(resp.status(), reqwest::StatusCode::BAD_REQUEST);
let mut p = FnvHashMap::default();
let mut p = FastHashMap::default();
p.insert("username", "slamb");
p.insert("password", "asdf");
let resp = cli.post(&login_url).json(&p).send().await.unwrap();
@@ -190,7 +190,7 @@ mod tests {
testutil::init();
let s = Server::new(None);
let cli = reqwest::Client::new();
let mut p = FnvHashMap::default();
let mut p = FastHashMap::default();
p.insert("username", "slamb");
p.insert("password", "hunter2");
let resp = cli
@@ -239,7 +239,7 @@ mod tests {
.get("csrf")
.unwrap()
.as_str();
let mut p = FnvHashMap::default();
let mut p = FastHashMap::default();
p.insert("csrf", csrf);
let resp = cli
.post(&format!("{}/api/logout", &s.base_url))