Compare commits

..

No commits in common. "main" and "v0.1.0" have entirely different histories.
main ... v0.1.0

22 changed files with 549 additions and 2581 deletions

View file

@ -1,27 +0,0 @@
# CI workflow: build, test, and clippy-lint the crate on every push and
# pull request targeting the "main" branch.
name: Rust
on:
  push:
    branches: [ "main" ]
  pull_request:
    branches: [ "main" ]
env:
  CARGO_TERM_COLOR: always
  # NOTE(review): presumably lets the test suite overwrite git config inside
  # the CI sandbox (see CHANGELOG "Git global config will not be polluted in
  # test by default") — confirm against the test code.
  XDBM_ENABLE_OVERWRITE_GITCONFIG: true
jobs:
  build-and-lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      # clippy is needed for the Lint step below.
      - name: Setup
        run: rustup component add clippy
      - name: Build
        run: cargo build --verbose
      - name: Run tests
        run: cargo test --verbose
      - name: Lint
        run: cargo clippy --all-targets --all-features

View file

@ -1,44 +1,5 @@
# Changelog
## [Unreleased]
### Added
- Option to use `git` cli in `sync` subcommand. This is now the default (#27)
## [0.4.0] - 2025-03-01
### Added
- `sync` subcommand, which performs git pull (fast-forward) and push (#21)
- Feature `vendored-openssl` to statically link openssl and libgit2 (#22)
### Fixed
- Git local config is now looked up. (#20)
- Git global config will not be polluted in test by default. (#20)
## [0.3.0] - 2024-12-02
### Added
- Add `status` subcommand to see storage and backup on given path or current working directory ([#17](https://github.com/qwjyh/xdbm/pull/17)).
### Changed
- Colored output for `storage list` and `backup list` ([#15](https://github.com/qwjyh/xdbm/pull/15))
- **BREAKING** Relative path is changed from `PathBuf` to `Vec<String>` for portability. This means that existing config files need to be changed.
## [0.2.1] - 2024-06-19
### Changed
- Dependencies are updated.
- Format of storage size printing has been changed due to the update of byte-unit.
### Fixed
- `libgit2-sys` was updated due to the security issue.
## [0.2.0] - 2024-05-21
### Changed
- Added CI on GitHub Actions (#10).
- Replaced `HashMap` with `BTreeMap` to produce cleaner diff (#11).
## [0.1.0] - 2024-03-18
### Added
@ -54,9 +15,4 @@
- `backup done` subcommand
- `completion` subcommand
[Unreleased]: https://github.com/qwjyh/xdbm/compare/v0.4.0...HEAD
[0.4.0]: https://github.com/qwjyh/xdbm/compare/v0.3.0...v0.4.0
[0.3.0]: https://github.com/qwjyh/xdbm/compare/v0.2.1...v0.3.0
[0.2.1]: https://github.com/qwjyh/xdbm/compare/v0.2.0...v0.2.1
[0.2.0]: https://github.com/qwjyh/xdbm/releases/tag/v0.2.0
[0.1.0]: https://github.com/qwjyh/xdbm/releases/tag/v0.1.0

1390
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -1,8 +1,8 @@
[package]
name = "xdbm"
version = "0.4.0"
version = "0.1.0"
authors = ["qwjyh <urataw421@gmail.com>"]
edition = "2024"
edition = "2021"
description = "Cross device backup manager, which manages backups on several storages mounted on multiple devices."
readme = "README.md"
homepage = "https://github.com/qwjyh/xdbm"
@ -13,29 +13,25 @@ keywords = ["cli", "backup"]
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
clap = { version = "4.5", features = ["cargo", "derive"] }
sysinfo = { version = "0.32", features = ["serde"] }
clap = { version = "4.4.0", features = ["cargo", "derive"] }
sysinfo = { version = "0.29.8", features = ["serde"] }
log = "0.4"
clap-verbosity-flag = "3.0"
clap_complete = "4.5"
chrono = { version = "0.4", features = ["serde"] }
env_logger = "0.11.5"
inquire = "0.7.5"
git2 = "0.19"
clap-verbosity-flag = "2.0.1"
clap_complete = "4.5.1"
chrono = { version = "0.4.35", features = ["serde"] }
env_logger = "0.10.0"
inquire = "0.6.2"
git2 = "0.17.2"
dirs = "5.0"
dunce = "1.0.5"
dunce = "1.0.4"
serde = { version = "1.0", features = ["derive"] }
serde_yaml = "0.9"
byte-unit = "5.1"
byte-unit = "4.0.19"
anyhow = "1.0"
pathdiff = "0.2.3"
unicode-width = "0.2.0"
console = "0.15"
pathdiff = "0.2.1"
unicode-width = "0.1.11"
[dev-dependencies]
assert_cmd = "2.0.16"
assert_fs = "1.1.2"
predicates = "3.1.2"
[features]
vendored-openssl = ["git2/vendored-openssl"]
assert_cmd = "2.0.14"
assert_fs = "1.1.1"
predicates = "3.1.0"

View file

@ -2,9 +2,6 @@
_Cross device backup manager_,
which manages backups on several storages mounted on multiple devices with a single repository.
## Install
- `git` is required for sync
## Usage
1. `xdbm init` to setup new device(i.e. PC).
2. `xdbm storage add` to add storages, or `xdbm storage bind` to make existing storages available on new device.
@ -27,7 +24,7 @@ which manages backups on several storages mounted on multiple devices with a sin
- [ ] write test for storage subcommand
- [x] storage add online
- [x] storage add directory
- [x] storage list
- [ ] storage list
- [x] update storage bind command
- [ ] add storage remove command
- [ ] add sync subcommand
@ -41,7 +38,7 @@ which manages backups on several storages mounted on multiple devices with a sin
- [x] backup list
- [x] status printing
- [x] backup done
- [x] fancy display
- [ ] fancy display
- [ ] json output
- [ ] no commit option

View file

@ -3,7 +3,7 @@
use core::panic;
use std::{
collections::BTreeMap,
collections::HashMap,
fs, io,
path::{Path, PathBuf},
};
@ -27,38 +27,32 @@ pub fn backups_file(device: &Device) -> PathBuf {
}
/// Targets for backup source or destination.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[derive(Debug, Serialize, Deserialize)]
pub struct BackupTarget {
/// `name()` of [`crate::storages::Storage`].
/// Use `String` for serialization/deserialization.
pub storage: String,
/// Relative path to the `storage`.
pub path: Vec<String>,
pub path: PathBuf,
}
impl BackupTarget {
pub fn new(storage_name: String, relative_path: PathBuf) -> Result<Self> {
let relative_path = relative_path
.components()
.map(|c| c.as_os_str().to_str().map(|s| s.to_owned()))
.collect::<Option<_>>()
.context("Path contains non-utf8 character")?;
Ok(BackupTarget {
pub fn new(storage_name: String, relative_path: PathBuf) -> Self {
BackupTarget {
storage: storage_name,
path: relative_path,
})
}
}
/// Get full path of the [`BackupTarget`].
pub fn path(&self, storages: &Storages, device: &Device) -> Option<PathBuf> {
pub fn path(&self, storages: &Storages, device: &Device) -> Result<PathBuf> {
let parent = storages.get(&self.storage).unwrap();
let parent_path = parent.mount_path(device)?;
Some(parent_path.join(self.path.clone().iter().collect::<PathBuf>()))
Ok(parent_path.join(self.path.clone()))
}
}
/// Type of backup commands.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[derive(Debug, Serialize, Deserialize)]
pub enum BackupCommand {
ExternallyInvoked(ExternallyInvoked),
}
@ -85,7 +79,7 @@ impl BackupCommandExt for BackupCommand {
/// Backup commands which is not invoked from xdbm itself.
/// Call xdbm externally to record backup datetime and status.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[derive(Debug, Serialize, Deserialize)]
pub struct ExternallyInvoked {
name: String,
pub note: String,
@ -108,7 +102,7 @@ impl BackupCommandExt for ExternallyInvoked {
}
/// Backup execution log.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[derive(Debug, Serialize, Deserialize)]
pub struct BackupLog {
pub datetime: DateTime<Local>,
status: BackupResult,
@ -128,7 +122,7 @@ impl BackupLog {
}
/// Result of backup.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[derive(Debug, Serialize, Deserialize)]
pub enum BackupResult {
Success,
Failure,
@ -145,7 +139,7 @@ impl BackupResult {
}
/// Backup source, destination, command and logs.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[derive(Debug, Serialize, Deserialize)]
pub struct Backup {
/// must be unique
name: String,
@ -180,7 +174,7 @@ impl Backup {
&self.name
}
pub fn device<'a>(&'a self, devices: &'a [Device]) -> Option<&'a Device> {
pub fn device<'a>(&'a self, devices: &'a [Device]) -> Option<&Device> {
devices.iter().find(|dev| dev.name() == self.device)
}
@ -206,16 +200,16 @@ impl Backup {
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[derive(Debug, Serialize, Deserialize)]
pub struct Backups {
pub list: BTreeMap<String, Backup>,
pub list: HashMap<String, Backup>,
}
impl Backups {
/// Empty [`Backups`].
pub fn new() -> Backups {
Backups {
list: BTreeMap::new(),
list: HashMap::new(),
}
}

View file

@ -1,7 +1,7 @@
//! CLI arguments
use crate::PathBuf;
use crate::path;
use crate::PathBuf;
use clap::Args;
use clap::{Parser, Subcommand};
use clap_verbosity_flag::Verbosity;
@ -44,18 +44,6 @@ pub(crate) enum Commands {
#[command(subcommand)]
Backup(BackupSubCommands),
/// Print status for the given path.
Status {
/// Target path. Default is the current directory.
path: Option<PathBuf>,
/// Show storage which the path belongs to.
#[arg(short, long)]
storage: bool,
/// Show backup config covering the path.
#[arg(short, long)]
backup: bool,
},
/// Print config dir.
Path {},
@ -63,22 +51,15 @@ pub(crate) enum Commands {
Sync {
/// Remote name to sync.
remote_name: Option<String>,
/// Use custom git implementation.
#[arg(short, long)]
use_libgit2: bool,
/// Whether to use ssh-agent
#[arg(long)]
use_sshagent: bool,
/// Manually specify ssh key
#[arg(long)]
ssh_key: Option<PathBuf>,
},
/// Check config files validity.
Check {},
/// Generate completion script.
Completion { shell: clap_complete::Shell },
Completion {
shell: clap_complete::Shell,
}
}
#[derive(Args, Debug)]

View file

@ -1,12 +1,11 @@
use std::{
collections::BTreeMap,
collections::HashMap,
io::{self, stdout, Write},
path::{Path, PathBuf},
};
use anyhow::{anyhow, Context, Ok, Result};
use chrono::Local;
use console::Style;
use dunce::canonicalize;
use git2::Repository;
use unicode_width::UnicodeWidthStr;
@ -89,8 +88,8 @@ fn new_backup(
Ok(Backup::new(
name,
device.name(),
src_target?,
dest_target?,
src_target,
dest_target,
command,
))
}
@ -104,14 +103,14 @@ pub fn cmd_backup_list(
storages: &Storages,
) -> Result<()> {
let devices = devices::get_devices(config_dir)?;
let backups: BTreeMap<(String, String), Backup> = match device_name {
let backups: HashMap<(String, String), Backup> = match device_name {
Some(device_name) => {
let device = devices
.iter()
.find(|dev| dev.name() == device_name)
.context(format!("Device with name {} doesn't exist", device_name))?;
let backups = Backups::read(config_dir, device)?;
let mut allbackups = BTreeMap::new();
let mut allbackups = HashMap::new();
for (name, backup) in backups.list {
if allbackups.insert((device.name(), name), backup).is_some() {
return Err(anyhow!("unexpected duplication in backups hashmap"));
@ -120,7 +119,7 @@ pub fn cmd_backup_list(
allbackups
}
None => {
let mut allbackups = BTreeMap::new();
let mut allbackups = HashMap::new();
for device in &devices {
let backups = Backups::read(config_dir, device)?;
for (name, backup) in backups.list {
@ -133,7 +132,7 @@ pub fn cmd_backup_list(
}
};
// source/destination filtering
let backups: BTreeMap<(String, String), Backup> = backups
let backups: HashMap<(String, String), Backup> = backups
.into_iter()
.filter(|((_dev, _name), backup)| {
let src_matched = match &src_storage {
@ -157,7 +156,7 @@ pub fn cmd_backup_list(
/// TODO: status printing
fn write_backups_list(
mut writer: impl io::Write,
backups: BTreeMap<(String, String), Backup>,
backups: HashMap<(String, String), Backup>,
longprint: bool,
storages: &Storages,
devices: &[Device],
@ -177,16 +176,10 @@ fn write_backups_list(
))?;
name_width = name_width.max(backup.name().width());
dev_width = dev_width.max(dev.width());
let src = backup
.source()
.path(storages, device)
.context("Couldn't get path for source")?;
let src = backup.source().path(storages, device)?;
src_width = src_width.max(format!("{}", src.display()).width());
src_storage_width = src_storage_width.max(backup.source().storage.width());
let dest = backup
.destination()
.path(storages, device)
.context("Couldn't get path for destination")?;
let dest = backup.destination().path(storages, device)?;
dest_width = dest_width.max(format!("{}", dest.display()).width());
dest_storage_width = dest_storage_width.max(backup.destination().storage.width());
let cmd_name = backup.command().name();
@ -195,77 +188,39 @@ fn write_backups_list(
// main printing
for ((dev, _name), backup) in &backups {
let device = backup.device(devices).context(format!(
"Couldn't find the device specified in the backup config: {}",
"Couldn't find device specified in backup config {}",
backup.name()
))?;
let src = backup
.source()
.path(storages, device)
.context("Couldn't get path for source")?;
let dest = backup
.destination()
.path(storages, device)
.context("Couldn't get path for destination")?;
let src = backup.source().path(storages, device)?;
let dest = backup.destination().path(storages, device)?;
let cmd_name = backup.command().name();
let (last_backup_elapsed, style_on_time_elapsed) = match backup.last_backup() {
let last_backup_elapsed = match backup.last_backup() {
Some(log) => {
let time = Local::now() - log.datetime;
let s = util::format_summarized_duration(time);
let style = util::duration_style(time);
(style.apply_to(s), style)
}
None => {
let style = Style::new().red();
(style.apply_to("---".to_string()), style)
util::format_summarized_duration(time)
}
None => "---".to_string(),
};
if !longprint {
writeln!(
writer,
"{name:<name_width$} [{dev:<dev_width$}] {src:<src_storage_width$} → {dest:<dest_storage_width$} {last_backup_elapsed}",
name = style_on_time_elapsed.apply_to(backup.name()),
dev = console::style(dev).blue(),
src = backup.source().storage,
dest = backup.destination().storage,
)?;
} else {
writeln!(
writer,
"[{dev:<dev_width$}] {name:<name_width$} {last_backup_elapsed}",
dev = console::style(dev).blue(),
name = style_on_time_elapsed.bold().apply_to(backup.name()),
)?;
let last_backup_date = match backup.last_backup() {
Some(date) => date.datetime.format("%Y-%m-%d %T").to_string(),
None => "never".to_string(),
};
writeln!(
writer,
"{name:<name_width$} [{dev:<dev_width$}] {src:<src_storage_width$} → {dest:<dest_storage_width$} {last_backup_elapsed}",
name = backup.name(),
src = backup.source().storage,
dest = backup.destination().storage,
)?;
if longprint {
let cmd_note = backup.command().note();
writeln!(writer, " src : {src:<src_width$}", src = src.display())?;
writeln!(
writer,
"{s_src} {src}",
s_src = console::style("src :").italic().bright().black(),
src = src.display()
)?;
writeln!(
writer,
"{s_dest} {dest}",
s_dest = console::style("dest:").italic().bright().black(),
" dest: {dest:<dest_width$}",
dest = dest.display()
)?;
writeln!(
writer,
"{s_last} {last}",
s_last = console::style("last:").italic().bright().black(),
last = last_backup_date,
" {cmd_name:<cmd_name_width$}({note})",
note = cmd_note,
)?;
writeln!(
writer,
"{s_cmd} {cmd_name}({note})",
s_cmd = console::style("cmd :").italic().bright().black(),
cmd_name = console::style(cmd_name).underlined(),
note = console::style(cmd_note).italic(),
)?;
writeln!(writer)?;
}
}
Ok(())
@ -361,9 +316,9 @@ mod test {
&storages,
)?;
assert!(backup.source().storage == "online");
assert_eq!(backup.source().path, vec!["docs"]);
assert_eq!(backup.source().path, PathBuf::from("docs"));
assert!(backup.destination().storage == "online");
assert!(backup.destination().path == vec!["tmp"]);
assert!(backup.destination().path == PathBuf::from("tmp"));
Ok(())
}
}

View file

@ -2,13 +2,11 @@
//! Initialize xdbm for the device.
use crate::backups::Backups;
use crate::storages::{STORAGESFILE, Storages};
use crate::storages::{Storages, STORAGESFILE};
use crate::{
DEVICESFILE, Device, add_and_commit, backups,
devices::{get_devices, write_devices},
full_status,
add_and_commit, backups, full_status, get_devices, write_devices, Device, DEVICESFILE,
};
use anyhow::{Context, Ok, Result, anyhow};
use anyhow::{anyhow, Context, Ok, Result};
use core::panic;
use git2::{Cred, RemoteCallbacks, Repository};
use inquire::Password;
@ -42,16 +40,20 @@ fn clone_repo(
}
};
Cred::ssh_key(
username_from_url.ok_or(git2::Error::from_str("No username found from the url"))?,
username_from_url
.context("No username found from the url")
.unwrap(),
None,
key as &Path,
&key as &Path,
passwd.as_deref(),
)
} else if use_sshagent {
// use ssh agent
info!("Using ssh agent to access the repository");
Cred::ssh_key_from_agent(
username_from_url.ok_or(git2::Error::from_str("No username found from the url"))?,
username_from_url
.context("No username found from the url")
.unwrap(),
)
} else {
error!("no ssh_key and use_sshagent");

View file

@ -1,298 +0,0 @@
use anyhow::{Context, Result};
use chrono::Local;
use console::Style;
use std::{
env,
path::{self, Path, PathBuf},
};
use crate::{
backups::{Backup, Backups},
devices::{self, Device},
storages::{self, Storage, StorageExt, Storages},
util,
};
// TODO: fine styling like `backup list`, or should I just use the same style?
/// Print status information for `path` (or the current directory).
///
/// * `path` - target path; defaults to the current working directory.
/// * `show_storage` - print the minimal storage that contains the path.
/// * `show_backup` - per device, print each backup whose source covers the
///   path, with time since its last run and the path relative to the backup root.
/// * `config_dir` - xdbm config directory holding devices/storages/backups files.
///
/// Errors if the current directory or config files cannot be read, or (with
/// `show_backup`) if no storage covers the target path.
pub(crate) fn cmd_status(
    path: Option<PathBuf>,
    show_storage: bool,
    show_backup: bool,
    config_dir: &Path,
) -> Result<()> {
    let path = path.unwrap_or(env::current_dir().context("Failed to get current directory.")?);
    let current_device = devices::get_device(config_dir)?;
    if show_storage {
        let storages = storages::Storages::read(config_dir)?;
        // Innermost storage (on the current device) whose mount path is a
        // parent of `path`, if any.
        let storage = util::min_parent_storage(&path, &storages, &current_device);
        trace!("storage {:?}", storage);
        // TODO: recursively trace all storages for subdirectory?
        match storage {
            Some(storage) => {
                println!("Storage: {}", storage.0.name())
            }
            None => {
                println!("Storage: None");
            }
        }
    }
    if show_backup {
        let devices = devices::get_devices(config_dir)?;
        let storages = storages::Storages::read(config_dir)?;
        // One Backups per device, lazily read as the zip below consumes it.
        // NOTE(review): read failure panics here (`.unwrap()`) instead of
        // propagating via `?` — deliberate? confirm.
        let backups = devices.iter().map(|device| {
            Backups::read(config_dir, device)
                .context("Backups were not found")
                .unwrap()
        });
        // The storage covering the target, and the target's path relative to it.
        let (target_storage, target_diff_from_storage) =
            util::min_parent_storage(&path, &storages, &current_device)
                .context("Target path is not covered in any storage")?;
        // For every device, collect the backups whose source covers the target.
        let covering_backup: Vec<_> = devices
            .iter()
            .zip(backups)
            .map(|(device, backups)| {
                debug!(
                    "dev {}, storage {:?}",
                    device.name(),
                    backups
                        .list
                        .iter()
                        .map(|(backup_name, backup)| format!(
                            "{} {}",
                            backup_name,
                            backup.source().storage
                        ))
                        .collect::<Vec<_>>()
                );
                (
                    device,
                    parent_backups(
                        &target_diff_from_storage,
                        target_storage,
                        backups,
                        &storages,
                        device,
                    ),
                )
            })
            .collect();
        trace!("{:?}", covering_backup.first());
        // Widest backup name across all devices, used as the column width
        // below (minimum 5). NOTE(review): the leading `&` makes this a
        // reference to a temporary — likely unnecessary; confirm it compiles
        // as a format width argument.
        let name_len = &covering_backup
            .iter()
            .map(|(_, backups)| {
                backups
                    .iter()
                    .map(|(backup, _path)| backup.name().len())
                    .max()
                    .unwrap_or(0)
            })
            .max()
            .unwrap_or(5);
        for (backup_device, covering_backups) in covering_backup {
            // Skip devices that have no backup covering the target.
            if covering_backups.is_empty() {
                continue;
            }
            println!("Device: {}", backup_device.name());
            for (backup, path_from_backup) in covering_backups {
                // Elapsed time since the last run, styled by age; red "---"
                // when the backup has never run.
                let (last_backup, style) = match backup.last_backup() {
                    Some(log) => {
                        let timediff = Local::now() - log.datetime;
                        (
                            util::format_summarized_duration(timediff),
                            util::duration_style(timediff),
                        )
                    }
                    None => ("---".to_string(), Style::new().red()),
                };
                println!(
                    " {:<name_len$} {} {}",
                    console::style(backup.name()).bold(),
                    style.apply_to(last_backup),
                    path_from_backup.display(),
                );
            }
        }
    }
    Ok(())
}
/// Collect the [`Backup`]s of `device` whose source directory contains the
/// target path.
///
/// The target is given as `target_path_from_storage`, relative to
/// `target_storage`; it is first resolved to an absolute path on `device`.
/// Returns `(backup, relative path of the target from the backup root)`
/// pairs; empty when `target_storage` is not mounted on `device`.
fn parent_backups<'a>(
    target_path_from_storage: &'a Path,
    target_storage: &'a Storage,
    backups: Backups,
    storages: &'a Storages,
    device: &'a Device,
) -> Vec<(Backup, PathBuf)> {
    trace!("Dev {:?}", device.name());
    // Resolve the target to an absolute path on this device; no mount point
    // means nothing on this device can cover it.
    let Some(mount) = target_storage.mount_path(device) else {
        return vec![];
    };
    let target_path = mount.join(target_path_from_storage);
    trace!("Path on the device {:?}", target_path);
    let mut covering = Vec::new();
    for (_name, backup) in backups.list {
        // Backups whose source path cannot be resolved on this device are skipped.
        let Some(backup_path) = backup.source().path(storages, device) else {
            continue;
        };
        trace!("{:?}", backup_path.components());
        let Some(diff) = pathdiff::diff_paths(&target_path, backup_path.clone()) else {
            continue;
        };
        trace!("Backup: {:?}, Diff: {:?}", backup_path, diff);
        // note: Should `RootDir` is included in this list?
        // The target lies inside the backup root exactly when the relative
        // path never climbs out of it.
        let escapes_root = diff
            .components()
            .any(|c| matches!(c, path::Component::ParentDir | path::Component::Prefix(_)));
        if !escapes_root {
            covering.push((backup, diff));
        }
    }
    covering
}
#[cfg(test)]
mod test {
    use std::{path::PathBuf, vec};

    use crate::{
        backups::{self, ExternallyInvoked},
        devices,
        storages::{self, online_storage::OnlineStorage, StorageExt},
        util,
    };

    use super::parent_backups;

    /// Exercises `parent_backups` with two devices, three storages, and two
    /// backups, checking which backups cover three different target paths.
    #[test]
    fn test_parent_backups() {
        // storage_1 is mounted at /home/foo/ on device_1 (and later bound to
        // /mnt/dev on device_2); storage_2 is the root of device_1.
        let device1 = devices::Device::new("device_1".to_string());
        let mut storage1 = storages::Storage::Online(OnlineStorage::new(
            "storage_1".to_string(),
            "smb".to_string(),
            1_000_000,
            "str1".to_string(),
            PathBuf::from("/home/foo/"),
            &device1,
        ));
        let storage2 = storages::Storage::Online(OnlineStorage::new(
            "storage_2".to_string(),
            "smb".to_string(),
            1_000_000_000,
            "str2".to_string(),
            PathBuf::from("/"),
            &device1,
        ));
        let device2 = devices::Device::new("device_2".to_string());
        // Make storage_1 also available on device_2, mounted at /mnt/dev.
        storage1
            .bound_on_device("alias".to_string(), PathBuf::from("/mnt/dev"), &device2)
            .unwrap();
        // storage_3 is the root of device_2.
        let storage3 = storages::Storage::Online(OnlineStorage::new(
            "storage_3".to_string(),
            "smb".to_string(),
            2_000_000_000,
            "str2".to_string(),
            PathBuf::from("/"),
            &device2,
        ));
        let storages = {
            let mut storages = storages::Storages::new();
            storages.add(storage1).unwrap();
            storages.add(storage2).unwrap();
            storages.add(storage3).unwrap();
            storages
        };
        // backup_1 (device_1): storage_1/bar -> storage_1/hoge.
        let backup1 = backups::Backup::new(
            "backup_1".to_string(),
            device1.name().to_string(),
            backups::BackupTarget {
                storage: "storage_1".to_string(),
                path: vec!["bar".to_string()],
            },
            backups::BackupTarget {
                storage: "storage_1".to_string(),
                path: vec!["hoge".to_string()],
            },
            backups::BackupCommand::ExternallyInvoked(ExternallyInvoked::new(
                "cmd".to_string(),
                "".to_string(),
            )),
        );
        // backup_2 (device_2): whole storage_1 -> storage_3/foo.
        let backup2 = backups::Backup::new(
            "backup_2".to_string(),
            device2.name().to_string(),
            backups::BackupTarget {
                storage: "storage_1".to_string(),
                path: vec!["".to_string()],
            },
            backups::BackupTarget {
                storage: "storage_3".to_string(),
                path: vec!["foo".to_string()],
            },
            backups::BackupCommand::ExternallyInvoked(ExternallyInvoked::new(
                "cmd".to_string(),
                "".to_string(),
            )),
        );
        let backups = {
            let mut backups = backups::Backups::new();
            backups.add(backup1).unwrap();
            backups.add(backup2).unwrap();
            backups
        };
        // /home/foo/bar/hoo on device_1 lies inside both backup_1's source
        // (storage_1/bar) and backup_2's source (storage_1) -> 2 hits.
        let target_path1 = PathBuf::from("/home/foo/bar/hoo");
        let (target_storage1, target_path_from_storage1) =
            util::min_parent_storage(&target_path1, &storages, &device1)
                .expect("Failed to get storage");
        let covering_backups_1 = parent_backups(
            &target_path_from_storage1,
            target_storage1,
            backups.clone(),
            &storages,
            &device1,
        );
        assert_eq!(covering_backups_1.len(), 2);
        // /mnt/ on device_2 is above storage_1's mount point -> no backup covers it.
        let target_path2 = PathBuf::from("/mnt/");
        let (target_storage2, target_path_from_storage2) =
            util::min_parent_storage(&target_path2, &storages, &device2)
                .expect("Failed to get storage");
        let covering_backups_2 = parent_backups(
            &target_path_from_storage2,
            target_storage2,
            backups.clone(),
            &storages,
            &device2,
        );
        assert_eq!(covering_backups_2.len(), 0);
        // /mnt/dev/foo on device_2 is inside storage_1's alias mount, so only
        // backup_2 (which backs up all of storage_1) covers it.
        let target_path3 = PathBuf::from("/mnt/dev/foo");
        let (target_storage3, target_path_from_storage3) =
            util::min_parent_storage(&target_path3, &storages, &device2)
                .expect("Failed to get storage");
        let covering_backups_3 = parent_backups(
            &target_path_from_storage3,
            target_storage3,
            backups,
            &storages,
            &device2,
        );
        assert_eq!(covering_backups_3.len(), 1);
        let mut covering_backup_names_3 =
            covering_backups_3.iter().map(|(backup, _)| backup.name());
        assert_eq!(covering_backup_names_3.next().unwrap(), "backup_2");
        assert!(covering_backup_names_3.next().is_none());
    }
}

View file

@ -6,8 +6,7 @@ use std::{
};
use anyhow::{anyhow, Context, Result};
use byte_unit::{Byte, UnitType};
use console::style;
use byte_unit::Byte;
use dunce::canonicalize;
use git2::Repository;
use inquire::{Confirm, CustomType, Text};
@ -198,11 +197,10 @@ fn write_storages_list(
trace!("name widths: {}", name_width);
for storage in storages.list.values() {
let size_str = match storage.capacity() {
Some(b) => {
let size = Byte::from_u64(b).get_appropriate_unit(UnitType::Binary);
// TODO: split case for 500GB and 1.5TB?
format!("{:>+5.1}", size)
}
Some(b) => Byte::from_bytes(b.into())
.get_appropriate_unit(true)
.format(0)
.to_string(),
None => "".to_string(),
};
let isremovable = if let Storage::Physical(s) = storage {
@ -212,11 +210,11 @@ fn write_storages_list(
"-"
}
} else {
""
" "
};
let path = storage.mount_path(device).map_or_else(
|| {
info!("Mount path not found");
|e| {
info!("Not found: {}", e);
"".to_string()
},
|v| v.display().to_string(),
@ -228,24 +226,23 @@ fn write_storages_list(
} else {
""
};
let typestyle = storage.typestyle();
writeln!(
writer,
"{stype}{isremovable:<1}: {name:<name_width$} {size:>10} {parent:<name_width$} {path}",
stype = typestyle.apply_to(storage.shorttypename()),
"{stype}{isremovable}: {name:<name_width$} {size:>8} {parent:<name_width$} {path}",
stype = storage.shorttypename(),
isremovable = isremovable,
name = typestyle.apply_to(storage.name()),
name = storage.name(),
size = size_str,
parent = console::style(parent_name).bright().black(),
parent = parent_name,
path = path,
)?;
if long_display {
let note = match storage {
Storage::Physical(s) => format!("kind: {}", s.kind()),
Storage::SubDirectory(s) => s.notes.clone(),
Storage::Online(s) => s.provider.clone(),
Storage::Physical(s) => s.kind(),
Storage::SubDirectory(s) => &s.notes,
Storage::Online(s) => &s.provider,
};
writeln!(writer, " {}", style(note).italic())?;
writeln!(writer, " {}", note)?;
}
}
Ok(())

View file

@ -1,79 +1,10 @@
use std::{
io::{self, Write},
path::{Path, PathBuf},
process,
};
use std::path::PathBuf;
use anyhow::{Context, Result, anyhow};
use git2::{Cred, FetchOptions, PushOptions, RemoteCallbacks, Repository, build::CheckoutBuilder};
use anyhow::{anyhow, Result};
use git2::Repository;
pub(crate) fn cmd_sync(
config_dir: &PathBuf,
remote_name: Option<String>,
use_sshagent: bool,
ssh_key: Option<PathBuf>,
use_libgit2: bool,
) -> Result<()> {
if use_libgit2 {
cmd_sync_custom(config_dir, remote_name, use_sshagent, ssh_key)
} else {
cmd_sync_cl(config_dir, remote_name, ssh_key)
}
}
fn cmd_sync_cl(
config_dir: &PathBuf,
remote_name: Option<String>,
ssh_key: Option<PathBuf>,
) -> Result<()> {
info!("cmd_sync (command line version)");
trace!("pull");
let args = |cmd| {
let mut args = vec![cmd];
if let Some(ref remote_name) = remote_name {
args.push(remote_name.clone());
}
if let Some(ref ssh_key) = ssh_key {
args.push("-i".to_string());
args.push(ssh_key.to_str().unwrap().to_owned());
}
args
};
let git_pull_result = process::Command::new("git")
.args(args("pull".to_owned()))
.current_dir(config_dir)
.status()
.context("error while executing git pull")?
.success();
if git_pull_result {
eprintln!("git pull completed");
} else {
return Err(anyhow!("failed to complete git pull"));
}
trace!("push");
let git_push_result = process::Command::new("git")
.args(args("push".to_owned()))
.current_dir(config_dir)
.status()
.context("error while executing git push")?
.success();
if git_push_result {
eprintln!("git push completed");
} else {
return Err(anyhow!("failed to complete git push"));
}
Ok(())
}
fn cmd_sync_custom(
config_dir: &PathBuf,
remote_name: Option<String>,
use_sshagent: bool,
ssh_key: Option<PathBuf>,
) -> Result<()> {
info!("cmd_sync");
pub(crate) fn cmd_sync(config_dir: &PathBuf, remote_name: Option<String>) -> Result<()> {
warn!("Experimental");
let repo = Repository::open(config_dir)?;
let remote_name = match remote_name {
Some(remote_name) => remote_name,
@ -85,252 +16,7 @@ fn cmd_sync_custom(
remotes.get(0).unwrap().to_string()
}
};
debug!("resolved remote name: {remote_name}");
let mut remote = repo.find_remote(&remote_name)?;
pull(
&repo,
&mut remote,
remote_name,
&use_sshagent,
ssh_key.as_ref(),
)?;
push(&repo, &mut remote, &use_sshagent, ssh_key.as_ref())?;
Ok(())
}
fn remote_callback<'b, 'a>(
use_sshagent: &'a bool,
ssh_key: Option<&'a PathBuf>,
) -> RemoteCallbacks<'a>
where
'b: 'a,
{
// using credentials
let mut callbacks = RemoteCallbacks::new();
callbacks
.credentials(move |_url, username_from_url, _allowed_types| {
if let Some(key) = ssh_key {
info!("Using provided ssh key to access the repository");
let passwd = match inquire::Password::new("SSH passphrase").prompt() {
std::result::Result::Ok(s) => Some(s),
Err(err) => {
error!("Failed to get ssh passphrase: {:?}", err);
None
}
};
Cred::ssh_key(
username_from_url
.ok_or(git2::Error::from_str("No username found from the url"))?,
None,
key as &Path,
passwd.as_deref(),
)
} else if *use_sshagent {
// use ssh agent
info!("Using ssh agent to access the repository");
Cred::ssh_key_from_agent(
username_from_url
.ok_or(git2::Error::from_str("No username found from the url"))?,
)
} else {
error!("no ssh_key and use_sshagent");
panic!("This option must be unreachable.")
}
})
.transfer_progress(|progress| {
if progress.received_objects() == progress.total_objects() {
print!(
"\rResolving deltas {}/{}",
progress.indexed_deltas(),
progress.total_deltas()
);
} else {
print!(
"\rReceived {}/{} objects ({}) in {} bytes",
progress.received_objects(),
progress.total_objects(),
progress.indexed_objects(),
progress.received_bytes(),
);
}
io::stderr().flush().unwrap();
true
})
.sideband_progress(|text| {
let msg = String::from_utf8_lossy(text);
eprintln!("remote: {msg}");
true
})
.push_transfer_progress(|current, total, bytes| {
trace!("{current}/{total} files sent \t{bytes} bytes");
})
.push_update_reference(|reference_name, status_msg| {
debug!("remote reference_name {reference_name}");
match status_msg {
None => {
info!("successfully pushed");
eprintln!("successfully pushed to {}", reference_name);
Ok(())
}
Some(status) => {
error!("failed to push: {}", status);
Err(git2::Error::from_str(&format!(
"failed to push to {}: {}",
reference_name, status
)))
}
}
});
callbacks
}
fn pull(
repo: &Repository,
remote: &mut git2::Remote,
remote_name: String,
use_sshagent: &bool,
ssh_key: Option<&PathBuf>,
) -> Result<()> {
debug!("pull");
let callbacks = remote_callback(use_sshagent, ssh_key);
let mut fetchoptions = FetchOptions::new();
fetchoptions.remote_callbacks(callbacks);
let fetch_refspec: Vec<String> = remote
.refspecs()
.filter_map(|rs| match rs.direction() {
git2::Direction::Fetch => rs.str().map(|s| s.to_string()),
git2::Direction::Push => None,
})
.collect();
remote
.fetch(&fetch_refspec, Some(&mut fetchoptions), None)
.context("Failed to fetch (pull)")?;
let stats = remote.stats();
if stats.local_objects() > 0 {
eprintln!(
"\rReceived {}/{} objects in {} bytes (used {} local objects)",
stats.indexed_objects(),
stats.total_objects(),
stats.received_bytes(),
stats.local_objects(),
);
} else {
eprintln!(
"\rReceived {}/{} objects in {} bytes",
stats.indexed_objects(),
stats.total_objects(),
stats.received_bytes(),
);
}
let fetch_head = repo
.reference_to_annotated_commit(
&repo
.resolve_reference_from_short_name(&remote_name)
.context("failed to get reference from fetch refspec")?,
)
.context("failed to get annotated commit")?;
let (merge_analysis, merge_preference) = repo
.merge_analysis(&[&fetch_head])
.context("failed to do merge_analysis")?;
trace!("merge analysis: {:?}", merge_analysis);
trace!("merge preference: {:?}", merge_preference);
match merge_analysis {
ma if ma.is_up_to_date() => {
info!("HEAD is up to date. skip merging");
}
ma if ma.is_fast_forward() => {
// https://github.com/rust-lang/git2-rs/blob/master/examples/pull.rs
info!("fast forward is available");
let mut ref_remote = repo
.find_reference(
remote
.default_branch()
.context("failed to get remote default branch")?
.as_str()
.unwrap(),
)
.context("failed to get remote reference")?;
let name = match ref_remote.name() {
Some(s) => s.to_string(),
None => String::from_utf8_lossy(ref_remote.name_bytes()).to_string(),
};
let msg = format!("Fast-Forward: Setting {} to id: {}", name, fetch_head.id());
eprintln!("{}", msg);
ref_remote
.set_target(fetch_head.id(), &msg)
.context("failed to set target")?;
repo.checkout_head(Some(CheckoutBuilder::default().force()))
.context("failed to checkout")?;
}
ma if ma.is_unborn() => {
warn!("HEAD is invalid (unborn)");
return Err(anyhow!(
"HEAD is invalid: merge_analysis: {:?}",
merge_analysis
));
}
ma if ma.is_none() => {
error!("no merge is possible");
return Err(anyhow!("no merge is possible"));
}
ma if ma.is_normal() => {
error!("unable to fast-forward. manual merge is required");
return Err(anyhow!("unable to fast-forward. manual merge is required"));
}
_ma => {
error!(
"this code must not reachable: merge_analysis {:?}",
merge_analysis
);
return Err(anyhow!("must not be reachabel (uncovered merge_analysis)"));
}
}
Ok(())
}
/// Push the current state of `repo` to `remote`.
///
/// Credentials are supplied through remote callbacks built from
/// `use_sshagent` / `ssh_key` (see `remote_callback`). The refspec to push is
/// chosen as follows:
/// - if the remote has one or more configured push refspecs, the first one is
///   used (a warning is logged when more than one exists);
/// - otherwise the name of `HEAD` is used as the refspec.
///
/// # Errors
/// Returns an error if `HEAD` cannot be resolved, if a configured push
/// refspec is not valid UTF-8, or if the push itself fails (e.g.
/// authentication or network failure).
fn push(
    repo: &Repository,
    remote: &mut git2::Remote,
    use_sshagent: &bool,
    ssh_key: Option<&PathBuf>,
) -> Result<()> {
    debug!("push");
    // Authentication (ssh-agent or key file) is provided via callbacks;
    // every push below must go through these options or it cannot
    // authenticate against ssh remotes.
    let callbacks = remote_callback(use_sshagent, ssh_key);
    let mut push_options = PushOptions::new();
    push_options.remote_callbacks(callbacks);
    let num_push_refspecs = remote
        .refspecs()
        .filter(|rs| rs.direction() == git2::Direction::Push)
        .count();
    if num_push_refspecs > 1 {
        warn!("more than one push refspecs are configured");
        warn!("using the first one");
    }
    let head = repo.head().context("Failed to get HEAD")?;
    if num_push_refspecs >= 1 {
        trace!("using push refspec");
        let push_refspec = remote
            .refspecs()
            .filter_map(|rs| match rs.direction() {
                git2::Direction::Fetch => None,
                git2::Direction::Push => Some(rs),
            })
            .next()
            // guarded by `num_push_refspecs >= 1` above
            .expect("this must be unreachable")
            .str()
            .context("failed to get valid utf8 push refspec")?
            .to_string();
        remote.push(&[push_refspec.as_str()] as &[&str], Some(&mut push_options))?;
    } else {
        trace!("using head as push refspec");
        trace!("head is branch: {:?}", head.is_branch());
        trace!("head is remote: {:?}", head.is_remote());
        let push_refspec = head.name().context("failed to get head name")?;
        remote.push(&[push_refspec] as &[&str], Some(&mut push_options))?;
    }
    // NOTE: the previous trailing `remote.push(&[] as &[&str], None)?` was
    // removed: it re-pushed with no refspecs and *without* the credential
    // callbacks, so it was redundant on success and failed outright against
    // authenticated remotes.
    Ok(())
}

View file

@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize};
use std::fs::{File, OpenOptions};
use std::io::{BufRead, BufReader, BufWriter};
use std::path::Path;
use sysinfo::System;
use sysinfo::{System, SystemExt};
/// YAML file to store known devices.
pub const DEVICESFILE: &str = "devices.yml";
@ -25,17 +25,18 @@ impl Device {
/// Create new `Device` of name `name`. Additional data is obtained via sysinfo.
/// Filling fields which one failed to get is filled with "unknown".
pub fn new(name: String) -> Device {
let sys = System::new();
Device {
name,
os_name: System::name().unwrap_or_else(|| {
os_name: sys.name().unwrap_or_else(|| {
warn!("Failed to get OS name. Saving as \"unknown\".");
"unknown".to_string()
}),
os_version: System::os_version().unwrap_or_else(|| {
os_version: sys.os_version().unwrap_or_else(|| {
warn!("Failed to get OS version. Saving as \"unknown\".");
"unknown".to_string()
}),
hostname: System::host_name().unwrap_or_else(|| {
hostname: sys.host_name().unwrap_or_else(|| {
warn!("Failed to get hostname. Saving as \"unknown\".");
"unknown".to_string()
}),
@ -88,7 +89,6 @@ pub fn write_devices(config_dir: &Path, devices: Vec<Device>) -> Result<()> {
trace!("write_devices");
let f = OpenOptions::new()
.create(true)
.truncate(true)
.write(true)
.open(config_dir.join(DEVICESFILE))?;
let writer = BufWriter::new(f);

View file

@ -1,40 +0,0 @@
use std::path::{Path, PathBuf};
use git2::{Cred, RemoteCallbacks};
use inquire::Password;
pub(crate) fn get_credential<'a>(
use_sshagent: bool,
ssh_key: Option<PathBuf>,
) -> RemoteCallbacks<'a> {
// using credentials
let mut callbacks = RemoteCallbacks::new();
callbacks.credentials(move |_url, username_from_url, _allowed_types| {
if let Some(key) = &ssh_key {
info!("Using provided ssh key to access the repository");
let passwd = match Password::new("SSH passphrase").prompt() {
std::result::Result::Ok(s) => Some(s),
Err(err) => {
error!("Failed to get ssh passphrase: {:?}", err);
None
}
};
Cred::ssh_key(
username_from_url.ok_or(git2::Error::from_str("No username found from the url"))?,
None,
key as &Path,
passwd.as_deref(),
)
} else if use_sshagent {
// use ssh agent
info!("Using ssh agent to access the repository");
Cred::ssh_key_from_agent(
username_from_url.ok_or(git2::Error::from_str("No username found from the url"))?,
)
} else {
error!("no ssh_key and use_sshagent");
panic!("This option must be unreachable.")
}
});
callbacks
}

View file

@ -17,7 +17,7 @@ impl FilePathCompleter {
return Ok(());
}
input.clone_into(&mut self.input);
self.input = input.to_owned();
self.paths.clear();
let input_path = std::path::PathBuf::from(input);

View file

@ -23,7 +23,7 @@ use std::path::{self, PathBuf};
use storages::Storages;
use crate::cmd_args::{BackupSubCommands, Cli, Commands, StorageCommands};
use devices::{DEVICESFILE, Device};
use devices::{Device, DEVICESFILE, *};
mod backups;
mod cmd_args;
@ -31,11 +31,9 @@ mod cmd_backup;
mod cmd_check;
mod cmd_completion;
mod cmd_init;
mod cmd_status;
mod cmd_storage;
mod cmd_sync;
mod devices;
mod git;
mod inquire_filepath_completer;
mod storages;
mod util;
@ -92,17 +90,7 @@ fn main() -> Result<()> {
Commands::Path {} => {
println!("{}", &config_dir.display());
}
Commands::Sync {
remote_name,
use_libgit2,
use_sshagent,
ssh_key,
} => cmd_sync::cmd_sync(&config_dir, remote_name, use_sshagent, ssh_key, use_libgit2)?,
Commands::Status {
path,
storage,
backup,
} => cmd_status::cmd_status(path, storage, backup, &config_dir)?,
Commands::Sync { remote_name } => cmd_sync::cmd_sync(&config_dir, remote_name)?,
Commands::Check {} => cmd_check::cmd_check(&config_dir)?,
Commands::Backup(backup) => {
trace!("backup subcommand with args: {:?}", backup);
@ -162,7 +150,7 @@ fn add_and_commit(repo: &Repository, path: &Path, message: &str) -> Result<Oid,
index.write()?;
let oid = index.write_tree()?;
let tree = repo.find_tree(oid)?;
let config = repo.config()?;
let config = git2::Config::open_default()?;
let signature = git2::Signature::now(
config.get_entry("user.name")?.value().unwrap(),
config.get_entry("user.email")?.value().unwrap(),

View file

@ -7,10 +7,9 @@ use crate::storages::{
};
use anyhow::{anyhow, Context, Result};
use clap::ValueEnum;
use console::Style;
use core::panic;
use serde::{Deserialize, Serialize};
use std::{collections::BTreeMap, fmt, fs, io, path};
use std::{collections::HashMap, fmt, fs, io, path, u64};
/// YAML file to store known storages..
pub const STORAGESFILE: &str = "storages.yml";
@ -51,14 +50,6 @@ impl Storage {
Self::Online(_) => "O",
}
}
pub fn typestyle(&self) -> Style {
match self {
Storage::Physical(_) => Style::new().cyan(),
Storage::SubDirectory(_) => Style::new().yellow(),
Storage::Online(_) => Style::new().green(),
}
}
}
impl StorageExt for Storage {
@ -78,7 +69,7 @@ impl StorageExt for Storage {
}
}
fn mount_path(&self, device: &devices::Device) -> Option<path::PathBuf> {
fn mount_path(&self, device: &devices::Device) -> Result<path::PathBuf> {
match self {
Self::Physical(s) => s.mount_path(device),
Self::SubDirectory(s) => s.mount_path(device),
@ -144,8 +135,8 @@ pub trait StorageExt {
fn local_info(&self, device: &devices::Device) -> Option<&local_info::LocalInfo>;
/// Get mount path of `self` on `device`.
/// Return [`None`] if the storage([`self`]) is not configured for the `device`.
fn mount_path(&self, device: &devices::Device) -> Option<path::PathBuf>;
/// `storages` is a `HashMap` with key of storage name and value of the storage.
fn mount_path(&self, device: &devices::Device) -> Result<path::PathBuf>;
/// Add local info of `device` to `self`.
fn bound_on_device(
@ -156,7 +147,7 @@ pub trait StorageExt {
) -> Result<()>;
/// Get parent
fn parent<'a>(&'a self, storages: &'a Storages) -> Option<&'a Storage>;
fn parent<'a>(&'a self, storages: &'a Storages) -> Option<&Storage>;
}
pub mod directory;
@ -166,14 +157,14 @@ pub mod physical_drive_partition;
#[derive(Debug, Serialize, Deserialize)]
pub struct Storages {
pub list: BTreeMap<String, Storage>,
pub list: HashMap<String, Storage>,
}
impl Storages {
/// Construct empty [`Storages`]
pub fn new() -> Storages {
Storages {
list: BTreeMap::new(),
list: HashMap::new(),
}
}

View file

@ -2,8 +2,7 @@
use anyhow::{Context, Result};
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use std::{collections::BTreeMap, fmt, path};
use std::{collections::HashMap, fmt, path};
use crate::devices;
use crate::util;
@ -18,10 +17,10 @@ pub struct Directory {
/// ID of parent storage.
parent: String,
/// Relative path to the parent storage.
relative_path: Vec<String>,
relative_path: path::PathBuf,
pub notes: String,
/// [`devices::Device`] name and localinfo pairs.
local_infos: BTreeMap<String, LocalInfo>,
/// Device and localinfo pairs.
local_infos: HashMap<String, LocalInfo>,
}
impl Directory {
@ -34,20 +33,15 @@ impl Directory {
parent: String,
relative_path: path::PathBuf,
notes: String,
local_infos: BTreeMap<String, LocalInfo>,
) -> Result<Directory> {
let relative_path = relative_path
.components()
.map(|c| c.as_os_str().to_str().map(|s| s.to_owned()))
.collect::<Option<Vec<_>>>()
.context("Path contains non-utf8 character")?;
Ok(Directory {
local_infos: HashMap<String, LocalInfo>,
) -> Directory {
Directory {
name,
parent,
relative_path,
notes,
local_infos,
})
}
}
pub fn try_from_device_path(
@ -62,23 +56,23 @@ impl Directory {
.context("Failed to compare diff of paths")?;
trace!("Selected parent: {}", parent.name());
let local_info = LocalInfo::new(alias, path);
Directory::new(
Ok(Directory::new(
name,
parent.name().to_string(),
diff_path,
notes,
BTreeMap::from([(device.name(), local_info)]),
)
HashMap::from([(device.name(), local_info)]),
))
}
pub fn update_note(self, notes: String) -> Directory {
Directory {
name: self.name,
parent: self.parent,
relative_path: self.relative_path,
Directory::new(
self.name,
self.parent,
self.relative_path,
notes,
local_infos: self.local_infos,
}
self.local_infos,
)
}
/// Resolve mount path of directory with current device.
@ -86,9 +80,8 @@ impl Directory {
let parent_mount_path = self
.parent(storages)
.context("Can't find parent storage")?
.mount_path(device)
.context("Can't find mount path")?;
Ok(parent_mount_path.join(self.relative_path.clone().iter().collect::<PathBuf>()))
.mount_path(device)?;
Ok(parent_mount_path.join(self.relative_path.clone()))
}
}
@ -105,10 +98,12 @@ impl StorageExt for Directory {
self.local_infos.get(&device.name())
}
fn mount_path(&self, device: &devices::Device) -> Option<std::path::PathBuf> {
self.local_infos
fn mount_path(&self, device: &devices::Device) -> Result<path::PathBuf> {
Ok(self
.local_infos
.get(&device.name())
.map(|info| info.mount_path())
.context(format!("LocalInfo for storage: {} not found", &self.name()))?
.mount_path())
}
/// This method doesn't use `mount_path`.
@ -127,7 +122,7 @@ impl StorageExt for Directory {
}
// Get parent `&Storage` of directory.
fn parent<'a>(&'a self, storages: &'a Storages) -> Option<&'a Storage> {
fn parent<'a>(&'a self, storages: &'a Storages) -> Option<&Storage> {
storages.get(&self.parent)
}
}
@ -139,7 +134,7 @@ impl fmt::Display for Directory {
"S {name:<10} < {parent:<10}{relative_path:<10} : {notes}",
name = self.name(),
parent = self.parent,
relative_path = self.relative_path.iter().collect::<PathBuf>().display(),
relative_path = self.relative_path.display(),
notes = self.notes,
)
}
@ -147,7 +142,7 @@ impl fmt::Display for Directory {
#[cfg(test)]
mod test {
use std::{collections::BTreeMap, path::PathBuf};
use std::{collections::HashMap, path::PathBuf};
use crate::{
devices::Device,
@ -166,7 +161,7 @@ mod test {
let local_info_dir =
LocalInfo::new("dir_alias".to_string(), PathBuf::from("/mnt/sample/subdir"));
let device = Device::new("test_device".to_string());
let mut local_infos = BTreeMap::new();
let mut local_infos = HashMap::new();
local_infos.insert(device.name(), local_info_dir);
let physical = PhysicalDrivePartition::new(
"parent".to_string(),
@ -183,10 +178,11 @@ mod test {
"subdir".into(),
"some note".to_string(),
local_infos,
)
.unwrap();
);
let mut storages = Storages::new();
storages.add(storages::Storage::Physical(physical)).unwrap();
storages
.add(storages::Storage::Physical(physical))
.unwrap();
storages.add(Storage::SubDirectory(directory)).unwrap();
// assert_eq!(directory.name(), "test_name");
assert_eq!(

View file

@ -1,10 +1,9 @@
//! Online storage which is not a children of any physical drive.
use anyhow::Result;
use anyhow::{Context, Result};
use byte_unit::Byte;
use byte_unit::UnitType;
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::fmt;
use std::path;
@ -24,7 +23,7 @@ pub struct OnlineStorage {
/// Capacity in bytes.
capacity: u64,
/// Device and local info pairs.
local_infos: BTreeMap<String, LocalInfo>,
local_infos: HashMap<String, LocalInfo>,
}
impl OnlineStorage {
@ -43,7 +42,7 @@ impl OnlineStorage {
name,
provider,
capacity,
local_infos: BTreeMap::from([(device.name(), local_info)]),
local_infos: HashMap::from([(device.name(), local_info)]),
}
}
}
@ -61,10 +60,15 @@ impl StorageExt for OnlineStorage {
self.local_infos.get(&device.name())
}
fn mount_path(&self, device: &devices::Device) -> Option<std::path::PathBuf> {
self.local_infos
fn mount_path(
&self,
device: &devices::Device,
) -> Result<std::path::PathBuf> {
Ok(self
.local_infos
.get(&device.name())
.map(|info| info.mount_path())
.context(format!("LocalInfo for storage: {} not found", &self.name()))?
.mount_path())
}
fn bound_on_device(
@ -92,9 +96,9 @@ impl fmt::Display for OnlineStorage {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"O {name:<10} {size:<10.2} {provider:<10}",
"O {name:<10} {size:<10} {provider:<10}",
name = self.name(),
size = Byte::from_u64(self.capacity).get_appropriate_unit(UnitType::Binary),
size = Byte::from_bytes(self.capacity.into()).get_appropriate_unit(true),
provider = self.provider,
)
}

View file

@ -4,11 +4,11 @@ use crate::devices;
use crate::devices::Device;
use crate::storages::{Storage, StorageExt, Storages};
use anyhow::{anyhow, Context, Result};
use byte_unit::{Byte, UnitType};
use byte_unit::Byte;
use serde::{Deserialize, Serialize};
use std::path::{self, Path};
use std::{collections::BTreeMap, fmt};
use sysinfo::{Disk, Disks};
use std::{collections::HashMap, fmt};
use sysinfo::{Disk, DiskExt, SystemExt};
use super::local_info::{self, LocalInfo};
@ -20,9 +20,8 @@ pub struct PhysicalDrivePartition {
capacity: u64,
fs: String,
is_removable: bool,
// system_names: BTreeMap<String, String>,
/// [`Device`] name and [`LocalInfo`] mapping.
local_infos: BTreeMap<String, LocalInfo>,
// system_names: HashMap<String, String>,
local_infos: HashMap<String, LocalInfo>,
}
impl PhysicalDrivePartition {
@ -41,7 +40,7 @@ impl PhysicalDrivePartition {
capacity,
fs,
is_removable,
local_infos: BTreeMap::from([(device.name(), local_info)]),
local_infos: HashMap::from([(device.name(), local_info)]),
}
}
@ -58,10 +57,7 @@ impl PhysicalDrivePartition {
.to_string();
let fs = disk.file_system();
trace!("fs: {:?}", fs);
let fs: String = fs
.to_str()
.context("Failed to convert file_system osstr")?
.to_owned();
let fs = std::str::from_utf8(fs)?;
let local_info = LocalInfo::new(alias, disk.mount_point().to_path_buf());
Ok(PhysicalDrivePartition {
name,
@ -69,8 +65,8 @@ impl PhysicalDrivePartition {
capacity: disk.total_space(),
fs: fs.to_string(),
is_removable: disk.is_removable(),
// system_names: BTreeMap::from([(device.name(), alias)]),
local_infos: BTreeMap::from([(device.name(), local_info)]),
// system_names: HashMap::from([(device.name(), alias)]),
local_infos: HashMap::from([(device.name(), local_info)]),
})
}
@ -113,10 +109,12 @@ impl StorageExt for PhysicalDrivePartition {
self.local_infos.get(&device.name())
}
fn mount_path(&self, device: &devices::Device) -> Option<path::PathBuf> {
self.local_infos
fn mount_path(&self, device: &devices::Device) -> Result<path::PathBuf> {
Ok(self
.local_infos
.get(&device.name())
.map(|info| info.mount_path())
.context(format!("LocalInfo for storage: {} not found", &self.name()))?
.mount_path())
}
fn bound_on_device(
@ -145,9 +143,9 @@ impl fmt::Display for PhysicalDrivePartition {
let removable_indicator = if self.is_removable { "+" } else { "-" };
write!(
f,
"P {name:<10} {size:<10.2} {removable:<1} {kind:<6} {fs:<5}",
"P {name:<10} {size:<10} {removable:<1} {kind:<6} {fs:<5}",
name = self.name(),
size = Byte::from_u64(self.capacity).get_appropriate_unit(UnitType::Binary),
size = Byte::from_bytes(self.capacity.into()).get_appropriate_unit(true),
removable = removable_indicator,
kind = self.kind,
fs = self.fs,
@ -163,9 +161,13 @@ pub fn select_physical_storage(
) -> Result<PhysicalDrivePartition> {
trace!("select_physical_storage");
// get disk info from sysinfo
let sys_disks = Disks::new_with_refreshed_list();
let sys_disks =
sysinfo::System::new_with_specifics(sysinfo::RefreshKind::new().with_disks_list());
trace!("refresh");
// sys_disks.refresh_disks_list();
// sys_disks.refresh_disks();
trace!("Available disks");
for disk in &sys_disks {
for disk in sys_disks.disks() {
trace!("{:?}", disk)
}
let disk = select_sysinfo_disk(&sys_disks)?;
@ -173,19 +175,21 @@ pub fn select_physical_storage(
Ok(storage)
}
fn select_sysinfo_disk(disks: &sysinfo::Disks) -> Result<&Disk> {
let available_disks = disks
fn select_sysinfo_disk(sysinfo: &sysinfo::System) -> Result<&Disk> {
let available_disks = sysinfo
.disks()
.iter()
.enumerate()
.map(|(i, disk)| {
let name = disk.name().to_str().unwrap_or("");
let fs: &str = disk.file_system().to_str().unwrap_or("unknown");
let fs: &str = std::str::from_utf8(disk.file_system()).unwrap_or("unknown");
let kind = format!("{:?}", disk.kind());
let mount_path = disk.mount_point();
let total_space = byte_unit::Byte::from_u64(disk.total_space())
.get_appropriate_unit(UnitType::Binary);
let total_space = byte_unit::Byte::from_bytes(disk.total_space().into())
.get_appropriate_unit(true)
.to_string();
format!(
"{}: {} {:>+5.1} ({}, {}) {}",
"{}: {} {} ({}, {}) {}",
i,
name,
total_space,
@ -199,8 +203,10 @@ fn select_sysinfo_disk(disks: &sysinfo::Disks) -> Result<&Disk> {
let disk = inquire::Select::new("Select drive:", available_disks).prompt()?;
let disk_num: usize = disk.split(':').next().unwrap().parse().unwrap();
trace!("disk_num: {}", disk_num);
let disk = disks
.get(disk_num)
let disk = sysinfo
.disks()
.iter()
.nth(disk_num)
.context("no disk matched with selected one.")?;
trace!("selected disk: {:?}", disk);
Ok(disk)

View file

@ -1,8 +1,6 @@
use std::path::{self, PathBuf};
use anyhow::{Context, Result};
use chrono::TimeDelta;
use console::Style;
use crate::{
devices::Device,
@ -19,12 +17,12 @@ pub fn min_parent_storage<'a>(
.list
.iter()
.filter_map(|(k, storage)| {
let storage_path = storage.mount_path(device)?;
let storage_path = match storage.mount_path(device) {
Ok(path) => path,
Err(_) => return None,
};
let diff = pathdiff::diff_paths(path, storage_path)?;
if diff
.components()
.any(|c| matches!(c, path::Component::ParentDir | path::Component::Prefix(_)))
{
if diff.components().any(|c| c == path::Component::ParentDir) {
None
} else {
Some((k, diff))
@ -63,17 +61,6 @@ pub fn format_summarized_duration(dt: chrono::Duration) -> String {
}
}
pub fn duration_style(time: TimeDelta) -> Style {
match time {
x if x < TimeDelta::days(7) => Style::new().green(),
x if x < TimeDelta::days(14) => Style::new().yellow(),
x if x < TimeDelta::days(28) => Style::new().magenta(),
x if x < TimeDelta::days(28 * 3) => Style::new().red(),
x if x < TimeDelta::days(180) => Style::new().red().bold(),
_ => Style::new().on_red().black(),
}
}
#[cfg(test)]
mod test {
use anyhow::Result;

View file

@ -1,110 +1,22 @@
mod integrated_test {
use std::{
fs::{self, DirBuilder, File},
io::{self, BufWriter, Write},
path,
};
use std::fs::DirBuilder;
use anyhow::{Context, Ok, Result, anyhow};
use assert_cmd::{Command, assert::OutputAssertExt};
use anyhow::{Ok, Result};
use assert_cmd::{assert::OutputAssertExt, Command};
use git2::Repository;
use log::{debug, trace};
use predicates::{boolean::PredicateBooleanExt, prelude::predicate};
const IS_GIT_CONFIG_WRITABLE: &str = "XDBM_ENABLE_OVERWRITE_GITCONFIG";
/// Setup global gitconfig if it doesn't exist.
///
/// # Errors
///
/// This function will return an error if it failed to get git global config and environment
/// variable [XDBM_ENABLE_OVERWRITE_GITCONFIG](`IS_GIT_CONFIG_WRITABLE`) is not set.
fn setup_gitconfig() -> Result<()> {
let config = git2::Config::open_default().expect("failed to get default");
if config.get_string("user.name").is_ok() && config.get_string("user.email").is_ok() {
return Ok(());
};
match std::env::var_os(IS_GIT_CONFIG_WRITABLE) {
Some(_) => {
debug!(
"global git config not found & env var `{}` found",
IS_GIT_CONFIG_WRITABLE
);
}
None => {
eprintln!("Failed to get git global config");
eprintln!(
"Set env var `{}` to set automatically (mainly for CI)",
IS_GIT_CONFIG_WRITABLE
);
return Err(anyhow!("failed to get git global config"));
}
};
let config_file = git2::Config::find_global().map_or_else(
|e| {
trace!("global git config file not found: {e:?}");
Ok(dirs::home_dir()
.context("Failed to get home dir")?
.join(".gitconfig"))
},
Ok,
)?;
let f = match File::options()
.create(true)
.truncate(true)
.write(true)
.open(config_file)
{
io::Result::Ok(f) => f,
io::Result::Err(_err) => return Ok(()),
};
let mut buf = BufWriter::new(f);
buf.write_all(
r#"
[user]
email = "test@example.com"
name = "testuser"
"#
.as_bytes(),
)?;
Ok(())
}
fn run_sync_cmd(config_dir: &path::Path, use_cl: bool) -> Result<()> {
if use_cl {
Command::cargo_bin("xdbm")?
.arg("-c")
.arg(config_dir)
.args(["sync", "-vvvv"])
.assert()
.success();
} else {
Command::cargo_bin("xdbm")?
.arg("-c")
.arg(config_dir)
.args(["sync", "-vvvv", "-u"])
.assert()
.success();
}
Ok(())
}
use log::trace;
use predicates::prelude::predicate;
#[test]
fn single_device() -> Result<()> {
let config_dir = assert_fs::TempDir::new()?;
setup_gitconfig()?;
// init
let mut cmd = Command::cargo_bin("xdbm")?;
cmd.arg("-c")
.arg(config_dir.path())
.arg("init")
.arg("testdev")
.arg("-vvvv");
.arg("testdev");
cmd.assert().success().stdout(predicate::str::contains(""));
eprintln!("{:?}", fs::read_dir(config_dir.path())?.collect::<Vec<_>>());
assert_eq!(
std::fs::read_to_string(config_dir.path().join("devname"))?,
"testdev\n"
@ -191,7 +103,6 @@ mod integrated_test {
fn two_devices_with_same_name() -> Result<()> {
// 1st device
let config_dir_1 = assert_fs::TempDir::new()?;
setup_gitconfig()?;
let mut cmd1 = Command::cargo_bin("xdbm")?;
cmd1.arg("-c")
.arg(config_dir_1.path())
@ -201,30 +112,21 @@ mod integrated_test {
// bare-repo
let bare_repo_dir = assert_fs::TempDir::new()?;
let _bare_repo = Repository::init_bare(&bare_repo_dir)?;
let bare_repo = Repository::init_bare(&bare_repo_dir)?;
// push to bare repository
let repo_1 = Repository::open(&config_dir_1)?;
let upstream_name = "remote";
let mut repo_1_remote =
repo_1.remote(upstream_name, bare_repo_dir.path().to_str().unwrap())?;
repo_1_remote.push(&[repo_1.head().unwrap().name().unwrap()], None)?;
repo_1_remote.push(&["refs/heads/main"], None)?;
trace!("bare repo {:?}", bare_repo_dir.display());
println!("{:?}", bare_repo_dir.read_dir()?);
// set up upstream branch
let (mut repo_1_branch, _branch_type) = repo_1.branches(None)?.next().unwrap()?;
println!("head {}", repo_1.head().unwrap().name().unwrap());
repo_1_branch.set_upstream(Some(
format!(
"{}/{}",
upstream_name,
repo_1_branch.name().unwrap().unwrap()
)
.as_str(),
))?;
repo_1_branch.set_upstream(Some(format!("{}/{}", upstream_name, "main").as_str()))?;
// 2nd device
let config_dir_2 = assert_fs::TempDir::new()?;
setup_gitconfig()?;
let mut cmd2 = Command::cargo_bin("xdbm")?;
cmd2.arg("-c")
.arg(config_dir_2.path())
@ -240,7 +142,6 @@ mod integrated_test {
fn directory_without_parent() -> Result<()> {
// 1st device
let config_dir_1 = assert_fs::TempDir::new()?;
setup_gitconfig()?;
let mut cmd1 = Command::cargo_bin("xdbm")?;
cmd1.arg("-c")
.arg(config_dir_1.path())
@ -272,10 +173,7 @@ mod integrated_test {
#[test]
fn two_devices() -> Result<()> {
// 1st device
//
// devices: first
let config_dir_1 = assert_fs::TempDir::new()?;
setup_gitconfig()?;
let mut cmd1 = Command::cargo_bin("xdbm")?;
cmd1.arg("-c")
.arg(config_dir_1.path())
@ -285,31 +183,21 @@ mod integrated_test {
// bare-repo
let bare_repo_dir = assert_fs::TempDir::new()?;
let _bare_repo = Repository::init_bare(&bare_repo_dir)?;
let bare_repo = Repository::init_bare(&bare_repo_dir)?;
// push to bare repository
let repo_1 = Repository::open(&config_dir_1)?;
let upstream_name = "remote";
let mut repo_1_remote =
repo_1.remote(upstream_name, bare_repo_dir.path().to_str().unwrap())?;
repo_1_remote.push(&[repo_1.head().unwrap().name().unwrap()], None)?;
repo_1_remote.push(&["refs/heads/main"], None)?;
trace!("bare repo {:?}", bare_repo_dir.display());
println!("{:?}", bare_repo_dir.read_dir()?);
// set up upstream branch
let (mut repo_1_branch, _branch_type) = repo_1.branches(None)?.next().unwrap()?;
repo_1_branch.set_upstream(Some(
format!(
"{}/{}",
upstream_name,
repo_1_branch.name().unwrap().unwrap()
)
.as_str(),
))?;
repo_1_branch.set_upstream(Some(format!("{}/{}", upstream_name, "main").as_str()))?;
// 2nd device
//
// devices: first, second
let config_dir_2 = assert_fs::TempDir::new()?;
setup_gitconfig()?;
let mut cmd2 = Command::cargo_bin("xdbm")?;
cmd2.arg("-c")
.arg(config_dir_2.path())
@ -332,16 +220,15 @@ mod integrated_test {
assert!(config_dir_2.join("backups").join("first.yml").exists());
assert!(config_dir_2.join("backups").join("second.yml").exists());
// sync
Command::cargo_bin("xdbm")?
.arg("-c")
.arg(config_dir_2.path())
.arg("sync")
.arg("-vvvv")
.arg("-u")
std::process::Command::new("git")
.arg("push")
.current_dir(&config_dir_2)
.assert()
.success()
.stderr(predicate::str::contains("successfully pushed"));
.success();
// let repo_2 = Repository::open(config_dir_2)?;
// // return Err(anyhow!("{:?}", repo_2.remotes()?.iter().collect::<Vec<_>>()));
// let mut repo_2_remote = repo_2.find_remote(repo_2.remotes()?.get(0).unwrap())?;
// repo_2_remote.push(&[] as &[&str], None)?;
std::process::Command::new("git")
.arg("pull")
.current_dir(&config_dir_1)
@ -349,11 +236,6 @@ mod integrated_test {
.success();
// Add storage
//
// devices: first, second
// storages:
// - gdrive @ sample_storage (online)
// - first: sample_storage
let sample_storage = assert_fs::TempDir::new()?;
let mut cmd_add_storage_1 = Command::cargo_bin("xdbm")?;
cmd_add_storage_1
@ -375,13 +257,6 @@ mod integrated_test {
.success()
.stdout(predicate::str::contains(""));
// Add storage (directory)
//
// devices: first, second
// storages:
// - gdrive (online)
// - first: sample_storage
// - gdrive_docs (subdir of sample_storage/foo/bar)
// - first
let sample_directory = &sample_storage.join("foo").join("bar");
DirBuilder::new().recursive(true).create(sample_directory)?;
Command::cargo_bin("xdbm")?
@ -401,18 +276,18 @@ mod integrated_test {
std::fs::read_to_string(config_dir_1.join("storages.yml"))?.contains("parent: gdrive1")
);
run_sync_cmd(&config_dir_1, false)?;
run_sync_cmd(&config_dir_2, false)?;
std::process::Command::new("git")
.arg("push")
.current_dir(&config_dir_1)
.assert()
.success();
std::process::Command::new("git")
.arg("pull")
.current_dir(&config_dir_2)
.assert()
.success();
// bind
//
// devices: first, second
// storages:
// - gdrive (online)
// - first: sample_storage
// - gdrive_docs (subdir of sample_storage/foo/bar)
// - first
// - second: sample_directory
Command::cargo_bin("xdbm")?
.arg("-c")
.arg(config_dir_2.path())
@ -428,16 +303,6 @@ mod integrated_test {
.stdout(predicate::str::contains(""));
// storage 3
//
// devices: first, second
// storages:
// - gdrive (online)
// - first: sample_storage
// - gdrive_docs (subdir of sample_storage/foo/bar)
// - first
// - second: sample_directory
// - nas (online)
// - second: sample_storage_2
let sample_storage_2 = assert_fs::TempDir::new()?;
Command::cargo_bin("xdbm")?
.arg("-c")
@ -456,7 +321,6 @@ mod integrated_test {
.assert()
.success();
// storage list
Command::cargo_bin("xdbm")?
.arg("-c")
.arg(config_dir_2.path())
@ -464,23 +328,8 @@ mod integrated_test {
.arg("list")
.arg("-l")
.assert()
.success()
.stdout(predicate::str::contains("gdrive_docs").and(predicate::str::contains("nas")));
.success();
// backup add
//
// devices: first, second
// storages:
// - gdrive (online)
// - first: sample_storage
// - gdrive_docs (subdir of sample_storage/foo/bar)
// - first
// - second: sample_directory
// - nas (online)
// - second: sample_storage_2
// backups:
// - foodoc: second
// - sample_storage_2/foo/bar -> sample_directory/docs
let backup_src = &sample_storage_2.join("foo").join("bar");
DirBuilder::new().recursive(true).create(backup_src)?;
let backup_dest = &sample_directory.join("docs");
@ -501,7 +350,6 @@ mod integrated_test {
.assert()
.success();
// backup add but with existing name
Command::cargo_bin("xdbm")?
.arg("-c")
.arg(config_dir_2.path())
@ -519,297 +367,6 @@ mod integrated_test {
.failure()
.stderr(predicate::str::contains("already"));
// backup list
Command::cargo_bin("xdbm")?
.arg("-c")
.arg(config_dir_2.path())
.arg("backup")
.arg("list")
.assert()
.success()
.stdout(
predicate::str::contains("foodoc")
.and(predicate::str::contains("nas"))
.and(predicate::str::contains("gdrive_docs"))
.and(predicate::str::contains("---")),
);
// backup done
//
// devices: first, second
// storages:
// - gdrive (online)
// - first: sample_storage
// - gdrive_docs (subdir of sample_storage/foo/bar)
// - first
// - second: sample_directory
// - nas (online)
// - second: sample_storage_2
// backups:
// - foodoc: second
// - sample_storage_2/foo/bar -> sample_directory/docs (done 1)
Command::cargo_bin("xdbm")?
.arg("-c")
.arg(config_dir_2.path())
.arg("backup")
.arg("done")
.arg("foodoc")
.arg("0")
.assert()
.success();
// backup list after backup done
Command::cargo_bin("xdbm")?
.arg("-c")
.arg(config_dir_2.path())
.arg("backup")
.arg("list")
.assert()
.success()
.stdout(
predicate::str::contains("foodoc")
.and(predicate::str::contains("nas"))
.and(predicate::str::contains("gdrive_docs"))
.and(predicate::str::contains("---").not()),
);
// status
Command::cargo_bin("xdbm")?
.arg("-c")
.arg(config_dir_2.path())
.arg("status")
.assert()
.success();
Command::cargo_bin("xdbm")?
.arg("-c")
.arg(config_dir_2.path())
.arg("status")
.arg("-s")
.arg(backup_src.clone().join("foo"))
.assert()
.success()
.stdout(predicate::str::contains("nas").and(predicate::str::contains("foodoc").not()));
Command::cargo_bin("xdbm")?
.arg("-c")
.arg(config_dir_2.path())
.arg("status")
.arg("-sb")
.arg(backup_src.clone().join("foo"))
.assert()
.success()
.stdout(
predicate::str::contains("nas")
.and(predicate::str::contains("second"))
.and(predicate::str::contains("foodoc")),
);
Command::cargo_bin("xdbm")?
.arg("-c")
.arg(config_dir_2.path())
.arg("status")
.arg("-sb")
.arg(backup_src.clone().parent().unwrap())
.assert()
.success()
.stdout(
predicate::str::contains("nas")
.and(predicate::str::contains("second").not())
.and(predicate::str::contains("foodoc").not()),
);
run_sync_cmd(&config_dir_2, true)?;
run_sync_cmd(&config_dir_1, true)?;
// bind
//
// devices: first, second
// storages:
// - gdrive (online)
// - first: sample_storage
// - gdrive_docs (subdir of sample_storage/foo/bar)
// - first
// - second: sample_directory
// - nas (online)
// - first: sample_storage_2_first_path
// - second: sample_storage_2
// backups:
// - foodoc: second
// - sample_storage_2/foo/bar -> sample_directory/docs (done 1)
let sample_storage_2_first_path = assert_fs::TempDir::new()?;
Command::cargo_bin("xdbm")?
.arg("-c")
.arg(config_dir_1.path())
.arg("storage")
.arg("bind")
.arg("--alias")
.arg("sample2")
.arg("--path")
.arg(sample_storage_2_first_path.path())
.arg("nas")
.assert()
.success()
.stdout(predicate::str::contains(""));
// backup add
//
// devices: first, second
// storages:
// - gdrive (online)
// - first: sample_storage
// - gdrive_docs (subdir of sample_storage/foo/bar)
// - first
// - second: sample_directory
// - nas (online)
// - first: sample_storage_2_first_path
// - second: sample_storage_2
// backups:
// - foodoc: second
// - sample_storage_2/foo/bar -> sample_directory/docs (done 1)
// - abcdbackup: first
// - sample_storage_2_first_path/abcd/efgh -> sample_storage/Downloads/abcd/efgh
let backup_src = &sample_storage_2_first_path.join("abcd").join("efgh");
DirBuilder::new().recursive(true).create(backup_src)?;
let backup_dest = &sample_storage.join("Downloads").join("abcd").join("efgh");
DirBuilder::new().recursive(true).create(backup_dest)?;
Command::cargo_bin("xdbm")?
.arg("-c")
.arg(config_dir_1.path())
.arg("backup")
.arg("add")
.arg("--src")
.arg(backup_src)
.arg("--dest")
.arg(backup_dest)
.arg("abcdbackup")
.arg("external")
.arg("rsync")
.arg("note: nonsense")
.assert()
.success();
// backup add
//
// devices: first, second
// storages:
// - gdrive (online)
// - first: sample_storage
// - gdrive_docs (subdir of sample_storage/foo/bar)
// - first
// - second: sample_directory
// - nas (online)
// - first: sample_storage_2_first_path
// - second: sample_storage_2
// backups:
// - foodoc: second
// - sample_storage_2/foo/bar -> sample_directory/docs (done 1)
// - abcdbackup: first
// - sample_storage_2_first_path/abcd/efgh -> sample_storage/Downloads/abcd/efgh
// - abcdsubbackup: first
// - sample_storage_2_first_path/abcd/efgh/sub -> sample_storage/Downloads/abcd/efgh/sub
let backup_src = &sample_storage_2_first_path
.join("abcd")
.join("efgh")
.join("sub");
DirBuilder::new().recursive(true).create(backup_src)?;
let backup_dest = &sample_storage
.join("Downloads")
.join("abcd")
.join("efgh")
.join("sub");
DirBuilder::new().recursive(true).create(backup_dest)?;
Command::cargo_bin("xdbm")?
.arg("-c")
.arg(config_dir_1.path())
.arg("backup")
.arg("add")
.arg("--src")
.arg(backup_src)
.arg("--dest")
.arg(backup_dest)
.arg("abcdsubbackup")
.arg("external")
.arg("rsync")
.arg("note: only subdirectory")
.assert()
.success();
run_sync_cmd(&config_dir_1, false)?;
run_sync_cmd(&config_dir_2, false)?;
// backup add
//
// devices: first, second
// storages:
// - gdrive (online)
// - first: sample_storage
// - gdrive_docs (subdir of sample_storage/foo/bar)
// - first
// - second: sample_directory
// - nas (online)
// - first: sample_storage_2_first_path
// - second: sample_storage_2
// backups:
// - foodoc: second
// - sample_storage_2/foo/bar -> sample_directory/docs (done 1)
// - abcdbackup: first
// - sample_storage_2_first_path/abcd/efgh -> sample_storage/Downloads/abcd/efgh
// - abcdsubbackup: first
// - sample_storage_2_first_path/abcd/efgh/sub -> sample_storage/Downloads/abcd/efgh/sub
// - abcdbackup2: second
// - sample_storage_2/abcd/efgh -> sample_directory/Downloads/abcd/efgh
let backup_src = &sample_storage_2.join("abcd").join("efgh");
DirBuilder::new().recursive(true).create(backup_src)?;
let backup_dest = &sample_directory.join("Downloads").join("abcd").join("efgh");
DirBuilder::new().recursive(true).create(backup_dest)?;
Command::cargo_bin("xdbm")?
.arg("-c")
.arg(config_dir_2.path())
.arg("backup")
.arg("add")
.arg("--src")
.arg(backup_src)
.arg("--dest")
.arg(backup_dest)
.arg("abcdbackup2")
.arg("external")
.arg("rsync")
.arg("note: only subdirectory")
.assert()
.success();
// status
Command::cargo_bin("xdbm")?
.arg("-c")
.arg(config_dir_2.path())
.arg("status")
.arg("-sb")
.arg(backup_src)
.assert()
.success()
.stdout(
predicate::str::contains("nas")
.and(predicate::str::contains("first"))
.and(predicate::str::contains("abcdbackup"))
.and(predicate::str::contains("abcdsubbackup").not())
.and(predicate::str::contains("second"))
.and(predicate::str::contains("abcdbackup2")),
);
Command::cargo_bin("xdbm")?
.arg("-c")
.arg(config_dir_2.path())
.arg("status")
.arg("-sb")
.arg(backup_src.join("sub"))
.assert()
.success()
.stdout(
predicate::str::contains("nas")
.and(predicate::str::contains("first"))
.and(predicate::str::contains("abcdbackup"))
.and(predicate::str::contains("abcdsubbackup"))
.and(predicate::str::contains("second"))
.and(predicate::str::contains("abcdbackup2")),
);
Ok(())
}
}