1 Commits

Author SHA1 Message Date
dependabot[bot]
991fbd7f58 Bump rust-i18n from 3.1.3 to 3.1.5
Bumps [rust-i18n](https://github.com/longbridge/rust-i18n) from 3.1.3 to 3.1.5.
- [Release notes](https://github.com/longbridge/rust-i18n/releases)
- [Commits](https://github.com/longbridge/rust-i18n/compare/v3.1.3...v3.1.5)

---
updated-dependencies:
- dependency-name: rust-i18n
  dependency-version: 3.1.5
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-05-22 19:21:34 +00:00
10 changed files with 581 additions and 665 deletions

49
Cargo.lock generated
View File

@@ -17,12 +17,6 @@ version = "1.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9"
[[package]]
name = "anyhow"
version = "1.0.96"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b964d184e89d9b6b67dd2715bc8e74cf3107fb2b529990c90cf517326150bf4"
[[package]] [[package]]
name = "arc-swap" name = "arc-swap"
version = "1.7.1" version = "1.7.1"
@@ -233,6 +227,7 @@ name = "kramer"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"clap", "clap",
"libc",
"ron", "ron",
"rust-i18n", "rust-i18n",
"serde", "serde",
@@ -245,14 +240,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
[[package]] [[package]]
name = "libyml" name = "libc"
version = "0.0.5" version = "0.2.171"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3302702afa434ffa30847a83305f0a69d6abd74293b6554c18ec85c7ef30c980" checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6"
dependencies = [
"anyhow",
"version_check",
]
[[package]] [[package]]
name = "log" name = "log"
@@ -342,9 +333,9 @@ dependencies = [
[[package]] [[package]]
name = "rust-i18n" name = "rust-i18n"
version = "3.1.3" version = "3.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "71b3a6e1c6565b77c86d868eea3068b0eb39582510f9c78cfbd5c67bd36fda9b" checksum = "fda2551fdfaf6cc5ee283adc15e157047b92ae6535cf80f6d4962d05717dc332"
dependencies = [ dependencies = [
"globwalk", "globwalk",
"once_cell", "once_cell",
@@ -356,9 +347,9 @@ dependencies = [
[[package]] [[package]]
name = "rust-i18n-macro" name = "rust-i18n-macro"
version = "3.1.3" version = "3.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6180d8506af2b485ffc1eab7fc6d15678336a694f2b5efac5f2ca78c52928275" checksum = "22baf7d7f56656d23ebe24f6bb57a5d40d2bce2a5f1c503e692b5b2fa450f965"
dependencies = [ dependencies = [
"glob", "glob",
"once_cell", "once_cell",
@@ -367,15 +358,15 @@ dependencies = [
"rust-i18n-support", "rust-i18n-support",
"serde", "serde",
"serde_json", "serde_json",
"serde_yml", "serde_yaml",
"syn", "syn",
] ]
[[package]] [[package]]
name = "rust-i18n-support" name = "rust-i18n-support"
version = "3.1.3" version = "3.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "938f16094e2b09e893b1f85c9da251739a832d4272a5957217977da3a0713bb6" checksum = "940ed4f52bba4c0152056d771e563b7133ad9607d4384af016a134b58d758f19"
dependencies = [ dependencies = [
"arc-swap", "arc-swap",
"base62", "base62",
@@ -388,7 +379,7 @@ dependencies = [
"regex", "regex",
"serde", "serde",
"serde_json", "serde_json",
"serde_yml", "serde_yaml",
"siphasher", "siphasher",
"toml", "toml",
"triomphe", "triomphe",
@@ -457,18 +448,16 @@ dependencies = [
] ]
[[package]] [[package]]
name = "serde_yml" name = "serde_yaml"
version = "0.0.12" version = "0.9.34+deprecated"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "59e2dd588bf1597a252c3b920e0143eb99b0f76e4e082f4c92ce34fbc9e71ddd" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47"
dependencies = [ dependencies = [
"indexmap", "indexmap",
"itoa", "itoa",
"libyml",
"memchr",
"ryu", "ryu",
"serde", "serde",
"version_check", "unsafe-libyaml",
] ]
[[package]] [[package]]
@@ -558,10 +547,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83"
[[package]] [[package]]
name = "version_check" name = "unsafe-libyaml"
version = "0.9.5" version = "0.2.11"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861"
[[package]] [[package]]
name = "walkdir" name = "walkdir"

View File

@@ -10,9 +10,9 @@ edition = "2021"
# #
# For clap info, see [dependencies.clap] # For clap info, see [dependencies.clap]
# For serde info, see [dependencies.serde] # For serde info, see [dependencies.serde]
libc = "0.2.171, ~0.2.169"
ron = "0.8.1, >=0.8, <0.9" ron = "0.8.1, >=0.8, <0.9"
rust-i18n = "3.1.3, ~3.1.3" rust-i18n = "3.1.5"
[dependencies.clap] [dependencies.clap]
version = "4.5, ~4.5.27" version = "4.5, ~4.5.27"

View File

@@ -1,15 +0,0 @@
use std::io::{self, Seek, SeekFrom};
/// Get length of data stream.
/// Physical length of data stream in bytes
/// (multiple of sector_size, rather than actual).
///
/// This will attempt to return the stream to its current read position.
pub fn get_stream_length<S: Seek>(stream: &mut S) -> io::Result<u64> {
    // Remember where the caller was positioned.
    let original_pos = stream.stream_position()?;
    // Seek to the end to learn the total length. Hold the raw Result so the
    // caller's position is restored even when this seek fails.
    let length = stream.seek(SeekFrom::End(0));
    stream.seek(SeekFrom::Start(original_pos))?;
    length
}

View File

@@ -1,16 +1,20 @@
mod io;
mod mapping;
mod recovery; mod recovery;
mod mapping;
use std::fs::{File, OpenOptions};
use std::path::{Path, PathBuf};
use clap::Parser; use clap::Parser;
use libc::O_DIRECT;
use mapping::MapFile; use mapping::MapFile;
use recovery::Recover; use recovery::Recover;
use std::{
fs::{File, OpenOptions},
io::{self, Seek, SeekFrom},
os::unix::fs::OpenOptionsExt,
path::PathBuf,
};
const FB_SECTOR_SIZE: u16 = 2048; const FB_SECTOR_SIZE: u16 = 2048;
const FB_PAD_VALUE: u8 = 0;
#[derive(Parser, Debug)] #[derive(Parser, Debug)]
struct Args { struct Args {
@@ -39,6 +43,7 @@ struct Args {
sector_size: u16, sector_size: u16,
} }
fn main() { fn main() {
let config = Args::parse(); let config = Args::parse();
@@ -46,58 +51,69 @@ fn main() {
// I'm lazy and don't want to mess around with comparing error types. // I'm lazy and don't want to mess around with comparing error types.
// Thus, any error in I/O here should be treated as fatal. // Thus, any error in I/O here should be treated as fatal.
let mut input: File = OpenOptions::new() let mut input: File = {
match OpenOptions::new()
.custom_flags(O_DIRECT)
.read(true) .read(true)
.write(false)
.append(false)
.create(false)
.open(&config.input.as_path()) .open(&config.input.as_path())
.expect("Failed to open input file"); {
Ok(f) => f,
Err(err) => panic!("Failed to open input file: {:?}", err)
}
};
let output: File = { let mut output: File = {
// Keep this clean, make a short-lived binding. // Keep this clean, make a short-lived binding.
let path = get_path(&config.output, &config.input.to_str().unwrap(), "iso"); let path = get_path(
&config.output,
&config.input.to_str().unwrap(),
"iso"
);
OpenOptions::new() match OpenOptions::new()
.custom_flags(O_DIRECT)
.read(true) .read(true)
.write(true) .write(true)
.create(true) .create(true)
.open(path) .open(path)
.expect("Failed to open/create output file") {
Ok(f) => f,
Err(err) => panic!("Failed to open/create output file. {:?}", err)
}
}; };
// Check if output file is shorter than input. // Check if output file is shorter than input.
// If so, autoextend the output file. // If so, autoextend the output file.
//
// Is this actually needed? I don't think I need to preallocate the space.
/*
{ {
let input_len = let input_len = get_stream_length(&mut input)
get_stream_length(&mut input).expect("Failed to get the length of the input data."); .expect("Failed to get the length of the input data.");
let output_len = let output_len = get_stream_length(&mut output)
get_stream_length(&mut output).expect("Failed to get the length of the output file."); .expect("Failed to get the length of the output file.");
if output_len < input_len { if output_len < input_len {
output output.set_len(input_len)
.set_len(input_len)
.expect("Failed to autofill output file.") .expect("Failed to autofill output file.")
} }
} }
*/
/*
let map: MapFile = { let map: MapFile = {
let path = get_path( let path = get_path(
&config.output, &config.output,
&config &config.input.to_str().unwrap(),
.input "map"
.to_str()
.expect("Input path is not UTF-8 valid"),
"map",
); );
let file = OpenOptions::new() let file = match OpenOptions::new()
.read(true) .read(true)
.create(true) .create(true)
.open(path) .open(path)
.expect("Failed to open/create mapping file"); {
Ok(f) => f,
Err(err) => panic!("Failed to open/create mapping file. {:?}", err)
};
if let Ok(map) = MapFile::try_from(file) { if let Ok(map) = MapFile::try_from(file) {
map map
@@ -105,53 +121,46 @@ fn main() {
MapFile::new(config.sector_size) MapFile::new(config.sector_size)
} }
}; };
*/
let buf_capacity = let mut recover_tool = Recover::new(config, input, output, map);
crate::io::get_stream_length(&mut input).expect("Failed to get buffer capacity from input");
let mut recover_tool = Recover::new(config, input, output, MapFile::default(), buf_capacity);
recover_tool.run(); recover_tool.run();
//todo!("Recovery, Map saving, and closure of all files."); todo!("Recovery, Map saving, and closure of all files.");
/*
let mut buf: Vec<u8> = Vec::with_capacity(
get_stream_length(&mut input).expect("Failed to get the length of the input data.")
as usize,
);
println!(
"Read {} bytes",
input
.read_to_end(&mut buf)
.expect("Failed to read complete input stream.")
);
println!("Wrote {} bytes", {
output
.write_all(&buf)
.expect("Failed to write complete output stream.");
&buf.len()
});
*/
} }
/// Generates a file path if one not provided. /// Generates a file path if one not provided.
/// source_name for fallback name. /// source_name for fallback name.
fn get_path<P>(file_path: &Option<P>, source_name: &str, extension: &str) -> PathBuf fn get_path(
where output: &Option<PathBuf>,
P: AsRef<Path>, source_name: &str,
{ extention: &str
if let Some(f) = file_path { ) -> PathBuf {
f.as_ref().to_path_buf() if let Some(f) = output {
f.to_owned()
} else { } else {
PathBuf::from(format!("{:?}.{}", source_name, extension)) PathBuf::from(format!(
"{:?}.{}",
source_name,
extention,
))
.as_path() .as_path()
.to_owned() .to_owned()
} }
} }
/// Get length of data stream.
/// Physical length of data stream in bytes
/// (multiple of sector_size, rather than actual).
fn get_stream_length<S: Seek>(input: &mut S) -> io::Result<u64> {
let len = input.seek(SeekFrom::End(0))?;
let _ = input.seek(SeekFrom::Start(0));
Ok(len)
}
#[cfg(test)] #[cfg(test)]
#[allow(unused)] #[allow(unused)]
mod tests { mod tests {

446
src/mapping.rs Normal file
View File

@@ -0,0 +1,446 @@
use ron::de::{from_reader, SpannedError};
use serde::Deserialize;
use std::fs::File;
use crate::FB_SECTOR_SIZE;
/// Domain, in sectors.
/// Requires sector_size to be provided elsewhere for conversion to bytes.
#[derive(Clone, Copy, Debug, Deserialize, PartialEq)]
pub struct Domain {
    // First sector of the range (inclusive).
    pub start: usize,
    // One past the last sector (exclusive) — `len()` computes `end - start`.
    pub end: usize,
}
impl Default for Domain {
fn default() -> Self {
Domain { start: 0, end: 1 }
}
}
impl Domain {
    /// Return length of domain in sectors.
    ///
    /// NOTE(review): underflows (panics in debug builds) if `end < start`;
    /// assumes domains are well-formed — confirm at construction sites.
    pub fn len(self) -> usize {
        self.end - self.start
    }
}
/// A map for data stored in memory for processing and saving to disk.
#[derive(Clone, Copy, Debug, Deserialize, PartialEq)]
pub struct Cluster {
    // Sector range this cluster covers.
    domain: Domain,
    // Recovery progress recorded for that range.
    stage: Stage,
}
impl Default for Cluster {
    /// An untested cluster over the default single-sector domain.
    fn default() -> Self {
        Self {
            domain: Domain::default(),
            stage: Stage::default(),
        }
    }
}
impl Cluster {
    /// Breaks apart into a vec of clusters, each of `cluster_len` sectors,
    /// excepting the last, which carries whatever remainder is left.
    ///
    /// Returns an empty vec for a zero-length domain.
    pub fn subdivide(&mut self, cluster_len: usize) -> Vec<Cluster> {
        let mut clusters: Vec<Cluster> = vec![];
        let mut start = self.domain.start;
        // BUG FIX: plain integer division replaces the old
        // `(a as f64 / b as f64).floor() as usize` round-trip, which was
        // needless and loses precision for very large domains.
        for _ in 0..self.domain.len() / cluster_len {
            clusters.push(Cluster {
                domain: Domain {
                    start,
                    end: start + cluster_len,
                },
                stage: self.stage,
            });
            start += cluster_len;
        }
        // BUG FIX: only emit the trailing remainder when it is non-empty.
        // The old code unconditionally pushed a zero-length cluster whenever
        // the domain length was an exact multiple of `cluster_len`.
        if start < self.domain.end {
            clusters.push(Cluster {
                domain: Domain {
                    start,
                    end: self.domain.end,
                },
                stage: self.stage,
            });
        }
        clusters
    }

    /// Set the recovery stage, returning `self` for chaining.
    pub fn set_stage(&mut self, stage: Stage) -> &mut Self {
        self.stage = stage;
        self
    }
}
/// Recovery state of a cluster.
///
/// `PartialOrd` is derived so `ForIsolation` levels can be compared by their
/// inner pass number (see `MapFile::get_stage`).
#[derive(Clone, Copy, Debug, Deserialize, PartialEq, PartialOrd)]
pub enum Stage {
    /// Not yet read at all.
    Untested,
    /// Failed a straight read; retry by isolating at the given pass level.
    ForIsolation(u8),
    /// Given up on — no further recovery attempted.
    Damaged,
}
impl Default for Stage {
fn default() -> Self {
Stage::Untested
}
}
/// On-disk recovery map: sector size, overall domain, and the cluster list.
#[derive(Clone, Debug, Deserialize, PartialEq)]
pub struct MapFile {
    /// Sector size in bytes; converts `Domain` sector counts to byte offsets.
    pub sector_size: u16,
    /// Full extent of the medium being recovered, in sectors.
    pub domain: Domain,
    /// Clusters over `domain` — `defrag()` assumes they tile it contiguously
    /// starting at sector 0 (TODO confirm this invariant is enforced).
    pub map: Vec<Cluster>,
}
impl TryFrom<File> for MapFile {
    type Error = SpannedError;
    /// Deserialize a `MapFile` from RON text read out of `file`.
    fn try_from(file: File) -> Result<Self, Self::Error> {
        from_reader(file)
    }
}
impl Default for MapFile {
fn default() -> Self {
MapFile {
sector_size: FB_SECTOR_SIZE,
domain: Domain::default(),
map: vec![Cluster {
domain: Domain::default(),
stage: Stage::Untested,
}],
}
}
}
impl MapFile {
pub fn new(sector_size: u16) -> Self {
MapFile::default()
.set_sector_size(sector_size)
.to_owned()
}
pub fn set_sector_size(&mut self, sector_size: u16) -> &mut Self {
self.sector_size = sector_size;
self
}
/// Recalculate cluster mappings.
fn update(&mut self, new_cluster: Cluster) -> &mut Self {
let mut new_map: Vec<Cluster> = vec![Cluster::from(new_cluster.to_owned())];
for map_cluster in self.map.iter() {
let mut map_cluster = *map_cluster;
// If new_cluster doesn't start ahead and ends short, map_cluster is forgotten.
if new_cluster.domain.start < map_cluster.domain.start
&& new_cluster.domain.end < map_cluster.domain.end {
/*
new_cluster overlaps the start of map_cluster,
but ends short of map_cluster end.
ACTION: Crop map_cluster to start at end of new_cluster.
*/
map_cluster.domain.start = new_cluster.domain.end;
new_map.push(map_cluster);
} else if new_cluster.domain.end < map_cluster.domain.end {
/*
new_cluster starts within map_cluster domain.
ACTION: Crop
*/
let domain_end = map_cluster.domain.end;
// Crop current object.
map_cluster.domain.end = new_cluster.domain.start;
new_map.push(map_cluster);
if new_cluster.domain.end < map_cluster.domain.end {
/*
new_cluster is within map_cluster.
ACTION: Crop & Fracture map_cluster
NOTE: Crop completed above.
*/
new_map.push(Cluster {
domain: Domain {
start: new_cluster.domain.end,
end: domain_end,
},
stage: map_cluster.stage.to_owned()
});
}
} else {
/*
No overlap.
ACTION: Transfer
*/
new_map.push(map_cluster);
}
}
self.map = new_map;
self
}
/// Get current recovery stage.
pub fn get_stage(&self) -> Stage {
let mut recover_stage = Stage::Damaged;
for cluster in self.map.iter() {
match cluster.stage {
Stage::Untested => return Stage::Untested,
Stage::ForIsolation(_) => {
if recover_stage == Stage::Damaged
|| cluster.stage < recover_stage {
// Note that recover_stage after first condition is
// only ever Stage::ForIsolation(_), thus PartialEq,
// PartialOrd are useful for comparing the internal value.
recover_stage = cluster.stage
}
},
Stage::Damaged => (),
}
}
recover_stage
}
/// Get clusters of common stage.
pub fn get_clusters(&self, stage: Stage) -> Vec<Cluster> {
self.map.iter()
.filter_map(|mc| {
if mc.stage == stage { Some(mc.to_owned()) } else { None }
})
.collect()
}
/// Defragments cluster groups.
/// I.E. check forwards every cluster from current until stage changes,
/// then group at once.
fn defrag(&mut self) -> &mut Self {
let mut new_map: Vec<Cluster> = vec![];
// Fetch first cluster.
let mut start_cluster = self.map.iter()
.find(|c| c.domain.start == 0)
.unwrap();
// Even though this would be initialized by its first read,
// the compiler won't stop whining, and idk how to assert that to it.
let mut end_cluster = Cluster::default();
let mut new_cluster: Cluster;
let mut stage_common: bool;
let mut is_finished = false;
while !is_finished {
stage_common = true;
// Start a new cluster based on the cluster following
// the end of last new_cluster.
new_cluster = start_cluster.to_owned();
// While stage is common, and not finished,
// find each trailing cluster.
while stage_common && !is_finished {
end_cluster = start_cluster.to_owned();
if end_cluster.domain.end != self.domain.end {
start_cluster = self.map.iter()
.find(|c| end_cluster.domain.end == c.domain.start)
.unwrap();
stage_common = new_cluster.stage == start_cluster.stage
} else {
is_finished = true;
}
}
// Set the new ending, encapsulating any clusters of common stage.
new_cluster.domain.end = end_cluster.domain.end;
new_map.push(new_cluster);
}
self.map = new_map;
self
}
}
#[cfg(test)]
mod tests {
    use super::*;

    // Test for Cluster::subdivide()

    // Test for MapFile::update()

    // Test for MapFile::get_stage()
    #[test]
    fn test_get_stage() {
        let mut mf = MapFile::default();
        let mut mf_stage = mf.get_stage();
        // If this fails here, there's something SERIOUSLY wrong.
        // (Typo fixed in message; assert_eq! reports both sides itself.)
        assert_eq!(
            mf_stage,
            Stage::Untested,
            "Determined stage to be {:?}, when {:?} was expected.",
            mf_stage,
            Stage::Untested
        );
        // Each pushed stage precedes everything already in the map in the
        // recovery order, so get_stage() must report the latest push.
        let stages = vec![
            Stage::Damaged,
            Stage::ForIsolation(1),
            Stage::ForIsolation(0),
            Stage::Untested,
        ];
        mf.map = vec![];
        for stage in stages {
            mf.map.push(*Cluster::default().set_stage(stage));
            mf_stage = mf.get_stage();
            assert_eq!(
                stage, mf_stage,
                "Expected stage to be {:?}, determined {:?} instead.",
                stage, mf_stage
            )
        }
    }

    // Test for MapFile::get_clusters()
    #[test]
    fn test_get_clusters() {
        let mut mf = MapFile::default();
        // Every stage appears exactly twice.
        mf.map = vec![
            *Cluster::default().set_stage(Stage::Damaged),
            *Cluster::default().set_stage(Stage::ForIsolation(0)),
            *Cluster::default().set_stage(Stage::ForIsolation(1)),
            Cluster::default(),
            Cluster::default(),
            *Cluster::default().set_stage(Stage::ForIsolation(1)),
            *Cluster::default().set_stage(Stage::ForIsolation(0)),
            *Cluster::default().set_stage(Stage::Damaged),
        ];
        let stages = vec![
            Stage::Damaged,
            Stage::ForIsolation(1),
            Stage::ForIsolation(0),
            Stage::Untested,
        ];
        for stage in stages {
            let expected = vec![
                *Cluster::default().set_stage(stage),
                *Cluster::default().set_stage(stage),
            ];
            // "recieved" typo fixed throughout.
            let received = mf.get_clusters(stage);
            assert_eq!(
                expected, received,
                "Expected clusters {:?}, got {:?}.",
                expected, received
            )
        }
    }

    // Test for MapFile::defrag()
    #[test]
    fn test_defrag() {
        // Eight unit-length clusters tiling the domain 0..8.
        let mut mf = MapFile {
            sector_size: 1,
            domain: Domain { start: 0, end: 8 },
            map: vec![
                Cluster {
                    domain: Domain { start: 0, end: 1 },
                    stage: Stage::Untested,
                },
                Cluster {
                    domain: Domain { start: 1, end: 2 },
                    stage: Stage::Untested,
                },
                Cluster {
                    domain: Domain { start: 2, end: 3 },
                    stage: Stage::Untested,
                },
                Cluster {
                    domain: Domain { start: 3, end: 4 },
                    stage: Stage::ForIsolation(0),
                },
                Cluster {
                    domain: Domain { start: 4, end: 5 },
                    stage: Stage::ForIsolation(0),
                },
                Cluster {
                    domain: Domain { start: 5, end: 6 },
                    stage: Stage::ForIsolation(1),
                },
                Cluster {
                    domain: Domain { start: 6, end: 7 },
                    stage: Stage::ForIsolation(0),
                },
                Cluster {
                    domain: Domain { start: 7, end: 8 },
                    stage: Stage::Damaged,
                },
            ],
        };
        // Adjacent clusters of equal stage merge; stage changes keep
        // their boundaries.
        let expected = vec![
            Cluster {
                domain: Domain { start: 0, end: 3 },
                stage: Stage::Untested,
            },
            Cluster {
                domain: Domain { start: 3, end: 5 },
                stage: Stage::ForIsolation(0),
            },
            Cluster {
                domain: Domain { start: 5, end: 6 },
                stage: Stage::ForIsolation(1),
            },
            Cluster {
                domain: Domain { start: 6, end: 7 },
                stage: Stage::ForIsolation(0),
            },
            Cluster {
                domain: Domain { start: 7, end: 8 },
                stage: Stage::Damaged,
            },
        ];
        mf.defrag();
        let received = mf.map;
        assert_eq!(
            expected, received,
            "Expected {:?} after defragging, got {:?}.",
            expected, received
        )
    }
}

View File

@@ -1,56 +0,0 @@
use super::{Domain, Stage};
use serde::{Deserialize, Serialize};
/// A map for data stored in memory for processing and saving to disk.
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq)]
pub struct Cluster {
    /// Sector range this cluster covers.
    pub domain: Domain,
    /// Recovery progress recorded for that range.
    pub stage: Stage,
}
impl Default for Cluster {
    /// An untested cluster over the default single-sector domain.
    fn default() -> Self {
        Self {
            domain: Domain::default(),
            stage: Stage::default(),
        }
    }
}
impl Cluster {
    /// Breaks apart into a vec of clusters, each of `cluster_len` sectors,
    /// excepting the last, which carries whatever remainder is left.
    ///
    /// Returns an empty vec for a zero-length domain.
    pub fn subdivide(&mut self, cluster_len: usize) -> Vec<Cluster> {
        let mut clusters: Vec<Cluster> = vec![];
        let mut start = self.domain.start;
        for _ in 0..self.domain.len() / cluster_len {
            clusters.push(Cluster {
                domain: Domain {
                    start,
                    end: start + cluster_len,
                },
                stage: self.stage,
            });
            start += cluster_len;
        }
        // BUG FIX: only emit the trailing remainder when it is non-empty.
        // The old code unconditionally pushed a zero-length cluster whenever
        // the domain length was an exact multiple of `cluster_len`.
        if start < self.domain.end {
            clusters.push(Cluster {
                domain: Domain {
                    start,
                    end: self.domain.end,
                },
                stage: self.stage,
            });
        }
        clusters
    }

    /// Set the recovery stage, returning `self` for chaining.
    pub fn set_stage(&mut self, stage: Stage) -> &mut Self {
        self.stage = stage;
        self
    }
}

View File

@@ -1,22 +0,0 @@
use serde::{Deserialize, Serialize};
/// Domain, in sectors.
/// Requires sector_size to be provided elsewhere for conversion to bytes.
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq)]
pub struct Domain {
    // First sector of the range (inclusive).
    pub start: usize,
    // One past the last sector (exclusive) — `len()` computes `end - start`.
    pub end: usize,
}
impl Default for Domain {
fn default() -> Self {
Domain { start: 0, end: 1 }
}
}
impl Domain {
    /// Return length of domain in sectors.
    ///
    /// NOTE(review): underflows (panics in debug builds) if `end < start`;
    /// assumes domains are well-formed — confirm at construction sites.
    pub fn len(self) -> usize {
        self.end - self.start
    }
}

View File

@@ -1,207 +0,0 @@
use std::fs::File;
use super::{Cluster, Domain, Stage};
use ron::de::from_reader;
use ron::error::SpannedError;
use serde::{Deserialize, Serialize};
/// On-disk recovery map: sector size, overall domain, and the cluster list.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub struct MapFile {
    /// Sector size in bytes; converts `Domain` sector counts to byte offsets.
    pub sector_size: u16,
    /// Full extent of the medium being recovered, in sectors.
    pub domain: Domain,
    /// Clusters over `domain` — `defrag()` assumes they tile it contiguously
    /// starting at sector 0 (TODO confirm this invariant is enforced).
    pub map: Vec<Cluster>,
}
impl TryFrom<File> for MapFile {
    type Error = SpannedError;
    /// Deserialize a `MapFile` from RON text read out of `file`.
    fn try_from(file: File) -> Result<Self, Self::Error> {
        from_reader(file)
    }
}
impl Default for MapFile {
fn default() -> Self {
MapFile {
sector_size: crate::FB_SECTOR_SIZE,
domain: Domain::default(),
map: vec![Cluster {
domain: Domain::default(),
stage: Stage::Untested,
}],
}
}
}
impl MapFile {
pub fn new(sector_size: u16) -> Self {
MapFile::default().set_sector_size(sector_size).to_owned()
}
pub fn set_sector_size(&mut self, sector_size: u16) -> &mut Self {
self.sector_size = sector_size;
self
}
/// Recalculate cluster mappings.
pub fn update(&mut self, new_cluster: Cluster) -> &mut Self {
let mut new_map: Vec<Cluster> = vec![Cluster::from(new_cluster.to_owned())];
for map_cluster in self.map.iter() {
let mut map_cluster = *map_cluster;
// If new_cluster doesn't start ahead and ends short, map_cluster is forgotten.
if new_cluster.domain.start < map_cluster.domain.start
&& new_cluster.domain.end < map_cluster.domain.end
{
/*
new_cluster overlaps the start of map_cluster,
but ends short of map_cluster end.
ACTION: Crop map_cluster to start at end of new_cluster.
*/
map_cluster.domain.start = new_cluster.domain.end;
new_map.push(map_cluster);
} else if new_cluster.domain.end < map_cluster.domain.end {
/*
new_cluster starts within map_cluster domain.
ACTION: Crop
*/
let domain_end = map_cluster.domain.end;
// Crop current object.
map_cluster.domain.end = new_cluster.domain.start;
new_map.push(map_cluster);
if new_cluster.domain.end < map_cluster.domain.end {
/*
new_cluster is within map_cluster.
ACTION: Crop & Fracture map_cluster
NOTE: Crop completed above.
*/
new_map.push(Cluster {
domain: Domain {
start: new_cluster.domain.end,
end: domain_end,
},
stage: map_cluster.stage.to_owned(),
});
}
} else {
/*
No overlap.
ACTION: Transfer
*/
new_map.push(map_cluster);
}
}
self.map = new_map;
self
}
/// Get current recovery stage.
pub fn get_stage(&self) -> Stage {
let mut recover_stage = Stage::Damaged;
for cluster in self.map.iter() {
match cluster.stage {
Stage::Untested => return Stage::Untested,
Stage::ForIsolation(_) => {
if recover_stage == Stage::Damaged || cluster.stage < recover_stage {
// Note that recover_stage after first condition is
// only ever Stage::ForIsolation(_), thus PartialEq,
// PartialOrd are useful for comparing the internal value.
recover_stage = cluster.stage
}
}
Stage::Damaged => (),
Stage::Intact => unreachable!(),
}
}
recover_stage
}
/// Get clusters of common stage.
pub fn get_clusters(&self, stage: Stage) -> Vec<Cluster> {
self.map
.iter()
.filter_map(|mc| {
if mc.stage == stage {
Some(mc.to_owned())
} else {
None
}
})
.collect()
}
/// Defragments cluster groups.
/// I.E. check forwards every cluster from current until stage changes,
/// then group at once.
pub fn defrag(&mut self) -> &mut Self {
let mut new_map: Vec<Cluster> = vec![];
// Fetch first cluster.
let mut start_cluster = self.map.iter().find(|c| c.domain.start == 0).unwrap();
// Even though this would be initialized by its first read,
// the compiler won't stop whining, and idk how to assert that to it.
let mut end_cluster = Cluster::default();
let mut new_cluster: Cluster;
let mut stage_common: bool;
let mut is_finished = false;
while !is_finished {
stage_common = true;
// Start a new cluster based on the cluster following
// the end of last new_cluster.
new_cluster = start_cluster.to_owned();
// While stage is common, and not finished,
// find each trailing cluster.
while stage_common && !is_finished {
end_cluster = start_cluster.to_owned();
if end_cluster.domain.end != self.domain.end {
start_cluster = self
.map
.iter()
.find(|c| end_cluster.domain.end == c.domain.start)
.unwrap();
stage_common = new_cluster.stage == start_cluster.stage
} else {
is_finished = true;
}
}
// Set the new ending, encapsulating any clusters of common stage.
new_cluster.domain.end = end_cluster.domain.end;
new_map.push(new_cluster);
}
self.map = new_map;
self
}
}
pub fn write_map_to_file(file: File, map: &MapFile) -> ron::error::Result<String> {
ron::ser::to_string_pretty(
map,
ron::ser::PrettyConfig::new()
.new_line("\n".to_string())
.struct_names(true),
)
}

View File

@@ -1,187 +0,0 @@
pub mod cluster;
pub mod domain;
pub mod map;
use serde::{Deserialize, Serialize};
pub use cluster::Cluster;
pub use domain::Domain;
pub use map::MapFile;
/// Recovery state of a cluster.
///
/// `PartialOrd` is derived so `ForIsolation` levels can be compared by their
/// inner pass number (see `MapFile::get_stage`).
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, PartialOrd)]
pub enum Stage {
    /// Successfully recovered.
    Intact,
    /// Not yet read at all.
    Untested,
    /// Failed a straight read; retry by isolating at the given pass level.
    ForIsolation(u8),
    /// Given up on — no further recovery attempted.
    Damaged,
}
impl Default for Stage {
fn default() -> Self {
Stage::Untested
}
}
#[cfg(test)]
mod tests {
    use super::*;

    // Test for Cluster::subdivide()

    // Test for MapFile::update()

    // Test for MapFile::get_stage()
    #[test]
    fn test_get_stage() {
        let mut mf = MapFile::default();
        let mut mf_stage = mf.get_stage();
        // If this fails here, there's something SERIOUSLY wrong.
        // (Typo fixed in message; assert_eq! reports both sides itself.)
        assert_eq!(
            mf_stage,
            Stage::Untested,
            "Determined stage to be {:?}, when {:?} was expected.",
            mf_stage,
            Stage::Untested
        );
        // Each pushed stage precedes everything already in the map in the
        // recovery order, so get_stage() must report the latest push.
        let stages = vec![
            Stage::Damaged,
            Stage::ForIsolation(1),
            Stage::ForIsolation(0),
            Stage::Untested,
        ];
        mf.map = vec![];
        for stage in stages {
            mf.map.push(*Cluster::default().set_stage(stage));
            mf_stage = mf.get_stage();
            assert_eq!(
                stage, mf_stage,
                "Expected stage to be {:?}, determined {:?} instead.",
                stage, mf_stage
            )
        }
    }

    // Test for MapFile::get_clusters()
    #[test]
    fn test_get_clusters() {
        let mut mf = MapFile::default();
        // Every stage appears exactly twice.
        mf.map = vec![
            *Cluster::default().set_stage(Stage::Damaged),
            *Cluster::default().set_stage(Stage::ForIsolation(0)),
            *Cluster::default().set_stage(Stage::ForIsolation(1)),
            Cluster::default(),
            Cluster::default(),
            *Cluster::default().set_stage(Stage::ForIsolation(1)),
            *Cluster::default().set_stage(Stage::ForIsolation(0)),
            *Cluster::default().set_stage(Stage::Damaged),
        ];
        let stages = vec![
            Stage::Damaged,
            Stage::ForIsolation(1),
            Stage::ForIsolation(0),
            Stage::Untested,
        ];
        for stage in stages {
            let expected = vec![
                *Cluster::default().set_stage(stage),
                *Cluster::default().set_stage(stage),
            ];
            let received = mf.get_clusters(stage);
            assert_eq!(
                expected, received,
                "Expected clusters {:?}, got {:?}.",
                expected, received
            )
        }
    }

    // Test for MapFile::defrag()
    #[test]
    fn test_defrag() {
        // Eight unit-length clusters tiling the domain 0..8.
        let mut mf = MapFile {
            sector_size: 1,
            domain: Domain { start: 0, end: 8 },
            map: vec![
                Cluster {
                    domain: Domain { start: 0, end: 1 },
                    stage: Stage::Untested,
                },
                Cluster {
                    domain: Domain { start: 1, end: 2 },
                    stage: Stage::Untested,
                },
                Cluster {
                    domain: Domain { start: 2, end: 3 },
                    stage: Stage::Untested,
                },
                Cluster {
                    domain: Domain { start: 3, end: 4 },
                    stage: Stage::ForIsolation(0),
                },
                Cluster {
                    domain: Domain { start: 4, end: 5 },
                    stage: Stage::ForIsolation(0),
                },
                Cluster {
                    domain: Domain { start: 5, end: 6 },
                    stage: Stage::ForIsolation(1),
                },
                Cluster {
                    domain: Domain { start: 6, end: 7 },
                    stage: Stage::ForIsolation(0),
                },
                Cluster {
                    domain: Domain { start: 7, end: 8 },
                    stage: Stage::Damaged,
                },
            ],
        };
        // Adjacent clusters of equal stage merge; stage changes keep
        // their boundaries.
        let expected = vec![
            Cluster {
                domain: Domain { start: 0, end: 3 },
                stage: Stage::Untested,
            },
            Cluster {
                domain: Domain { start: 3, end: 5 },
                stage: Stage::ForIsolation(0),
            },
            Cluster {
                domain: Domain { start: 5, end: 6 },
                stage: Stage::ForIsolation(1),
            },
            Cluster {
                domain: Domain { start: 6, end: 7 },
                stage: Stage::ForIsolation(0),
            },
            Cluster {
                domain: Domain { start: 7, end: 8 },
                stage: Stage::Damaged,
            },
        ];
        mf.defrag();
        let received = mf.map;
        assert_eq!(
            expected, received,
            "Expected {:?} after defragging, got {:?}.",
            expected, received
        )
    }
}

View File

@@ -1,32 +1,46 @@
use std::fs::{File, OpenOptions}; use std::{
use std::io::{BufWriter, Read, Seek, SeekFrom, Write}; io::{BufReader, BufWriter},
use std::ptr::read; fs::File,
};
use crate::{
Args,
mapping::{Cluster, MapFile, Stage},
};
use crate::mapping::{Cluster, Domain, MapFile, Stage};
use crate::Args;
#[derive(Debug)] #[derive(Debug)]
#[allow(dead_code)]
pub struct Recover { pub struct Recover {
/// Buffer capacity in bytes. buf_capacity: usize,
buf_capacity: u64,
config: Args, config: Args,
input: BufReader<File>,
input: File,
output: BufWriter<File>, output: BufWriter<File>,
map: MapFile, map: MapFile,
stage: Stage, stage: Stage,
} }
impl Recover { impl Recover {
pub fn new(config: Args, input: File, output: File, map: MapFile, buf_capacity: u64) -> Self { pub fn new(
config: Args,
input: File,
output: File,
map: MapFile,
) -> Self {
let stage = map.get_stage(); let stage = map.get_stage();
// Temporarily make buffer length one sector.
let buf_capacity = config.sector_size as usize;
let mut r = Recover { let mut r = Recover {
buf_capacity, buf_capacity,
config, config,
input: input, input: BufReader::with_capacity(
output: BufWriter::with_capacity(buf_capacity as usize, output), buf_capacity,
input,
),
output: BufWriter::with_capacity(
buf_capacity,
output,
),
map, map,
stage: stage, stage: stage,
}; };
@@ -37,82 +51,27 @@ impl Recover {
} }
/// Recover media. /// Recover media.
pub fn run(&mut self) -> () { pub fn run(&mut self) -> &mut Self {
self.copy_untested();
/*
let mut is_finished = false; let mut is_finished = false;
while !is_finished { while !is_finished {
match self.map.get_stage() { match self.map.get_stage() {
Stage::Untested => { Stage::Untested => { self.copy_untested(); },
self.copy_untested(); Stage::ForIsolation(level) => { self.copy_isolate(level); },
}
Stage::ForIsolation(level) => {
self.copy_isolate(level);
}
Stage::Damaged => { Stage::Damaged => {
println!("Cannot recover further."); println!("Cannot recover further.");
is_finished = true is_finished = true
},
} }
} }
}
*/
// return recovered_bytes self
} }
/// Attempt to copy all untested blocks. /// Attempt to copy all untested blocks.
fn copy_untested(&mut self) -> &mut Self { fn copy_untested(&mut self) -> &mut Self {
let mut buf = vec![crate::FB_PAD_VALUE; self.buf_capacity as usize];
// Purely caching.
let mut read_position = 0_u64;
let last_read_position = crate::io::get_stream_length(&mut self.input)
.expect("Failed to get length of input stream")
- self.buf_capacity;
while read_position < last_read_position {
dbg!(read_position);
if let Err(err) = self.input.read_exact(&mut buf) {
println!("Hit error: {:?}", err);
self.input
.seek_relative(self.buf_capacity as i64)
.expect("Failed to seek input by buf_capacity to skip previous error");
} else {
self.output
.write_all(buf.as_slice())
.expect("Failed to write data to output file");
self.map.update(Cluster {
domain: Domain {
start: read_position as usize,
end: (read_position + self.buf_capacity) as usize,
},
stage: Stage::Intact,
});
}
read_position += self.buf_capacity;
}
crate::mapping::map::write_map_to_file(
OpenOptions::new()
.create(true)
.write(true)
.open(crate::get_path(
&self.config.map,
self.config.input.to_str().unwrap(),
"map",
))
.expect("Failed to open map file"),
&self.map,
)
.expect("Failed to write map file");
/*
let mut untested: Vec<Cluster> = vec![]; let mut untested: Vec<Cluster> = vec![];
for cluster in self.map.get_clusters(Stage::Untested).iter_mut() { for cluster in self.map.get_clusters(Stage::Untested).iter_mut() {
@@ -121,13 +80,12 @@ impl Recover {
todo!("Read and save data."); todo!("Read and save data.");
*/
self self
} }
/// Attempt to copy blocks via isolation at pass level. /// Attempt to copy blocks via isolation at pass level.
fn copy_isolate(&mut self, level: u8) -> &mut Self { fn copy_isolate(&mut self, level: u8) -> &mut Self {
todo!(); todo!();
self self
@@ -136,12 +94,13 @@ impl Recover {
/// Set buffer capacities as cluster length in bytes. /// Set buffer capacities as cluster length in bytes.
/// Varies depending on the recovery stage. /// Varies depending on the recovery stage.
fn set_buf_capacity(&mut self) -> &mut Self { fn set_buf_capacity(&mut self) -> &mut Self {
self.buf_capacity = self.config.sector_size as u64 * self.config.cluster_length as u64; self.buf_capacity = (self.config.sector_size * self.config.cluster_length) as usize;
self self
} }
} }
#[cfg(test)] #[cfg(test)]
#[allow(unused)] #[allow(unused)]
mod tests { mod tests {