Cleanup, and get domain overlap and mapping adjustment working.

This commit is contained in:
Cutieguwu
2025-12-31 11:07:24 -05:00
parent 4b5460f754
commit c28fee9f82
9 changed files with 662 additions and 140 deletions

View File

@@ -1,6 +1,8 @@
use std::io::{self, Seek, SeekFrom}; use std::io::{self, Seek, SeekFrom};
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use anyhow::Context;
/// Get length of data stream. /// Get length of data stream.
/// Physical length of data stream in bytes /// Physical length of data stream in bytes
/// (multiple of sector_size, rather than actual). /// (multiple of sector_size, rather than actual).
@@ -17,20 +19,26 @@ pub fn get_stream_length<S: Seek>(stream: &mut S) -> io::Result<u64> {
/// Generates a file path if one not provided. /// Generates a file path if one not provided.
/// source_name for fallback name. /// source_name for fallback name.
pub fn get_path<P>(file_path: &Option<P>, source_name: &P, extension: &str) -> Option<PathBuf> pub fn get_path<P>(
file_path: &Option<P>,
source_name: &P,
extension: &str,
) -> anyhow::Result<PathBuf>
where where
P: AsRef<Path>, P: AsRef<Path>,
{ {
if let Some(f) = file_path { if let Some(f) = file_path {
return f.as_ref().to_path_buf().into(); return Ok(f.as_ref().to_path_buf());
} }
PathBuf::from(format!( Ok(PathBuf::from(format!(
"{:?}.{}", "{:?}.{}",
source_name.as_ref().to_str()?, source_name
.as_ref()
.to_str()
.context("source_name path was not UTF-8 valid.")?,
extension extension
)) ))
.as_path() .as_path()
.to_owned() .to_owned())
.into()
} }

View File

@@ -18,10 +18,6 @@ const FB_PAD_VALUE: u8 = 0;
fn main() -> anyhow::Result<()> { fn main() -> anyhow::Result<()> {
let config = Args::parse(); let config = Args::parse();
// Live with it, prefer to use expect() here.
// I'm lazy and don't want to mess around with comparing error types.
// Thus, any error in I/O here should be treated as fatal.
let mut input: File = { let mut input: File = {
let input_path = &config.input.as_path(); let input_path = &config.input.as_path();
@@ -44,37 +40,23 @@ fn main() -> anyhow::Result<()> {
.with_context(|| format!("Failed to open/create output file at: {}", path.display()))? .with_context(|| format!("Failed to open/create output file at: {}", path.display()))?
}; };
/*
let map: MapFile = { let map: MapFile = {
let path = get_path( let path = crate::io::get_path(&config.output, &config.input, "map")
&config.output, .context("Failed to generate map path.")?;
&config
.input
.to_str()
.expect("Input path is not UTF-8 valid"),
"map",
);
let file = OpenOptions::new() MapFile::try_from(
OpenOptions::new()
.read(true) .read(true)
.create(true) .create(true)
.open(path) .open(&path)
.expect("Failed to open/create mapping file"); .with_context(|| {
format!("Failed to open/create mapping file at: {}", path.display())
if let Ok(map) = MapFile::try_from(file) { })?,
map )
} else { .unwrap_or(MapFile::new(config.sector_size))
MapFile::new(config.sector_size)
}
}; };
*/
let mut recover_tool = { let mut recover_tool = Recover::new(&config, &mut input, output, map)?;
let buf_capacity = crate::io::get_stream_length(&mut input)
.context("Failed to get buffer capacity from input")?;
Recover::new(&config, input, output, MapFile::default(), buf_capacity)
};
recover_tool.run(); recover_tool.run();

View File

@@ -3,7 +3,7 @@ use super::{Domain, Stage};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
/// A map for data stored in memory for processing and saving to disk. /// A map for data stored in memory for processing and saving to disk.
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq)] #[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub struct Cluster { pub struct Cluster {
pub domain: Domain, pub domain: Domain,
pub stage: Stage, pub stage: Stage,

View File

@@ -2,7 +2,7 @@ use serde::{Deserialize, Serialize};
/// Domain, in sectors. /// Domain, in sectors.
/// Requires sector_size to be provided elsewhere for conversion to bytes. /// Requires sector_size to be provided elsewhere for conversion to bytes.
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq)] #[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub struct Domain { pub struct Domain {
pub start: usize, pub start: usize,
pub end: usize, pub end: usize,
@@ -19,6 +19,34 @@ impl Domain {
pub fn len(self) -> usize { pub fn len(self) -> usize {
self.end - self.start self.end - self.start
} }
/// Returns the type of overlap between this domain and another.
pub fn overlap(&self, other: Domain) -> DomainOverlap {
if self.end <= other.start || other.end <= self.start {
// Cases 7, 8, 12, and 13 of map::tests::test_update
DomainOverlap::None
} else if other.start >= self.start && other.end <= self.end {
// Cases 3, 5, 9, and 11 of map::tests::test_update
DomainOverlap::SelfEngulfsOther
} else if other.start <= self.start && other.end >= self.end {
// Cases 4, 6, and 10 of map::tests::test_update
DomainOverlap::OtherEngulfsSelf
} else if self.start < other.start {
// Case 1 of map::tests::test_update
DomainOverlap::OtherOverlapsEnd
} else {
// Case 2 of map::tests::test_update
DomainOverlap::OtherOverlapsStart
}
}
}
pub enum DomainOverlap {
None,
SelfEngulfsOther,
OtherEngulfsSelf,
OtherOverlapsStart,
OtherOverlapsEnd,
} }
#[cfg(test)] #[cfg(test)]

View File

@@ -1,7 +1,7 @@
use std::fs::File; use std::fs::File;
use std::io::Write; use std::io::Write;
use super::{Cluster, Domain, Stage}; use super::{Cluster, Domain, DomainOverlap, Stage};
use anyhow; use anyhow;
use ron::de::from_reader; use ron::de::from_reader;
@@ -47,66 +47,32 @@ impl MapFile {
} }
/// Recalculate cluster mappings. /// Recalculate cluster mappings.
pub fn update(&mut self, new_cluster: Cluster) -> &mut Self { pub fn update(&mut self, new: Cluster) -> &mut Self {
let mut new_map: Vec<Cluster> = vec![Cluster::from(new_cluster.to_owned())]; let mut map: Vec<Cluster> = vec![Cluster::from(new.clone())];
for map_cluster in self.map.iter() { for old in self.map.iter() {
let mut map_cluster = *map_cluster; let mut old = *old;
// If new_cluster doesn't start ahead and ends short, map_cluster is forgotten. match new.domain.overlap(old.domain) {
if new_cluster.domain.start < map_cluster.domain.start DomainOverlap::None => map.push(old),
&& new_cluster.domain.end < map_cluster.domain.end DomainOverlap::SelfEngulfsOther => (),
{ DomainOverlap::OtherEngulfsSelf => {
/* other_engulfs_self_update(new, &mut old, &mut map)
new_cluster overlaps the start of map_cluster,
but ends short of map_cluster end.
ACTION: Crop map_cluster to start at end of new_cluster.
*/
map_cluster.domain.start = new_cluster.domain.end;
new_map.push(map_cluster);
} else if new_cluster.domain.end < map_cluster.domain.end {
/*
new_cluster starts within map_cluster domain.
ACTION: Crop
*/
let domain_end = map_cluster.domain.end;
// Crop current object.
map_cluster.domain.end = new_cluster.domain.start;
new_map.push(map_cluster);
if new_cluster.domain.end < map_cluster.domain.end {
/*
new_cluster is within map_cluster.
ACTION: Crop & Fracture map_cluster
NOTE: Crop completed above.
*/
new_map.push(Cluster {
domain: Domain {
start: new_cluster.domain.end,
end: domain_end,
},
stage: map_cluster.stage.to_owned(),
});
} }
} else { DomainOverlap::OtherOverlapsEnd => {
/* // Case 1
No overlap. old.domain.start = new.domain.end;
map.push(old);
ACTION: Transfer
*/
new_map.push(map_cluster);
} }
DomainOverlap::OtherOverlapsStart => {
// Case 2
old.domain.end = new.domain.start;
map.push(old);
}
};
} }
self.map = new_map; self.map = map;
self self
} }
@@ -126,7 +92,7 @@ impl MapFile {
} }
} }
Stage::Damaged => (), Stage::Damaged => (),
Stage::Intact => unreachable!(), Stage::Intact => (),
} }
} }
@@ -197,6 +163,29 @@ impl MapFile {
self.map = new_map; self.map = new_map;
self self
} }
/// Extend the domain of the MapFile.
/// Returns None if the domain cannot be changed or is unchanged.
/// Returns the delta of the previous domain end and the new end.
pub fn extend(&mut self, end: usize) -> Option<usize> {
if end <= self.domain.end {
return None;
}
let old_end = self.domain.end;
let delta = end - old_end;
self.domain.end = end;
// Add new data as untested.
self.update(Cluster {
domain: Domain {
start: old_end,
end: self.domain.end,
},
..Default::default()
});
Some(delta)
}
} }
pub fn write_map_to_file(file: &mut File, map: &MapFile) -> anyhow::Result<usize> { pub fn write_map_to_file(file: &mut File, map: &MapFile) -> anyhow::Result<usize> {
@@ -213,17 +202,522 @@ pub fn write_map_to_file(file: &mut File, map: &MapFile) -> anyhow::Result<usize
Ok(written_bytes) Ok(written_bytes)
} }
fn other_engulfs_self_update(new: Cluster, old: &mut Cluster, map: &mut Vec<Cluster>) {
if new.domain.start == old.domain.start {
// Case 6 of map::tests::test_update
old.domain.start = new.domain.end;
} else {
// Case 4 and part of 10
let old_end = old.domain.end;
old.domain.end = new.domain.start;
if new.domain.end != old_end {
// Case 10 of map::tests::test_update
map.push(Cluster {
domain: Domain {
start: new.domain.end,
end: old_end,
},
stage: old.stage,
})
}
}
map.push(old.to_owned())
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::collections::HashSet;
use super::*; use super::*;
// Test for MapFile::update() /// Test for MapFile::update()
// Test for MapFile::get_stage()
#[test] #[test]
fn test_get_stage() { fn update_1_new_overlaps_start() {
use std::vec; // Case 1:
// |----new----|
// |----old----|
//
// | --> |-old-|
// Solution: old.start = new.end
let mut map = MapFile {
map: vec![Cluster {
domain: Domain { start: 1, end: 3 },
..Default::default()
}],
..Default::default()
};
map.update(Cluster {
domain: Domain { start: 0, end: 2 },
..Default::default()
});
map.map.sort();
assert_eq!(
map.map,
vec![
Cluster {
domain: Domain { start: 0, end: 2 },
..Default::default()
},
Cluster {
domain: Domain { start: 2, end: 3 },
..Default::default()
}
]
);
}
/// Test for MapFile::update()
#[test]
fn update_2_new_overlaps_end() {
// Case 2:
// |----new----|
// |----old----|
//
// |-old-| <-- |
// Solution: old.end = new.start
let mut map = MapFile {
map: vec![Cluster {
domain: Domain { start: 0, end: 2 },
..Default::default()
}],
..Default::default()
};
map.update(Cluster {
domain: Domain { start: 1, end: 3 },
..Default::default()
});
map.map.sort();
assert_eq!(
map.map,
vec![
Cluster {
domain: Domain { start: 0, end: 1 },
..Default::default()
},
Cluster {
domain: Domain { start: 1, end: 3 },
..Default::default()
}
]
);
}
/// Test for MapFile::update()
#[test]
fn update_3_new_engulfs_common_end() {
// Case 3:
// |----new----|
// |--old--|
//
// Solution: Remove old.
let mut map = MapFile {
map: vec![Cluster {
domain: Domain { start: 1, end: 3 },
..Default::default()
}],
..Default::default()
};
map.update(Cluster {
domain: Domain { start: 0, end: 3 },
..Default::default()
});
map.map.sort();
assert_eq!(
map.map,
vec![Cluster {
domain: Domain { start: 0, end: 3 },
..Default::default()
}]
);
}
/// Test for MapFile::update()
#[test]
fn update_4_old_engulfs_common_end() {
// Case 4:
// |--new--|
// |-----old-----|
//
// |-old-| <---- |
// Solution: old.end = new.start
let mut map = MapFile {
map: vec![Cluster {
domain: Domain { start: 0, end: 3 },
..Default::default()
}],
..Default::default()
};
map.update(Cluster {
domain: Domain { start: 1, end: 3 },
..Default::default()
});
map.map.sort();
assert_eq!(
map.map,
vec![
Cluster {
domain: Domain { start: 0, end: 1 },
..Default::default()
},
Cluster {
domain: Domain { start: 1, end: 3 },
..Default::default()
}
]
);
}
/// Test for MapFile::update()
#[test]
fn update_5_new_engulfs_common_start() {
// Case 5:
// |-----new----|
// |--old--|
//
// Solution: Remove old.
let mut map = MapFile {
map: vec![Cluster {
domain: Domain { start: 0, end: 2 },
..Default::default()
}],
..Default::default()
};
map.update(Cluster {
domain: Domain { start: 0, end: 3 },
..Default::default()
});
map.map.sort();
assert_eq!(
map.map,
vec![Cluster {
domain: Domain { start: 0, end: 3 },
..Default::default()
}]
);
}
/// Test for MapFile::update()
#[test]
fn update_6_old_engulfs_common_start() {
// Case 6:
// |--new--|
// |-----old-----|
//
// | ----> |-old-|
// Solution: old.start = new.end
let mut map = MapFile {
map: vec![Cluster {
domain: Domain { start: 0, end: 3 },
..Default::default()
}],
..Default::default()
};
map.update(Cluster {
domain: Domain { start: 0, end: 2 },
..Default::default()
});
map.map.sort();
assert_eq!(
map.map,
vec![
Cluster {
domain: Domain { start: 0, end: 2 },
..Default::default()
},
Cluster {
domain: Domain { start: 2, end: 3 },
..Default::default()
}
]
);
}
/// Test for MapFile::update()
#[test]
fn update_7_new_precedes() {
// Case 7:
// |--new--|
// |--old--|
//
// Solution: Leave unchanged.
let mut map = MapFile {
map: vec![Cluster {
domain: Domain { start: 2, end: 3 },
..Default::default()
}],
..Default::default()
};
map.update(Cluster {
domain: Domain { start: 0, end: 2 },
..Default::default()
});
map.map.sort();
assert_eq!(
map.map,
vec![
Cluster {
domain: Domain { start: 0, end: 2 },
..Default::default()
},
Cluster {
domain: Domain { start: 2, end: 3 },
..Default::default()
}
]
);
}
/// Test for MapFile::update()
#[test]
fn update_8_new_trails() {
// Case 8:
// |--new--|
// |--old--|
// Solution: Leave unchanged.
let mut map = MapFile {
map: vec![Cluster {
domain: Domain { start: 0, end: 2 },
..Default::default()
}],
..Default::default()
};
map.update(Cluster {
domain: Domain { start: 2, end: 3 },
..Default::default()
});
map.map.sort();
assert_eq!(
map.map,
vec![
Cluster {
domain: Domain { start: 0, end: 2 },
..Default::default()
},
Cluster {
domain: Domain { start: 2, end: 3 },
..Default::default()
}
]
);
}
/// Test for MapFile::update()
#[test]
fn update_9_new_engulfs() {
// Case 9:
// |-----new-----|
// |--old--|
//
// Solution: Remove old.
let mut map = MapFile {
map: vec![Cluster {
domain: Domain { start: 1, end: 2 },
..Default::default()
}],
..Default::default()
};
map.update(Cluster {
domain: Domain { start: 0, end: 3 },
..Default::default()
});
map.map.sort();
assert_eq!(
map.map,
vec![Cluster {
domain: Domain { start: 0, end: 3 },
..Default::default()
}]
);
}
/// Test for MapFile::update()
#[test]
fn update_10_old_engulfs() {
// Case 10:
// |--new--|
// |--------------old--------------|
//
// |----old----| <---- |
// + |--fracture-|
// Solution: old.end = new.start
// && fracture:
// with fracture.start = new.end
// && fracture.end = old.original_end
let mut map = MapFile {
map: vec![Cluster {
domain: Domain { start: 0, end: 3 },
..Default::default()
}],
..Default::default()
};
map.update(Cluster {
domain: Domain { start: 1, end: 2 },
..Default::default()
});
map.map.sort();
assert_eq!(
map.map,
vec![
Cluster {
domain: Domain { start: 0, end: 1 },
..Default::default()
},
Cluster {
domain: Domain { start: 1, end: 2 },
..Default::default()
},
Cluster {
domain: Domain { start: 2, end: 3 },
..Default::default()
}
]
);
}
/// Test for MapFile::update()
#[test]
fn update_11_common_start_and_end() {
// Case 11:
// |--new--|
// |--old--|
//
// Solution: Remove old.
let mut map = MapFile {
map: vec![Cluster {
domain: Domain { start: 0, end: 3 },
stage: Stage::Untested,
}],
..Default::default()
};
map.update(Cluster {
domain: Domain { start: 0, end: 3 },
stage: Stage::Intact,
});
map.map.sort();
assert_eq!(
map.map,
vec![Cluster {
domain: Domain { start: 0, end: 3 },
stage: Stage::Intact
}]
);
}
/// Test for MapFile::update()
#[test]
fn update_12_new_out_of_range_preceding() {
// Case 12:
// |--new--|
// |--old--|
//
// Solution: Leave Unchanged.
let mut map = MapFile {
map: vec![Cluster {
domain: Domain { start: 2, end: 3 },
..Default::default()
}],
..Default::default()
};
map.update(Cluster {
domain: Domain { start: 0, end: 1 },
..Default::default()
});
map.map.sort();
assert_eq!(
map.map,
vec![
Cluster {
domain: Domain { start: 0, end: 1 },
..Default::default()
},
Cluster {
domain: Domain { start: 2, end: 3 },
..Default::default()
}
]
);
}
/// Test for MapFile::update()
#[test]
fn update_13_new_out_of_range_trailing() {
// Case 13:
// |--new--|
// |--old--|
//
// Solution: Leave Unchanged.
let mut map = MapFile {
map: vec![Cluster {
domain: Domain { start: 0, end: 1 },
..Default::default()
}],
..Default::default()
};
map.update(Cluster {
domain: Domain { start: 2, end: 3 },
..Default::default()
});
map.map.sort();
assert_eq!(
map.map,
vec![
Cluster {
domain: Domain { start: 0, end: 1 },
..Default::default()
},
Cluster {
domain: Domain { start: 2, end: 3 },
..Default::default()
}
]
);
}
/// Test for MapFile::get_stage()
#[test]
fn get_stage() {
let mut mf = MapFile::default(); let mut mf = MapFile::default();
let mut mf_stage = mf.get_stage(); let mut mf_stage = mf.get_stage();
@@ -258,9 +752,9 @@ mod tests {
} }
} }
// Test for MapFile::get_clusters() /// Test for MapFile::get_clusters()
#[test] #[test]
fn test_get_clusters() { fn get_clusters() {
let mut mf = MapFile::default(); let mut mf = MapFile::default();
mf.map = vec![ mf.map = vec![
@@ -297,9 +791,9 @@ mod tests {
} }
} }
// Test for MapFile::defrag() /// Test for MapFile::defrag()
#[test] #[test]
fn test_defrag() { fn defrag() {
let mut mf = MapFile { let mut mf = MapFile {
sector_size: 1, sector_size: 1,
domain: Domain { start: 0, end: 8 }, domain: Domain { start: 0, end: 8 },

View File

@@ -7,6 +7,6 @@ pub mod prelude;
pub mod stage; pub mod stage;
pub use cluster::Cluster; pub use cluster::Cluster;
pub use domain::Domain; pub use domain::{Domain, DomainOverlap};
pub use map::MapFile; pub use map::MapFile;
pub use stage::Stage; pub use stage::Stage;

View File

@@ -1,6 +1,6 @@
#![allow(unused_imports)] #![allow(unused_imports)]
pub use super::cluster::Cluster; pub use super::cluster::Cluster;
pub use super::domain::Domain; pub use super::domain::{Domain, DomainOverlap};
pub use super::map::{write_map_to_file, MapFile}; pub use super::map::{write_map_to_file, MapFile};
pub use super::stage::Stage; pub use super::stage::Stage;

View File

@@ -1,11 +1,11 @@
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, PartialOrd)] #[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub enum Stage { pub enum Stage {
Intact,
Untested, Untested,
ForIsolation(u8), ForIsolation(u8),
Damaged, Damaged,
Intact,
} }
impl Default for Stage { impl Default for Stage {

View File

@@ -9,42 +9,47 @@ use crate::mapping::prelude::*;
#[derive(Debug)] #[derive(Debug)]
#[allow(dead_code)] #[allow(dead_code)]
pub struct Recover<'a> { pub struct Recover<'a> {
/// Buffer capacity in bytes.
buf_capacity: u64,
config: &'a Args, config: &'a Args,
stream_len: u64,
input: File, buf: Vec<u8>,
input: &'a mut File,
output: BufWriter<File>, output: BufWriter<File>,
map: MapFile, map: MapFile,
stage: Stage,
} }
impl<'a> Recover<'a> { impl<'a> Recover<'a> {
pub fn new( pub fn new(
config: &'a Args, config: &'a Args,
input: File, input: &'a mut File,
output: File, output: File,
map: MapFile, map: MapFile,
buf_capacity: u64, ) -> anyhow::Result<Self> {
) -> Self { let stream_len =
let stage = map.get_stage(); crate::io::get_stream_length(input).context("Failed to get input stream length.")?;
let mut r = Recover { let mut r = Recover {
buf_capacity,
config, config,
input: input, stream_len,
output: BufWriter::with_capacity(buf_capacity as usize, output), buf: vec![],
input,
output: BufWriter::with_capacity(stream_len as usize, output),
map, map,
stage: stage,
}; };
// Ensure that buffer capacity is adjusted based on progress. r.restore();
r.set_buf_capacity();
r Ok(r)
} }
/// Recover media. /// Recover media.
pub fn run(&mut self) -> () { pub fn run(&mut self) -> () {
// From start, read to end or error.
//
// If all data recovered, return early.
// Else, read from end to error.
self.copy_untested(); self.copy_untested();
/* /*
@@ -70,40 +75,47 @@ impl<'a> Recover<'a> {
// return recovered_bytes // return recovered_bytes
} }
/// Restore current progress based on MapFile.
/// Also updates MapFile if needed, such as to extend the MapFile domain.
pub fn restore(&mut self) {
self.map.extend(self.stream_len as usize);
}
/// Attempt to copy all untested blocks. /// Attempt to copy all untested blocks.
fn copy_untested(&mut self) -> anyhow::Result<()> { fn copy_untested(&mut self) -> anyhow::Result<()> {
let mut buf = vec![crate::FB_PAD_VALUE; self.buf_capacity as usize]; // Caching.
let buf_capacity = self.get_buf_capacity();
// Purely caching. self.buf = vec![crate::FB_PAD_VALUE; buf_capacity as usize];
// Yet more caching.
let mut read_position = 0_u64; let mut read_position = 0_u64;
let last_read_position = crate::io::get_stream_length(&mut self.input) let final_read_position = self.stream_len - buf_capacity;
.context("Failed to get length of input stream")?
- self.buf_capacity;
while read_position < last_read_position { while read_position < final_read_position {
if let Err(err) = self.input.read_exact(&mut buf) { if let Err(err) = self.input.read_exact(&mut self.buf) {
println!("Hit error: {:?}", err); println!("Hit error: {:?}", err);
self.input self.input
.seek_relative(self.buf_capacity as i64) .seek_relative(buf_capacity as i64)
.context("Failed to seek input by buf_capacity to skip previous error")?; .context("Failed to seek input by buf_capacity to skip previous error")?;
} else { } else {
self.output self.output
.write_all(buf.as_slice()) .write_all(self.buf.as_slice())
.context("Failed to write data to output file")?; .context("Failed to write data to output file")?;
self.map.update(Cluster { self.map.update(Cluster {
domain: Domain { domain: Domain {
start: read_position as usize, start: read_position as usize,
end: (read_position + self.buf_capacity) as usize, end: (read_position + buf_capacity) as usize,
}, },
stage: Stage::Intact, stage: Stage::Intact,
}); });
} }
read_position += self.buf_capacity; read_position += buf_capacity;
} }
crate::mapping::map::write_map_to_file( write_map_to_file(
{ {
let map_path = crate::io::get_path(&self.config.map, &self.config.input, "map") let map_path = crate::io::get_path(&self.config.map, &self.config.input, "map")
.context("Failed to generate map path.")?; .context("Failed to generate map path.")?;
@@ -125,10 +137,8 @@ impl<'a> Recover<'a> {
/// Set buffer capacity as cluster length in bytes. /// Set buffer capacity as cluster length in bytes.
/// Varies depending on the recovery stage. /// Varies depending on the recovery stage.
fn set_buf_capacity(&mut self) -> &mut Self { fn get_buf_capacity(&mut self) -> u64 {
self.buf_capacity = self.config.sector_size as u64 * self.config.cluster_length as u64; self.config.sector_size as u64 * self.config.cluster_length as u64
self
} }
} }