12 Commits

Author SHA1 Message Date
Cutieguwu
2da0ab11e5 Tons of cleanup. More useless git notices. 2025-12-31 17:22:57 -05:00
Cutieguwu
ae3b5d8855 Major fixes. Make git history even messier. 2025-12-31 15:27:41 -05:00
Cutieguwu
43454d1c8a Cleanup. 2025-12-31 11:10:20 -05:00
Cutieguwu
c2decbc1ba Cleanup. 2025-12-31 11:08:35 -05:00
Cutieguwu
c28fee9f82 Cleanup, and get domain overlap and mapping adjustment working. 2025-12-31 11:07:24 -05:00
Cutieguwu
4b5460f754 Huge refactor. Introduce anyhow for error handling. 2025-12-29 15:31:01 -05:00
Cutieguwu
e98383d9e5 Delete dependabot.yml 2025-12-29 12:56:15 -05:00
Olivia Brooks
e08e2a0017 Rework reading from device and clean up. 2025-12-29 11:02:13 -05:00
Olivia Brooks
824d01be95 Clean up and reformat. 2025-12-26 16:51:44 -05:00
Olivia Brooks
3d1273981c Merge pull request #12 from Cutieguwu/insert-license
Create LICENSE
2025-03-25 08:17:26 -04:00
Olivia Brooks
45566784a5 Create LICENSE 2025-03-25 08:17:10 -04:00
Olivia Brooks
c2767c6547 Merge pull request #10 from Cutieguwu/rapid-dev
Merge rapid-dev edits into Main.
2025-03-11 18:25:19 -04:00
17 changed files with 1416 additions and 1115 deletions

View File

@@ -1,11 +0,0 @@
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
version: 2
updates:
- package-ecosystem: "cargo" # See documentation for possible values
directory: "/" # Location of package manifests
schedule:
interval: "daily"

475
Cargo.lock generated
View File

@@ -3,14 +3,20 @@
version = 4
[[package]]
name = "aho-corasick"
version = "1.1.3"
name = "addr2line"
version = "0.24.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1"
dependencies = [
"memchr",
"gimli",
]
[[package]]
name = "adler2"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa"
[[package]]
name = "anstyle"
version = "1.0.10"
@@ -19,23 +25,26 @@ checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9"
[[package]]
name = "anyhow"
version = "1.0.96"
version = "1.0.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b964d184e89d9b6b67dd2715bc8e74cf3107fb2b529990c90cf517326150bf4"
[[package]]
name = "arc-swap"
version = "1.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457"
[[package]]
name = "base62"
version = "2.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "10e52a7bcb1d6beebee21fb5053af9e3cbb7a7ed1a4909e534040e676437ab1f"
checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61"
dependencies = [
"rustversion",
"backtrace",
]
[[package]]
name = "backtrace"
version = "0.3.75"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002"
dependencies = [
"addr2line",
"cfg-if",
"libc",
"miniz_oxide",
"object",
"rustc-demangle",
"windows-targets",
]
[[package]]
@@ -44,12 +53,6 @@ version = "0.21.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567"
[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bitflags"
version = "2.8.0"
@@ -60,14 +63,10 @@ dependencies = [
]
[[package]]
name = "bstr"
version = "1.11.3"
name = "cfg-if"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "531a9155a481e2ee699d4f98f43c0ca4ff8ee1bfd55c31e9e98fb29d2b176fe0"
dependencies = [
"memchr",
"serde",
]
checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801"
[[package]]
name = "clap"
@@ -109,77 +108,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6"
[[package]]
name = "crossbeam-deque"
version = "0.8.6"
name = "gimli"
version = "0.31.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51"
dependencies = [
"crossbeam-epoch",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-epoch"
version = "0.9.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-utils"
version = "0.8.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"
[[package]]
name = "either"
version = "1.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b7914353092ddf589ad78f25c5c1c21b7f80b0ff8621e7c814c3485b5306da9d"
[[package]]
name = "equivalent"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f"
[[package]]
name = "glob"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2"
[[package]]
name = "globset"
version = "0.4.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "15f1ce686646e7f1e19bf7d5533fe443a45dbfb990e00629110797578b42fb19"
dependencies = [
"aho-corasick",
"bstr",
"log",
"regex-automata",
"regex-syntax",
]
[[package]]
name = "globwalk"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93e3af942408868f6934a7b85134a3230832b9977cf66125df2f9edcfce4ddcc"
dependencies = [
"bitflags 1.3.2",
"ignore",
"walkdir",
]
[[package]]
name = "hashbrown"
version = "0.15.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289"
checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f"
[[package]]
name = "heck"
@@ -187,106 +119,45 @@ version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
[[package]]
name = "ignore"
version = "0.4.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d89fd380afde86567dfba715db065673989d6253f42b88179abd3eae47bda4b"
dependencies = [
"crossbeam-deque",
"globset",
"log",
"memchr",
"regex-automata",
"same-file",
"walkdir",
"winapi-util",
]
[[package]]
name = "indexmap"
version = "2.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652"
dependencies = [
"equivalent",
"hashbrown",
]
[[package]]
name = "itertools"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57"
dependencies = [
"either",
]
[[package]]
name = "itoa"
version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674"
[[package]]
name = "kramer"
version = "0.1.0"
dependencies = [
"anyhow",
"clap",
"libc",
"ron",
"rust-i18n",
"serde",
]
[[package]]
name = "lazy_static"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
[[package]]
name = "libc"
version = "0.2.171"
version = "0.2.177"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6"
[[package]]
name = "libyml"
version = "0.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3302702afa434ffa30847a83305f0a69d6abd74293b6554c18ec85c7ef30c980"
dependencies = [
"anyhow",
"version_check",
]
[[package]]
name = "log"
version = "0.4.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "30bde2b3dc3671ae49d8e2e9f044c7c005836e7a023ee57cffa25ab82764bb9e"
checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976"
[[package]]
name = "memchr"
version = "2.7.4"
version = "2.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273"
[[package]]
name = "normpath"
version = "1.3.0"
name = "miniz_oxide"
version = "0.8.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c8911957c4b1549ac0dc74e30db9c8b0e66ddcd6d7acc33098f4c63a64a6d7ed"
checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316"
dependencies = [
"windows-sys",
"adler2",
]
[[package]]
name = "once_cell"
version = "1.20.3"
name = "object"
version = "0.36.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "945462a4b81e43c4e3ba96bd7b49d834c6f61198356aa858733bc4acf3cbe62e"
checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87"
dependencies = [
"memchr",
]
[[package]]
name = "proc-macro2"
@@ -306,35 +177,6 @@ dependencies = [
"proc-macro2",
]
[[package]]
name = "regex"
version = "1.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
dependencies = [
"aho-corasick",
"memchr",
"regex-automata",
"regex-syntax",
]
[[package]]
name = "regex-automata"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
dependencies = [
"aho-corasick",
"memchr",
"regex-syntax",
]
[[package]]
name = "regex-syntax"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
[[package]]
name = "ron"
version = "0.8.1"
@@ -342,85 +184,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b91f7eff05f748767f183df4320a63d6936e9c6107d97c9e6bdd9784f4289c94"
dependencies = [
"base64",
"bitflags 2.8.0",
"bitflags",
"serde",
"serde_derive",
]
[[package]]
name = "rust-i18n"
version = "3.1.3"
name = "rustc-demangle"
version = "0.1.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "71b3a6e1c6565b77c86d868eea3068b0eb39582510f9c78cfbd5c67bd36fda9b"
dependencies = [
"globwalk",
"once_cell",
"regex",
"rust-i18n-macro",
"rust-i18n-support",
"smallvec",
]
[[package]]
name = "rust-i18n-macro"
version = "3.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6180d8506af2b485ffc1eab7fc6d15678336a694f2b5efac5f2ca78c52928275"
dependencies = [
"glob",
"once_cell",
"proc-macro2",
"quote",
"rust-i18n-support",
"serde",
"serde_json",
"serde_yml",
"syn",
]
[[package]]
name = "rust-i18n-support"
version = "3.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "938f16094e2b09e893b1f85c9da251739a832d4272a5957217977da3a0713bb6"
dependencies = [
"arc-swap",
"base62",
"globwalk",
"itertools",
"lazy_static",
"normpath",
"once_cell",
"proc-macro2",
"regex",
"serde",
"serde_json",
"serde_yml",
"siphasher",
"toml",
"triomphe",
]
[[package]]
name = "rustversion"
version = "1.0.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4"
[[package]]
name = "ryu"
version = "1.0.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ea1a2d0a644769cc99faa24c3ad26b379b786fe7c36fd3c546254801650e6dd"
[[package]]
name = "same-file"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
dependencies = [
"winapi-util",
]
checksum = "989e6739f80c4ad5b13e0fd7fe89531180375b18520cc8c82080e4dc4035b84f"
[[package]]
name = "serde"
@@ -442,60 +215,6 @@ dependencies = [
"syn",
]
[[package]]
name = "serde_json"
version = "1.0.139"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "44f86c3acccc9c65b153fe1b85a3be07fe5515274ec9f0653b4a0875731c72a6"
dependencies = [
"itoa",
"memchr",
"ryu",
"serde",
]
[[package]]
name = "serde_spanned"
version = "0.6.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1"
dependencies = [
"serde",
]
[[package]]
name = "serde_yml"
version = "0.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "59e2dd588bf1597a252c3b920e0143eb99b0f76e4e082f4c92ce34fbc9e71ddd"
dependencies = [
"indexmap",
"itoa",
"libyml",
"memchr",
"ryu",
"serde",
"version_check",
]
[[package]]
name = "siphasher"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d"
[[package]]
name = "smallvec"
version = "1.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fcf8323ef1faaee30a44a340193b1ac6814fd9b7b4e88e9d4519a3e4abe1cfd"
[[package]]
name = "stable_deref_trait"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
[[package]]
name = "strsim"
version = "0.11.1"
@@ -513,91 +232,12 @@ dependencies = [
"unicode-ident",
]
[[package]]
name = "toml"
version = "0.8.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd87a5cdd6ffab733b2f74bc4fd7ee5fff6634124999ac278c35fc78c6120148"
dependencies = [
"serde",
"serde_spanned",
"toml_datetime",
"toml_edit",
]
[[package]]
name = "toml_datetime"
version = "0.6.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41"
dependencies = [
"serde",
]
[[package]]
name = "toml_edit"
version = "0.22.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474"
dependencies = [
"indexmap",
"serde",
"serde_spanned",
"toml_datetime",
"winnow",
]
[[package]]
name = "triomphe"
version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ef8f7726da4807b58ea5c96fdc122f80702030edc33b35aff9190a51148ccc85"
dependencies = [
"arc-swap",
"serde",
"stable_deref_trait",
]
[[package]]
name = "unicode-ident"
version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83"
[[package]]
name = "version_check"
version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"
[[package]]
name = "walkdir"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b"
dependencies = [
"same-file",
"winapi-util",
]
[[package]]
name = "winapi-util"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
dependencies = [
"windows-sys",
]
[[package]]
name = "windows-sys"
version = "0.59.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
dependencies = [
"windows-targets",
]
[[package]]
name = "windows-targets"
version = "0.52.6"
@@ -661,12 +301,3 @@ name = "windows_x86_64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
[[package]]
name = "winnow"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0e7f4ea97f6f78012141bcdb6a216b2609f0979ada50b20ca5b52dde2eac2bb1"
dependencies = [
"memchr",
]

View File

@@ -10,12 +10,18 @@ edition = "2021"
#
# For clap info, see [dependencies.clap]
# For serde info, see [dependencies.serde]
libc = "0.2.171, ~0.2.169"
ron = "0.8.1, >=0.8, <0.9"
rust-i18n = "3.1.3, ~3.1.3"
#rust-i18n = "3.1.3"
[dependencies.anyhow]
version = "1.0"
features = [
"backtrace",
]
[dependencies.clap]
version = "4.5, ~4.5.27"
version = "4.5"
default-features = false
features = [
# From default features collection
@@ -30,5 +36,5 @@ features = [
]
[dependencies.serde]
version = "1.0.219, ~1.0.217"
version = "1.0.219"
features = ["derive"]

21
LICENSE Normal file
View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2025 Olivia Bridie Alexandria Millicent Ivette Brooks
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

17
imports_order.rs Normal file
View File

@@ -0,0 +1,17 @@
// Acknowledge sister/child
mod module;
// std
use std::*;
// sister/child
use module1::*;
// parent
use super::*;
// ancestor of parent
use crate::*;
// external
use external::*;

42
src/cli.rs Normal file
View File

@@ -0,0 +1,42 @@
use std::path::PathBuf;
use std::sync::LazyLock;
use crate::FB_SECTOR_SIZE;
use clap::{ArgAction, Parser};
/// Global CLI configuration, parsed from the process arguments the first
/// time it is dereferenced. Passing the function path directly avoids the
/// redundant closure (`clippy::redundant_closure`).
pub static CONFIG: LazyLock<Args> = LazyLock::new(Args::parse);
// Command-line arguments for the recovery tool, parsed via clap's derive.
// NOTE: the `///` doc comments on fields below are clap's user-visible help
// text — they are behavior, so they are left untouched; review notes are
// plain `//` comments, which clap ignores.
#[derive(Parser, Debug, Clone)]
pub struct Args {
    /// Path to source file or block device
    // NOTE(review): ValueHint::FilePath may be more accurate than DirPath
    // for a file/device argument — confirm intent.
    #[arg(short, long, value_hint = clap::ValueHint::DirPath)]
    pub input: PathBuf,
    /// Path to output file. Defaults to {input}.iso
    #[arg(short, long, value_hint = clap::ValueHint::DirPath)]
    pub output: Option<PathBuf>,
    /// Path to rescue map. Defaults to {input}.map
    #[arg(short, long, value_hint = clap::ValueHint::DirPath)]
    pub map: Option<PathBuf>,
    /// Max number of consecutive sectors to test as a group
    #[arg(short, long, default_value_t = 128)]
    pub cluster_length: usize,
    /// Number of brute force read passes
    #[arg(short, long, default_value_t = 2)]
    pub brute_passes: usize,
    /// Sector size
    // Default comes from the crate-level FB_SECTOR_SIZE constant (2048).
    #[arg(short, long, default_value_t = FB_SECTOR_SIZE)]
    pub sector_size: usize,
    // Behaviour is backwards.
    // ArgAction::SetFalse by default evaluates to true,
    // ArgAction::SetTrue by default evaluates to false.
    // i.e. with SetTrue, the flag is false unless given on the command line.
    /// Whether to reopen the file on a read error or not.
    #[arg(short, long, action=ArgAction::SetTrue)]
    pub reopen_on_error: bool,
}

67
src/io.rs Normal file
View File

@@ -0,0 +1,67 @@
use std::fs::{File, OpenOptions};
use std::io::{self, Seek, SeekFrom};
use crate::cli::CONFIG;
use anyhow::Context;
/// Returns the physical length of a seekable data stream in bytes
/// (a multiple of sector_size, rather than the actual data length).
///
/// The caller's read position is saved up front and restored before
/// returning, so the stream is left where it was found.
pub fn get_stream_length<S: Seek>(stream: &mut S) -> io::Result<u64> {
    let saved = stream.stream_position()?;
    // Learn the total length by seeking to the end. Deliberately not
    // short-circuited with `?`, so the position-restore below still runs
    // even when this seek fails.
    let length = stream.seek(SeekFrom::End(0));
    stream.seek(SeekFrom::Start(saved))?;
    length
}
pub fn load_input() -> anyhow::Result<File> {
OpenOptions::new()
.read(true)
.open(&CONFIG.input)
.with_context(|| format!("Failed to open input file: {}", &CONFIG.input.display()))
}
/// Opens the output file for read/write, creating it if absent.
///
/// # Errors
/// The returned error carries the resolved output path when the
/// open/create fails.
pub fn load_output() -> anyhow::Result<File> {
    OpenOptions::new()
        .read(true)
        .write(true)
        .create(true)
        // Borrow the lazily-resolved path instead of cloning the PathBuf;
        // `open` only needs `AsRef<Path>`.
        .open(&*crate::path::OUTPUT_PATH)
        .with_context(|| {
            format!(
                "Failed to open/create output file at: {}",
                crate::path::OUTPUT_PATH.display()
            )
        })
}
/// Opens the rescue map file read-only.
///
/// Unlike `load_map_write`, this never creates the file.
///
/// # Errors
/// Fails when the map file is missing or unreadable; the error carries
/// the map path.
pub fn load_map_read() -> anyhow::Result<File> {
    OpenOptions::new()
        .read(true)
        // Borrow the path; no need to clone the PathBuf for `open`.
        .open(&*crate::path::MAP_PATH)
        .with_context(|| {
            // The old message said "open/create", but this open has no
            // create flag — report what actually happened.
            format!(
                "Failed to open mapping file at: {}",
                crate::path::MAP_PATH.display()
            )
        })
}
/// Opens the rescue map file for writing, creating it if missing and
/// truncating any existing contents.
///
/// # Errors
/// Fails when the file cannot be opened or created; the error carries
/// the map path.
pub fn load_map_write() -> anyhow::Result<File> {
    OpenOptions::new()
        .write(true)
        .create(true)
        .truncate(true) // Wipe old map. Should really make a backup first.
        // Borrow the path; no need to clone the PathBuf for `open`.
        .open(&*crate::path::MAP_PATH)
        .with_context(|| {
            format!(
                "Failed to open map file at: {}",
                crate::path::MAP_PATH.display()
            )
        })
}

View File

@@ -1,176 +1,19 @@
mod recovery;
mod cli;
mod io;
mod mapping;
mod path;
mod recovery;
use clap::Parser;
use libc::O_DIRECT;
use mapping::MapFile;
use recovery::Recover;
use std::{
fs::{File, OpenOptions},
io::{self, Seek, SeekFrom},
os::unix::fs::OpenOptionsExt,
path::PathBuf,
};
use anyhow;
const FB_SECTOR_SIZE: u16 = 2048;
const FB_SECTOR_SIZE: usize = 2048;
const FB_NULL_VALUE: u8 = 0;
fn main() -> anyhow::Result<()> {
let mut recover_tool = Recover::new()?;
recover_tool.run()?;
#[derive(Parser, Debug)]
struct Args {
/// Path to source file or block device
#[arg(short, long, value_hint = clap::ValueHint::DirPath)]
input: PathBuf,
/// Path to output file. Defaults to {input}.iso
#[arg(short, long, value_hint = clap::ValueHint::DirPath)]
output: Option<PathBuf>,
/// Path to rescue map. Defaults to {input}.map
#[arg(short, long, value_hint = clap::ValueHint::DirPath)]
map: Option<PathBuf>,
/// Max number of consecutive sectors to test as a group
#[arg(short, long, default_value_t = 128)]
cluster_length: u16,
/// Number of brute force read passes
#[arg(short, long, default_value_t = 2)]
brute_passes: usize,
/// Sector size
#[arg(short, long, default_value_t = FB_SECTOR_SIZE)]
sector_size: u16,
}
fn main() {
let config = Args::parse();
// Live with it, prefer to use expect() here.
// I'm lazy and don't want to mess around with comparing error types.
// Thus, any error in I/O here should be treated as fatal.
let mut input: File = {
match OpenOptions::new()
.custom_flags(O_DIRECT)
.read(true)
.write(false)
.append(false)
.create(false)
.open(&config.input.as_path())
{
Ok(f) => f,
Err(err) => panic!("Failed to open input file: {:?}", err)
}
};
let mut output: File = {
// Keep this clean, make a short-lived binding.
let path = get_path(
&config.output,
&config.input.to_str().unwrap(),
"iso"
);
match OpenOptions::new()
.custom_flags(O_DIRECT)
.read(true)
.write(true)
.create(true)
.open(path)
{
Ok(f) => f,
Err(err) => panic!("Failed to open/create output file. {:?}", err)
}
};
// Check if output file is shorter than input.
// If so, autoextend the output file.
{
let input_len = get_stream_length(&mut input)
.expect("Failed to get the length of the input data.");
let output_len = get_stream_length(&mut output)
.expect("Failed to get the length of the output file.");
if output_len < input_len {
output.set_len(input_len)
.expect("Failed to autofill output file.")
}
}
let map: MapFile = {
let path = get_path(
&config.output,
&config.input.to_str().unwrap(),
"map"
);
let file = match OpenOptions::new()
.read(true)
.create(true)
.open(path)
{
Ok(f) => f,
Err(err) => panic!("Failed to open/create mapping file. {:?}", err)
};
if let Ok(map) = MapFile::try_from(file) {
map
} else {
MapFile::new(config.sector_size)
}
};
let mut recover_tool = Recover::new(config, input, output, map);
recover_tool.run();
todo!("Recovery, Map saving, and closure of all files.");
}
/// Returns the explicitly provided output path, or derives a fallback
/// path as `{source_name}.{extension}`.
///
/// Bug fix: the fallback previously formatted the name with `{:?}`
/// (Debug), which wrapped it in literal quotes — producing paths like
/// `"disk".iso` instead of `disk.iso`. The redundant
/// `.as_path().to_owned()` round-trip is also dropped.
fn get_path(
    output: &Option<PathBuf>,
    source_name: &str,
    extension: &str
) -> PathBuf {
    match output {
        Some(path) => path.clone(),
        None => PathBuf::from(format!("{source_name}.{extension}")),
    }
}
/// Get length of data stream.
/// Physical length of data stream in bytes
/// (multiple of sector_size, rather than actual).
///
/// Fixes: the original rewound unconditionally to offset 0 and silently
/// discarded that seek's result (`let _ = ...`). This version restores
/// the caller's original position and propagates any seek error, matching
/// the behavior of the newer `io::get_stream_length`.
fn get_stream_length<S: Seek>(input: &mut S) -> io::Result<u64> {
    let pos = input.stream_position()?;
    let len = input.seek(SeekFrom::End(0))?;
    input.seek(SeekFrom::Start(pos))?;
    Ok(len)
}
#[cfg(test)]
#[allow(unused)]
mod tests {
use super::*;
// Test for get_path
// Need to determine how to package files to test with, or at least
// how to test with PathBuf present.
// Test must also check unwrapping of file name, not just generation.
// Test for get_stream_length
// Need to determine how to test with Seek-able objects.
Ok(())
}

View File

@@ -1,446 +0,0 @@
use ron::de::{from_reader, SpannedError};
use serde::Deserialize;
use std::fs::File;
use crate::FB_SECTOR_SIZE;
/// Domain, in sectors.
/// Requires sector_size to be provided elsewhere for conversion to bytes.
///
/// Treated as a half-open range: `len()` is `end - start`, and adjacent
/// domains are matched by `end == next.start` in `MapFile::defrag`.
#[derive(Clone, Copy, Debug, Deserialize, PartialEq)]
pub struct Domain {
    /// First sector covered (inclusive).
    pub start: usize,
    /// One past the last sector covered (exclusive).
    pub end: usize,
}
impl Default for Domain {
fn default() -> Self {
Domain { start: 0, end: 1 }
}
}
impl Domain {
    /// Return length of domain in sectors (exclusive `end` minus `start`).
    pub fn len(self) -> usize {
        self.end - self.start
    }

    /// True when the domain covers no sectors. Companion to `len`, per
    /// the conventional `len`/`is_empty` pairing
    /// (clippy `len_without_is_empty`).
    pub fn is_empty(self) -> bool {
        self.start == self.end
    }
}
/// A map for data stored in memory for processing and saving to disk.
#[derive(Clone, Copy, Debug, Deserialize, PartialEq)]
pub struct Cluster {
    /// Sector range this cluster covers.
    domain: Domain,
    /// Recovery progress of the sectors in `domain`.
    stage: Stage,
}
impl Default for Cluster {
    /// An untested cluster over the default one-sector domain —
    /// field-by-field defaults, as a derive would produce.
    fn default() -> Self {
        Self {
            domain: Domain::default(),
            stage: Stage::default(),
        }
    }
}
impl Cluster {
    /// Breaks apart into a vec of clusters, each `cluster_len` sectors
    /// long, except for a possibly-shorter final cluster.
    ///
    /// Fixes: the original computed the whole-cluster count with
    /// floating-point division and then unconditionally pushed a tail
    /// cluster, which emitted a zero-length cluster whenever the domain
    /// length was an exact multiple of `cluster_len`. Integer division
    /// plus a guarded tail push avoids that.
    ///
    /// Takes `&mut self` for signature compatibility with existing
    /// callers, though nothing is mutated.
    pub fn subdivide(&mut self, cluster_len: usize) -> Vec<Cluster> {
        // NOTE(review): `cluster_len == 0` would panic on division here;
        // assumed nonzero (CLI default is 128) — confirm upstream.
        let mut clusters: Vec<Cluster> = Vec::new();
        let mut start = self.domain.start;
        // Whole clusters of exactly `cluster_len` sectors.
        for _ in 0..self.domain.len() / cluster_len {
            clusters.push(Cluster {
                domain: Domain {
                    start,
                    end: start + cluster_len,
                },
                stage: self.stage,
            });
            start += cluster_len;
        }
        // Remainder cluster, only when leftover sectors exist.
        if start < self.domain.end {
            clusters.push(Cluster {
                domain: Domain {
                    start,
                    end: self.domain.end,
                },
                stage: self.stage,
            });
        }
        clusters
    }

    /// Sets the recovery stage, returning `&mut self` for chaining.
    pub fn set_stage(&mut self, stage: Stage) -> &mut Self {
        self.stage = stage;
        self
    }
}
/// Recovery lifecycle of a cluster.
///
/// `PartialOrd` is derived so `ForIsolation(n)` values can be compared by
/// their pass number (relied on by `MapFile::get_stage`).
///
/// The manual `Default` impl is replaced by `#[derive(Default)]` with the
/// `#[default]` variant attribute (stable since Rust 1.62; the crate
/// already requires ≥1.80 for `LazyLock`).
#[derive(Clone, Copy, Debug, Default, Deserialize, PartialEq, PartialOrd)]
pub enum Stage {
    /// Not yet read at all — the starting state.
    #[default]
    Untested,
    /// Being narrowed down; the payload is the isolation pass number.
    ForIsolation(u8),
    /// Confirmed unreadable.
    Damaged,
}
/// In-memory representation of the on-disk rescue map: the sector size
/// plus the set of clusters covering `domain`.
#[derive(Clone, Debug, Deserialize, PartialEq)]
pub struct MapFile {
    /// Sector size in bytes the map was built with.
    pub sector_size: u16,
    /// Overall sector range the map describes.
    pub domain: Domain,
    /// Cluster list; `defrag` assumes these tile `domain` contiguously
    /// starting at sector 0.
    pub map: Vec<Cluster>,
}
impl TryFrom<File> for MapFile {
    type Error = SpannedError;

    /// Deserializes a map directly from an open RON file via
    /// `ron::de::from_reader`; fails with a `SpannedError` when the file
    /// contents are not valid RON for this structure.
    fn try_from(file: File) -> Result<Self, Self::Error> {
        from_reader(file)
    }
}
impl Default for MapFile {
fn default() -> Self {
MapFile {
sector_size: FB_SECTOR_SIZE,
domain: Domain::default(),
map: vec![Cluster {
domain: Domain::default(),
stage: Stage::Untested,
}],
}
}
}
impl MapFile {
    /// Builds a map with the given sector size and otherwise-default
    /// fields.
    pub fn new(sector_size: u16) -> Self {
        MapFile::default()
            .set_sector_size(sector_size)
            .to_owned()
    }

    /// Sets the sector size, returning `&mut self` for chaining.
    pub fn set_sector_size(&mut self, sector_size: u16) -> &mut Self {
        self.sector_size = sector_size;
        self
    }

    /// Recalculate cluster mappings.
    ///
    /// Inserts `new_cluster` into the map, cropping or fracturing
    /// existing clusters it overlaps so clusters stay non-overlapping.
    fn update(&mut self, new_cluster: Cluster) -> &mut Self {
        // Seed the rebuilt map with the incoming cluster.
        // (Cluster::from on a Cluster is an identity conversion.)
        let mut new_map: Vec<Cluster> = vec![Cluster::from(new_cluster.to_owned())];
        for map_cluster in self.map.iter() {
            // Work on a copy; Cluster is Copy.
            let mut map_cluster = *map_cluster;
            // If new_cluster doesn't start ahead and ends short, map_cluster is forgotten.
            if new_cluster.domain.start < map_cluster.domain.start
                && new_cluster.domain.end < map_cluster.domain.end {
                /*
                    new_cluster overlaps the start of map_cluster,
                    but ends short of map_cluster end.
                    ACTION: Crop map_cluster to start at end of new_cluster.
                */
                map_cluster.domain.start = new_cluster.domain.end;
                new_map.push(map_cluster);
            } else if new_cluster.domain.end < map_cluster.domain.end {
                /*
                    new_cluster starts within map_cluster domain.
                    ACTION: Crop
                */
                // Remember the pre-crop end for the potential fracture tail.
                let domain_end = map_cluster.domain.end;
                // Crop current object.
                map_cluster.domain.end = new_cluster.domain.start;
                new_map.push(map_cluster);
                // NOTE(review): map_cluster.domain.end was just set to
                // new_cluster.domain.start above, so this condition reads
                // `new.end < new.start` and can never hold for a
                // well-formed cluster — the fracture branch looks
                // unreachable. Comparing against `domain_end` was likely
                // intended; confirm before changing.
                if new_cluster.domain.end < map_cluster.domain.end {
                    /*
                        new_cluster is within map_cluster.
                        ACTION: Crop & Fracture map_cluster
                        NOTE: Crop completed above.
                    */
                    new_map.push(Cluster {
                        domain: Domain {
                            start: new_cluster.domain.end,
                            end: domain_end,
                        },
                        stage: map_cluster.stage.to_owned()
                    });
                }
            } else {
                /*
                    No overlap.
                    ACTION: Transfer
                */
                // NOTE(review): this arm also catches the case where
                // new_cluster fully covers map_cluster (start <= and
                // end >=), in which case the covered cluster is kept and
                // overlaps the new one — verify whether it should be
                // dropped instead.
                new_map.push(map_cluster);
            }
        }
        self.map = new_map;
        self
    }

    /// Get current recovery stage.
    ///
    /// Any `Untested` cluster short-circuits to `Untested`; otherwise the
    /// lowest `ForIsolation` pass wins; a map of only `Damaged` clusters
    /// yields `Damaged`.
    pub fn get_stage(&self) -> Stage {
        let mut recover_stage = Stage::Damaged;
        for cluster in self.map.iter() {
            match cluster.stage {
                Stage::Untested => return Stage::Untested,
                Stage::ForIsolation(_) => {
                    if recover_stage == Stage::Damaged
                        || cluster.stage < recover_stage {
                        // Note that recover_stage after first condition is
                        // only ever Stage::ForIsolation(_), thus PartialEq,
                        // PartialOrd are useful for comparing the internal value.
                        recover_stage = cluster.stage
                    }
                },
                Stage::Damaged => (),
            }
        }
        recover_stage
    }

    /// Get clusters of common stage.
    ///
    /// Returns owned copies of every cluster whose stage equals `stage`,
    /// in map order.
    pub fn get_clusters(&self, stage: Stage) -> Vec<Cluster> {
        self.map.iter()
            .filter_map(|mc| {
                if mc.stage == stage { Some(mc.to_owned()) } else { None }
            })
            .collect()
    }

    /// Defragments cluster groups.
    /// I.E. check forwards every cluster from current until stage changes,
    /// then group at once.
    ///
    /// Assumes the map tiles `self.domain` contiguously starting at
    /// sector 0 — the `unwrap`s below panic if a gap or missing start
    /// cluster breaks that invariant.
    fn defrag(&mut self) -> &mut Self {
        let mut new_map: Vec<Cluster> = vec![];
        // Fetch first cluster.
        let mut start_cluster = self.map.iter()
            .find(|c| c.domain.start == 0)
            .unwrap();
        // Even though this would be initialized by its first read,
        // the compiler won't stop whining, and idk how to assert that to it.
        let mut end_cluster = Cluster::default();
        let mut new_cluster: Cluster;
        let mut stage_common: bool;
        let mut is_finished = false;
        while !is_finished {
            stage_common = true;
            // Start a new cluster based on the cluster following
            // the end of last new_cluster.
            new_cluster = start_cluster.to_owned();
            // While stage is common, and not finished,
            // find each trailing cluster.
            while stage_common && !is_finished {
                end_cluster = start_cluster.to_owned();
                if end_cluster.domain.end != self.domain.end {
                    // Follow adjacency: the next cluster starts where this
                    // one ends.
                    start_cluster = self.map.iter()
                        .find(|c| end_cluster.domain.end == c.domain.start)
                        .unwrap();
                    stage_common = new_cluster.stage == start_cluster.stage
                } else {
                    is_finished = true;
                }
            }
            // Set the new ending, encapsulating any clusters of common stage.
            new_cluster.domain.end = end_cluster.domain.end;
            new_map.push(new_cluster);
        }
        self.map = new_map;
        self
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Shorthand: a default cluster forced to `stage`.
    fn staged(stage: Stage) -> Cluster {
        *Cluster::default().set_stage(stage)
    }

    /// Test for MapFile::get_stage()
    #[test]
    fn test_get_stage() {
        let mut mf = MapFile::default();
        // If this fails here, there's something SERIOUSLY wrong.
        assert_eq!(
            mf.get_stage(),
            Stage::Untested,
            "A default map must report Stage::Untested."
        );
        // Each pushed stage precedes the previous ones in recovery order,
        // so the reported stage must track the newest push.
        mf.map = vec![];
        for stage in [
            Stage::Damaged,
            Stage::ForIsolation(1),
            Stage::ForIsolation(0),
            Stage::Untested,
        ] {
            mf.map.push(staged(stage));
            assert_eq!(
                mf.get_stage(),
                stage,
                "Reported stage did not follow the newest cluster."
            );
        }
    }

    /// Test for MapFile::get_clusters()
    #[test]
    fn test_get_clusters() {
        let mut mf = MapFile::default();
        // Every stage below appears exactly twice in the map.
        mf.map = vec![
            staged(Stage::Damaged),
            staged(Stage::ForIsolation(0)),
            staged(Stage::ForIsolation(1)),
            Cluster::default(),
            Cluster::default(),
            staged(Stage::ForIsolation(1)),
            staged(Stage::ForIsolation(0)),
            staged(Stage::Damaged),
        ];
        for stage in [
            Stage::Damaged,
            Stage::ForIsolation(1),
            Stage::ForIsolation(0),
            Stage::Untested,
        ] {
            assert_eq!(
                mf.get_clusters(stage),
                vec![staged(stage), staged(stage)],
                "Wrong clusters returned for stage {:?}.",
                stage
            );
        }
    }

    /// Test for MapFile::defrag()
    #[test]
    fn test_defrag() {
        /// Helper: a cluster spanning start..end at the given stage.
        fn cl(start: usize, end: usize, stage: Stage) -> Cluster {
            Cluster {
                domain: Domain { start, end },
                stage,
            }
        }
        let mut mf = MapFile {
            sector_size: 1,
            domain: Domain { start: 0, end: 8 },
            map: vec![
                cl(0, 1, Stage::Untested),
                cl(1, 2, Stage::Untested),
                cl(2, 3, Stage::Untested),
                cl(3, 4, Stage::ForIsolation(0)),
                cl(4, 5, Stage::ForIsolation(0)),
                cl(5, 6, Stage::ForIsolation(1)),
                cl(6, 7, Stage::ForIsolation(0)),
                cl(7, 8, Stage::Damaged),
            ],
        };
        mf.defrag();
        // Only contiguous runs of a common stage may merge.
        assert_eq!(
            mf.map,
            vec![
                cl(0, 3, Stage::Untested),
                cl(3, 5, Stage::ForIsolation(0)),
                cl(5, 6, Stage::ForIsolation(1)),
                cl(6, 7, Stage::ForIsolation(0)),
                cl(7, 8, Stage::Damaged),
            ],
            "Defrag merged the wrong clusters."
        );
    }
}

63
src/mapping/cluster.rs Normal file
View File

@@ -0,0 +1,63 @@
use super::{Domain, Stage};
use serde::{Deserialize, Serialize};
/// A map for data stored in memory for processing and saving to disk.
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub struct Cluster {
    /// Sector span this cluster covers.
    pub domain: Domain,
    /// Recovery stage of the covered sectors.
    pub stage: Stage,
}
impl Default for Cluster {
fn default() -> Self {
Cluster {
domain: Domain::default(),
stage: Stage::default(),
}
}
}
impl Cluster {
    /// Breaks the cluster apart into a vec of clusters,
    /// each `cluster_len` sectors long, except the last,
    /// which holds the remainder.
    ///
    /// No zero-length tail is emitted when the domain divides evenly.
    /// Panics if `cluster_len` is zero (division by zero).
    pub fn subdivide(&mut self, cluster_len: usize) -> Vec<Cluster> {
        let domain_len = self.domain.len();
        let mut start = self.domain.start;
        let mut clusters: Vec<Cluster> = vec![];
        for _ in 0..(domain_len / cluster_len) {
            clusters.push(Cluster {
                domain: Domain {
                    start,
                    end: start + cluster_len,
                },
                stage: self.stage,
            });
            start += cluster_len;
        }
        // Only emit a remainder cluster when one actually exists.
        // Previously this unconditionally pushed an empty `start..start`
        // cluster whenever domain_len was an exact multiple of cluster_len.
        if start < self.domain.end {
            clusters.push(Cluster {
                domain: Domain {
                    start,
                    end: self.domain.end,
                },
                stage: self.stage,
            });
        }
        clusters
    }

    /// Sets the stage; chainable.
    pub fn set_stage(&mut self, stage: Stage) -> &mut Self {
        self.stage = stage;
        self
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Placeholder: no unit tests yet.
    // Test for Cluster::subdivide()
}

55
src/mapping/domain.rs Normal file
View File

@@ -0,0 +1,55 @@
use serde::{Deserialize, Serialize};
/// Domain, in sectors.
/// Requires sector_size to be provided elsewhere for conversion to bytes.
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub struct Domain {
    /// First sector (inclusive).
    pub start: usize,
    /// One past the last sector (exclusive) — `len() == end - start`.
    pub end: usize,
}
impl Default for Domain {
fn default() -> Self {
Domain { start: 0, end: 1 }
}
}
impl Domain {
    /// Return length of domain in sectors.
    ///
    /// Underflows (debug panic) if `end < start`; a well-formed domain
    /// always has `start <= end`.
    pub fn len(self) -> usize {
        self.end - self.start
    }

    /// True when the domain covers no sectors.
    /// Added alongside `len` per clippy's `len_without_is_empty`.
    pub fn is_empty(self) -> bool {
        self.end <= self.start
    }

    /// Returns the type of overlap between this domain and another.
    /// Case numbers refer to map::tests::test_update.
    pub fn overlap(&self, other: Domain) -> DomainOverlap {
        if self.end <= other.start || other.end <= self.start {
            // Cases 7, 8, 12, and 13: disjoint (merely touching counts as none).
            DomainOverlap::None
        } else if other.start >= self.start && other.end <= self.end {
            // Cases 3, 5, 9, and 11: self covers all of other.
            DomainOverlap::SelfEngulfsOther
        } else if other.start <= self.start && other.end >= self.end {
            // Cases 4, 6, and 10: other covers all of self.
            DomainOverlap::OtherEngulfsSelf
        } else if self.start < other.start {
            // Case 1: other hangs off the end of self.
            DomainOverlap::OtherOverlapsEnd
        } else {
            // Case 2: other hangs off the start of self.
            DomainOverlap::OtherOverlapsStart
        }
    }
}
/// Classification of how one `Domain` overlaps another,
/// as produced by `Domain::overlap`.
/// Derives added so callers can debug-print, copy, and compare results.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum DomainOverlap {
    /// The domains share no sectors.
    None,
    /// `self` fully contains `other`.
    SelfEngulfsOther,
    /// `other` fully contains `self`.
    OtherEngulfsSelf,
    /// `other` covers the start of `self` only.
    OtherOverlapsStart,
    /// `other` covers the end of `self` only.
    OtherOverlapsEnd,
}
#[cfg(test)]
mod tests {
    use super::*;
    // Placeholder: `Domain::overlap` cases are exercised indirectly by the
    // map module's update tests; no direct unit tests yet.
}

873
src/mapping/map.rs Normal file
View File

@@ -0,0 +1,873 @@
use std::fs::File;
use std::io::Write;
use super::{Cluster, Domain, DomainOverlap, Stage};
use anyhow;
use ron::de::from_reader;
use ron::error::SpannedError;
use serde::{Deserialize, Serialize};
/// The recovery map: the device's full domain plus per-cluster recovery
/// stages. Serialized to and from RON (see `TryFrom<File>` / `write_to`).
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub struct MapFile {
    /// Sector size, used elsewhere to convert sector domains to bytes.
    pub sector_size: usize,
    /// Full domain of the mapped device, in sectors.
    pub domain: Domain,
    /// Cluster map covering `domain`.
    pub map: Vec<Cluster>,
}
/// Deserializes a `MapFile` from the RON contents of an open file.
impl TryFrom<File> for MapFile {
    type Error = SpannedError;
    fn try_from(file: File) -> Result<Self, Self::Error> {
        from_reader(file)
    }
}
impl Default for MapFile {
    /// Fallback map: one untested cluster over the default domain,
    /// using the crate-level fallback sector size.
    fn default() -> Self {
        let domain = Domain::default();
        Self {
            sector_size: crate::FB_SECTOR_SIZE,
            domain,
            map: vec![Cluster {
                domain,
                stage: Stage::Untested,
            }],
        }
    }
}
impl MapFile {
pub fn new(sector_size: usize) -> Self {
MapFile::default().set_sector_size(sector_size).to_owned()
}
pub fn set_sector_size(&mut self, sector_size: usize) -> &mut Self {
self.sector_size = sector_size;
self
}
/// Recalculate cluster mappings.
pub fn update(&mut self, new: Cluster) -> &mut Self {
let mut map: Vec<Cluster> = vec![Cluster::from(new.clone())];
for old in self.map.iter() {
let mut old = *old;
match new.domain.overlap(old.domain) {
DomainOverlap::None => map.push(old),
DomainOverlap::SelfEngulfsOther => (),
DomainOverlap::OtherEngulfsSelf => {
other_engulfs_self_update(new, &mut old, &mut map)
}
DomainOverlap::OtherOverlapsEnd => {
// Case 1
old.domain.start = new.domain.end;
map.push(old);
}
DomainOverlap::OtherOverlapsStart => {
// Case 2
old.domain.end = new.domain.start;
map.push(old);
}
};
}
self.map = map;
self
}
/// Get current recovery stage.
pub fn get_stage(&self) -> Stage {
let mut recover_stage = Stage::Damaged;
for cluster in self.map.iter() {
match cluster.stage {
Stage::Untested => return Stage::Untested,
Stage::ForIsolation { .. } => {
if recover_stage == Stage::Damaged || cluster.stage < recover_stage {
// Note that recover_stage after first condition is
// only ever Stage::ForIsolation(_), thus PartialEq,
// PartialOrd are useful for comparing the internal value.
recover_stage = cluster.stage
}
}
Stage::Damaged => (),
Stage::Intact => (),
}
}
recover_stage
}
/// Get clusters of common stage.
pub fn get_clusters(&self, stage: Stage) -> Vec<Cluster> {
self.map
.iter()
.filter_map(|mc| {
if mc.stage == stage {
Some(mc.to_owned())
} else {
None
}
})
.collect()
}
/// Defragments cluster groups.
/// I.E. check forwards every cluster from current until stage changes,
/// then group at once.
pub fn defrag(&mut self) -> &mut Self {
let mut new_map: Vec<Cluster> = vec![];
// Fetch first cluster.
let mut start_cluster = self.map.iter().find(|c| c.domain.start == 0).unwrap();
// Even though this would be initialized by its first read,
// the compiler won't stop whining, and idk how to assert that to it.
let mut end_cluster = Cluster::default();
let mut new_cluster: Cluster;
let mut stage_common: bool;
let mut is_finished = false;
while !is_finished {
stage_common = true;
// Start a new cluster based on the cluster following
// the end of last new_cluster.
new_cluster = start_cluster.to_owned();
// While stage is common, and not finished,
// find each trailing cluster.
while stage_common && !is_finished {
end_cluster = start_cluster.to_owned();
if end_cluster.domain.end != self.domain.end {
start_cluster = self
.map
.iter()
.find(|c| end_cluster.domain.end == c.domain.start)
.unwrap();
stage_common = new_cluster.stage == start_cluster.stage
} else {
is_finished = true;
}
}
// Set the new ending, encapsulating any clusters of common stage.
new_cluster.domain.end = end_cluster.domain.end;
new_map.push(new_cluster);
}
self.map = new_map;
self
}
/// Extend the domain of the MapFile.
/// Returns None if the domain cannot be changed or is unchanged.
/// Returns the delta of the previous domain end and the new end.
pub fn extend(&mut self, end: usize) -> Option<usize> {
if end <= self.domain.end {
return None;
}
let old_end = self.domain.end;
let delta = end - old_end;
self.domain.end = end;
// Add new data as untested.
self.update(Cluster {
domain: Domain {
start: old_end,
end: self.domain.end,
},
..Default::default()
});
Some(delta)
}
/// Writes the map to the provided item implementing `Write` trait.
/// Usually a file.
pub fn write_to<W: Write>(&mut self, file: &mut W) -> anyhow::Result<usize> {
self.defrag();
let written_bytes = file.write(
ron::ser::to_string_pretty(
self,
ron::ser::PrettyConfig::new()
.new_line("\n".to_string())
.struct_names(true),
)?
.as_bytes(),
)?;
Ok(written_bytes)
}
}
/// Handles `DomainOverlap::OtherEngulfsSelf` for `MapFile::update`:
/// `old` fully contains `new`, so `old` is trimmed and, when `new` sits
/// strictly inside, the trailing remainder is pushed as its own cluster.
/// This is split out for a shred of readability.
fn other_engulfs_self_update(new: Cluster, old: &mut Cluster, map: &mut Vec<Cluster>) {
    if new.domain.start == old.domain.start {
        // Case 6 of map::tests::test_update: common start,
        // keep only the tail of old.
        old.domain.start = new.domain.end;
    } else {
        // Case 4 and part of 10: keep the head of old...
        let old_end = old.domain.end;
        old.domain.end = new.domain.start;
        if new.domain.end != old_end {
            // Case 10 of map::tests::test_update:
            // ...and push the fracture trailing `new`.
            map.push(Cluster {
                domain: Domain {
                    start: new.domain.end,
                    end: old_end,
                },
                stage: old.stage,
            })
        }
    }
    // `Cluster` is `Copy`; deref beats the previous `to_owned()` clone.
    map.push(*old)
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Shorthand: an `Untested` cluster spanning `start..end`.
    fn cl(start: usize, end: usize) -> Cluster {
        Cluster {
            domain: Domain { start, end },
            ..Default::default()
        }
    }

    /// Drives one `MapFile::update()` case: a map holding only `old` is
    /// updated with `new`, and the sorted result must equal `expected`.
    fn check_update(old: Cluster, new: Cluster, expected: Vec<Cluster>) {
        let mut map = MapFile {
            map: vec![old],
            ..Default::default()
        };
        map.update(new);
        map.map.sort();
        assert_eq!(map.map, expected);
    }

    /// Case 1:
    /// |----new----|
    ///       |----old----|
    ///
    ///             | --> |-old-|
    /// Solution: old.start = new.end
    #[test]
    fn update_1_new_overlaps_start() {
        check_update(cl(1, 3), cl(0, 2), vec![cl(0, 2), cl(2, 3)]);
    }

    /// Case 2:
    ///       |----new----|
    /// |----old----|
    ///
    /// |-old-| <-- |
    /// Solution: old.end = new.start
    #[test]
    fn update_2_new_overlaps_end() {
        check_update(cl(0, 2), cl(1, 3), vec![cl(0, 1), cl(1, 3)]);
    }

    /// Case 3: new engulfs old, common end.
    /// Solution: remove old.
    #[test]
    fn update_3_new_engulfs_common_end() {
        check_update(cl(1, 3), cl(0, 3), vec![cl(0, 3)]);
    }

    /// Case 4: old engulfs new, common end.
    /// Solution: old.end = new.start
    #[test]
    fn update_4_old_engulfs_common_end() {
        check_update(cl(0, 3), cl(1, 3), vec![cl(0, 1), cl(1, 3)]);
    }

    /// Case 5: new engulfs old, common start.
    /// Solution: remove old.
    #[test]
    fn update_5_new_engulfs_common_start() {
        check_update(cl(0, 2), cl(0, 3), vec![cl(0, 3)]);
    }

    /// Case 6: old engulfs new, common start.
    /// Solution: old.start = new.end
    #[test]
    fn update_6_old_engulfs_common_start() {
        check_update(cl(0, 3), cl(0, 2), vec![cl(0, 2), cl(2, 3)]);
    }

    /// Case 7: new immediately precedes old.
    /// Solution: leave unchanged.
    #[test]
    fn update_7_new_precedes() {
        check_update(cl(2, 3), cl(0, 2), vec![cl(0, 2), cl(2, 3)]);
    }

    /// Case 8: new immediately trails old.
    /// Solution: leave unchanged.
    #[test]
    fn update_8_new_trails() {
        check_update(cl(0, 2), cl(2, 3), vec![cl(0, 2), cl(2, 3)]);
    }

    /// Case 9: new strictly engulfs old.
    /// Solution: remove old.
    #[test]
    fn update_9_new_engulfs() {
        check_update(cl(1, 2), cl(0, 3), vec![cl(0, 3)]);
    }

    /// Case 10: old strictly engulfs new.
    /// Solution: old.end = new.start, plus a fracture cluster with
    /// fracture.start = new.end and fracture.end = old's original end.
    #[test]
    fn update_10_old_engulfs() {
        check_update(cl(0, 3), cl(1, 2), vec![cl(0, 1), cl(1, 2), cl(2, 3)]);
    }

    /// Case 11: identical domains, different stages.
    /// Solution: new replaces old.
    #[test]
    fn update_11_common_start_and_end() {
        let mut map = MapFile {
            map: vec![Cluster {
                domain: Domain { start: 0, end: 3 },
                stage: Stage::Untested,
            }],
            ..Default::default()
        };
        map.update(Cluster {
            domain: Domain { start: 0, end: 3 },
            stage: Stage::Intact,
        });
        map.map.sort();
        assert_eq!(
            map.map,
            vec![Cluster {
                domain: Domain { start: 0, end: 3 },
                stage: Stage::Intact
            }]
        );
    }

    /// Case 12: new is disjoint and precedes old.
    /// Solution: leave unchanged.
    #[test]
    fn update_12_new_out_of_range_preceding() {
        check_update(cl(2, 3), cl(0, 1), vec![cl(0, 1), cl(2, 3)]);
    }

    /// Case 13: new is disjoint and trails old.
    /// Solution: leave unchanged.
    #[test]
    fn update_13_new_out_of_range_trailing() {
        check_update(cl(0, 1), cl(2, 3), vec![cl(0, 1), cl(2, 3)]);
    }

    /// Shorthand: a default cluster forced to `stage`.
    fn staged(stage: Stage) -> Cluster {
        *Cluster::default().set_stage(stage)
    }

    /// Test for MapFile::get_stage()
    #[test]
    fn get_stage() {
        let mut mf = MapFile::default();
        // If this fails here, there's something SERIOUSLY wrong.
        assert_eq!(
            mf.get_stage(),
            Stage::Untested,
            "A default map must report Stage::Untested."
        );
        // Each pushed stage precedes the previous ones in recovery order,
        // so the reported stage must track the newest push.
        mf.map = vec![];
        for stage in [
            Stage::Damaged,
            Stage::ForIsolation { level: 1 },
            Stage::ForIsolation { level: 0 },
            Stage::Untested,
        ] {
            mf.map.push(staged(stage));
            assert_eq!(
                mf.get_stage(),
                stage,
                "Reported stage did not follow the newest cluster."
            );
        }
    }

    /// Test for MapFile::get_clusters()
    #[test]
    fn get_clusters() {
        let mut mf = MapFile::default();
        // Every stage below appears exactly twice in the map.
        mf.map = vec![
            staged(Stage::Damaged),
            staged(Stage::ForIsolation { level: 0 }),
            staged(Stage::ForIsolation { level: 1 }),
            Cluster::default(),
            Cluster::default(),
            staged(Stage::ForIsolation { level: 1 }),
            staged(Stage::ForIsolation { level: 0 }),
            staged(Stage::Damaged),
        ];
        for stage in [
            Stage::Damaged,
            Stage::ForIsolation { level: 1 },
            Stage::ForIsolation { level: 0 },
            Stage::Untested,
        ] {
            assert_eq!(
                mf.get_clusters(stage),
                vec![staged(stage), staged(stage)],
                "Wrong clusters returned for stage {:?}.",
                stage
            );
        }
    }

    /// Test for MapFile::defrag()
    #[test]
    fn defrag() {
        /// Helper: a cluster spanning start..end at the given stage.
        fn at(start: usize, end: usize, stage: Stage) -> Cluster {
            Cluster {
                domain: Domain { start, end },
                stage,
            }
        }
        let iso0 = Stage::ForIsolation { level: 0 };
        let iso1 = Stage::ForIsolation { level: 1 };
        let mut mf = MapFile {
            sector_size: 1,
            domain: Domain { start: 0, end: 8 },
            map: vec![
                at(0, 1, Stage::Untested),
                at(1, 2, Stage::Untested),
                at(2, 3, Stage::Untested),
                at(3, 4, iso0),
                at(4, 5, iso0),
                at(5, 6, iso1),
                at(6, 7, iso0),
                at(7, 8, Stage::Damaged),
            ],
        };
        mf.defrag();
        // Only contiguous runs of a common stage may merge.
        assert_eq!(
            mf.map,
            vec![
                at(0, 3, Stage::Untested),
                at(3, 5, iso0),
                at(5, 6, iso1),
                at(6, 7, iso0),
                at(7, 8, Stage::Damaged),
            ],
            "Defrag merged the wrong clusters."
        );
    }
}

12
src/mapping/mod.rs Normal file
View File

@@ -0,0 +1,12 @@
#![allow(unused_imports)]
//! Cluster-map data model: domains, clusters, stages, and the `MapFile`
//! that ties them together.
pub mod cluster;
pub mod domain;
pub mod map;
pub mod prelude;
pub mod stage;
// Flattened re-exports so callers can write `crate::mapping::Cluster`.
pub use cluster::Cluster;
pub use domain::{Domain, DomainOverlap};
pub use map::MapFile;
pub use stage::Stage;

6
src/mapping/prelude.rs Normal file
View File

@@ -0,0 +1,6 @@
#![allow(unused_imports)]
//! Convenience prelude: glob-import the whole mapping data model at once.
pub use super::cluster::Cluster;
pub use super::domain::{Domain, DomainOverlap};
pub use super::map::MapFile;
pub use super::stage::Stage;

20
src/mapping/stage.rs Normal file
View File

@@ -0,0 +1,20 @@
use serde::{Deserialize, Serialize};
/// Recovery stage of a cluster.
///
/// NOTE: declaration order matters — `PartialOrd`/`Ord` are derived, and
/// `MapFile::get_stage` relies on the derived ordering (in particular,
/// `ForIsolation` values compare by their `level`).
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub enum Stage {
    /// Not yet read.
    Untested,
    /// Awaiting isolation passes; presumably lower `level` runs first — confirm.
    ForIsolation { level: u8 },
    /// Read attempts failed.
    Damaged,
    /// Successfully recovered.
    Intact,
}
impl Default for Stage {
fn default() -> Self {
Stage::Untested
}
}
#[cfg(test)]
mod tests {
    use super::*;
    // Placeholder: no unit tests yet; stage ordering is exercised
    // indirectly through the map module's tests.
}

40
src/path.rs Normal file
View File

@@ -0,0 +1,40 @@
use std::path::{Path, PathBuf};
use std::sync::LazyLock;
use crate::cli::CONFIG;
use anyhow::{self, Context};
/// Generates a file path if one not provided.
///
/// Falls back to `<root_path>.<extension>` — the extension is appended
/// after the full file name, not substituted for an existing one.
///
/// # Errors
/// Fails when the fallback `root_path` is not valid UTF-8.
pub fn get_path<P>(path: &Option<P>, root_path: &P, extension: &str) -> anyhow::Result<PathBuf>
where
    P: AsRef<Path>,
{
    // An explicitly provided path always wins.
    if let Some(f) = path {
        return Ok(f.as_ref().to_path_buf());
    }
    let root = root_path
        .as_ref()
        .to_str()
        .context("source_name path was not UTF-8 valid.")?;
    // Previously this went PathBuf -> &Path -> PathBuf via
    // `.as_path().to_owned()`, a needless clone of the fresh buffer.
    Ok(PathBuf::from(format!("{root}.{extension}")))
}
/// Map-file path: the CLI-provided path, or `<input>.map` as a fallback.
/// First access panics (via `unwrap`) if the path cannot be generated.
pub static MAP_PATH: LazyLock<PathBuf> = LazyLock::new(|| {
    get_path(&CONFIG.map, &CONFIG.input, "map")
        .context("Failed to generate map path.")
        .unwrap()
});
/// Output image path: the CLI-provided path, or `<input>.iso` as a fallback.
/// First access panics (via `unwrap`) if the path cannot be generated.
pub static OUTPUT_PATH: LazyLock<PathBuf> = LazyLock::new(|| {
    get_path(&CONFIG.output, &CONFIG.input, "iso")
        .context("Failed to generate output path.")
        .unwrap()
});

View File

@@ -1,105 +1,167 @@
use std::{
io::{BufReader, BufWriter},
fs::File,
};
use std::fs::File;
use std::io::{BufWriter, Read, Seek, SeekFrom, Write};
use std::usize;
use crate::{
Args,
mapping::{Cluster, MapFile, Stage},
};
use anyhow::Context;
use crate::cli::CONFIG;
use crate::mapping::prelude::*;
#[derive(Debug)]
#[allow(dead_code)]
pub struct Recover {
buf_capacity: usize,
config: Args,
input: BufReader<File>,
input: File,
output: BufWriter<File>,
map: MapFile,
stage: Stage,
}
impl Recover {
pub fn new(
config: Args,
input: File,
output: File,
map: MapFile,
) -> Self {
let stage = map.get_stage();
pub fn new() -> anyhow::Result<Self> {
let input: File = crate::io::load_input()?;
// Temporarily make buffer length one sector.
let buf_capacity = config.sector_size as usize;
let mut r = Recover {
buf_capacity,
config,
input: BufReader::with_capacity(
buf_capacity,
input,
),
output: BufWriter::with_capacity(
buf_capacity,
output,
),
map,
stage: stage,
let output: File = crate::io::load_output()?;
let map: MapFile = {
crate::io::load_map_read()?
.try_into()
.unwrap_or(MapFile::new(CONFIG.sector_size))
};
// Ensure that buffer capacity is adjusted based on progress.
r.set_buf_capacity();
r
let mut r = Recover {
input,
output: BufWriter::with_capacity(map.domain.end as usize, output),
map,
};
r.restore()?;
Ok(r)
}
/// Recover media.
pub fn run(&mut self) -> &mut Self {
pub fn run(&mut self) -> anyhow::Result<usize> {
// From start, read to end or error.
//
// If all data recovered, return early.
// Else, read from end to error.
let mut is_finished = false;
while !is_finished {
self.map.defrag();
match self.map.get_stage() {
Stage::Untested => { self.copy_untested(); },
Stage::ForIsolation(level) => { self.copy_isolate(level); },
Stage::Damaged => {
Stage::Untested => self.copy_untested()?,
Stage::ForIsolation { .. } => todo!(),
Stage::Damaged | Stage::Intact => {
println!("Cannot recover further.");
is_finished = true
},
}
};
// Need to reset seek position between algorithms.
self.input
.rewind()
.context("Failed to reset input seek position.")?;
self.output
.rewind()
.context("Failed to reset output seek position")?;
}
self
// Temporary.
let recovered_bytes = usize::MIN;
Ok(recovered_bytes)
}
/// Restore current progress based on MapFile.
/// Also updates MapFile if needed, such as to extend the MapFile domain.
pub fn restore(&mut self) -> anyhow::Result<()> {
self.map.extend(
crate::io::get_stream_length(&mut self.input)
.context("Failed to get input stream length.")? as usize,
);
Ok(())
}
/// Attempt to copy all untested blocks.
fn copy_untested(&mut self) -> &mut Self {
fn copy_untested(&mut self) -> anyhow::Result<()> {
for untested in self.map.get_clusters(Stage::Untested) {
// Caching.
let mut read_position: usize;
let mut cluster: Cluster;
let mut buf_capacity = self.get_buf_capacity() as usize;
let mut buf = vec![crate::FB_NULL_VALUE; buf_capacity];
let mut untested: Vec<Cluster> = vec![];
read_position = untested.domain.start;
for cluster in self.map.get_clusters(Stage::Untested).iter_mut() {
untested.append(&mut cluster.subdivide(self.map.sector_size as usize));
while read_position < untested.domain.end {
dbg!(read_position);
cluster = Cluster {
domain: Domain {
start: read_position,
end: read_position + buf_capacity,
},
stage: Stage::Intact,
};
buf_capacity = buf_capacity.min(untested.domain.end - read_position);
if let Err(err) = self.input.read_exact(&mut buf) {
// If buf were zeroed out before every read, one could theoretically recover
// part of that read given the assumption that all null values from the end to
// the first non-null value are unread, and some further padding from the last
// values are potentially invalid.
//
// That padding should have a cli arg to control it.
println!("Hit error: {:?}", err);
if CONFIG.reopen_on_error {
self.reload_input()
.context("Failed to reload input file after previous error.")?;
}
todo!("Read and save data.");
self.input
.seek_relative((read_position + buf_capacity) as i64)
.context("Failed to seek input by buf_capacity to skip previous error")?;
self
// I don't remember what level was for.
cluster.stage = Stage::ForIsolation { level: 1 };
}
/// Attempt to copy blocks via isolation at pass level.
fn copy_isolate(&mut self, level: u8) -> &mut Self {
todo!();
self
if cluster.stage == Stage::Intact {
self.output
.write_all(&buf[0..buf_capacity])
.context("Failed to write data to output file")?;
}
/// Set buffer capacities as cluster length in bytes.
self.map.update(cluster);
read_position += buf_capacity;
}
}
self.map.write_to(&mut crate::io::load_map_write()?)?;
Ok(())
}
/// Set buffer capacity as cluster length in bytes.
/// Varies depending on the recovery stage.
fn set_buf_capacity(&mut self) -> &mut Self {
self.buf_capacity = (self.config.sector_size * self.config.cluster_length) as usize;
self
}
fn get_buf_capacity(&mut self) -> u64 {
CONFIG.sector_size as u64 * CONFIG.cluster_length as u64
}
/// Reloads the input and restores the seek position.
fn reload_input(&mut self) -> anyhow::Result<()> {
let seek_pos = self.input.stream_position()?;
self.input = crate::io::load_input()?;
self.input.seek(SeekFrom::Start(seek_pos))?;
Ok(())
}
}
#[cfg(test)]
#[allow(unused)]