/*
* Copyright (c) 2021, Luca Fulchir <luker@fenrirproject.org>
*
* This file is part of dfim.
*
* dfim is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* dfim is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with dfim. If not, see <https://www.gnu.org/licenses/>.
*/
mod flags;
mod part_type;
/// How a partition type can be specified in the configuration.
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, Debug, PartialEq)]
#[serde(deny_unknown_fields)]
pub enum PartId {
    // NOTE(review): a variant-level `#[serde_as]` requires the container to
    // carry `#[serde_with::serde_as]` above the derives — not visible in
    // this file; confirm against upstream before relying on hex parsing.
    /// gptfdisk-style numeric type code, serialized as hex
    #[serde_as(as = "serde_with::hex::Hex")]
    GPTFdisk(u16),
    /// well-known named partition type (see the `part_type` module)
    Def(part_type::PartType),
    /// raw partition-type GUID
    UUID(::uuid::Uuid),
}
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, PartialEq)]
// This is an enum to force ron to use Alignemnt(u64) so the user will never
// confuse the fields
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, PartialEq)]
#[serde(deny_unknown_fields)]
pub enum Alignment {
Alignment(Index),
}
impl Alignment {
fn warn_nonstandard(&self, logger: &::slog::Logger) {
let Alignment::Alignment(al) = self;
match al {
Index::LBA(l) => {
if l % 2048 != 0 {
::slog::warn!(
logger,
"LBA Alignment \"{}\" not a multiple of 2048 sectors \
(common GPT alignment)",
l
);
}
Index::Bytes(b) => {
if b % 512 != 0 {
::slog::warn!(
logger,
"Byte Alignment \"{}\" might be changed: not a \
multiple of 512/4K (common sector sizes). Common \
alignment: 2048 sectors",
b
);
/// Round `to_align` up to the next multiple of `align_to`.
///
/// An `align_to` of 0 means "no alignment": the value is returned
/// unchanged. Values already aligned are returned unchanged too.
pub fn align_raw(to_align: u64, align_to: u64) -> u64 {
    if align_to == 0 {
        to_align
    } else {
        // classic round-up-to-multiple formula
        ((to_align + align_to - 1) / align_to) * align_to
    }
}
/// Convert an [`Index`] into a raw `u64` in block (LBA) units.
///
/// NOTE(review): the `Bytes` arm rounds the byte count up to a multiple of
/// `blocksize` but does not divide by it, so the result for `Bytes` looks
/// like aligned bytes rather than an LBA — confirm the intended semantics
/// against the callers / upstream source.
fn align_idx_to_block(unaligned: Index, blocksize: u32) -> u64 {
    let bs = blocksize as u64;
    match unaligned {
        // already in LBA units: pass through
        Index::LBA(lba) => lba,
        // zero bytes stays zero regardless of blocksize
        Index::Bytes(0) => 0,
        Index::Bytes(bytes) => align_raw(bytes, bs),
    }
}
/// take an index(LBA||Bytes) and align it to the given alignment.
/// alignment goes to ne next aligned value.
/// keep everything bounded by min and max lba
fn align_next_or_prev(
unaligned: Index,
al: Alignment,
blocksize: u32,
min_lba: u64,
max_lba: u64,
) -> (Index, Alignment) {
// translate into LBA
let to_align_lba = align_idx_to_block(unaligned, blocksize);
let align_to_lba = match al {
Alignment::Alignment(Index::LBA(0))
| Alignment::Alignment(Index::Bytes(0)) => 0,
Alignment::Alignment(idx) => align_idx_to_block(idx, blocksize),
};
let idx = align_raw(to_align_lba, align_to_lba);
let idx = ::std::cmp::max(idx, min_lba);
let idx = ::std::cmp::min(idx, max_lba);
(
Index::LBA(idx),
Alignment::Alignment(Index::LBA(align_to_lba)),
)
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, PartialEq)]
#[serde(deny_unknown_fields)]
pub enum PartFrom {
/// specify LBA number directly
/// space to leave free before the partition
SkipFree(Index, Alignment),
/// percent of free space to leave free before the partition
impl PartFrom {
    /// Rewrite `self` so that every index and alignment is expressed in
    /// LBA units for the given `blocksize`.
    ///
    /// * `Index` starts are additionally aligned to their alignment and
    ///   clamped to the `[min_lba, max_lba]` range.
    /// * `SkipFree` / `SkipFreePercent` only get their units converted
    ///   here; no clamping is applied to them.
    pub fn lba_align(&mut self, blocksize: u32, min_lba: u64, max_lba: u64) {
        match self {
            PartFrom::Index(idx, al) => {
                // align + clamp, then store back the normalized pair
                let realigned = align_next_or_prev(
                    idx.clone(),
                    al.clone(),
                    blocksize,
                    min_lba,
                    max_lba,
                );
                *self = PartFrom::Index(realigned.0, realigned.1);
            }
            PartFrom::SkipFree(idx, Alignment::Alignment(al)) => {
                // convert both the skip amount and its alignment to LBA
                *self = PartFrom::SkipFree(
                    Index::LBA(align_idx_to_block(idx.clone(), blocksize)),
                    Alignment::Alignment(Index::LBA(align_idx_to_block(
                        al.clone(),
                        blocksize,
                    ))),
                );
            }
            PartFrom::SkipFreePercent(perc, Alignment::Alignment(al)) => {
                // percentage is unit-less: only the alignment is converted
                *self = PartFrom::SkipFreePercent(
                    *perc,
                    Alignment::Alignment(Index::LBA(align_idx_to_block(
                        al.clone(),
                        blocksize,
                    ))),
                );
            }
        }
    }
}
#[derive(::serde::Deserialize, ::serde::Serialize, Clone, PartialEq)]
#[serde(deny_unknown_fields)]
pub enum PartTo {
/// the size of the partition, to be converted to the actual LBA
Size(Index, Alignment),
impl PartTo {
pub fn lba_align(&mut self, blocksize: u32, min_lba: u64, max_lba: u64) {
match self {
PartTo::Index(idx, al) => {
let realigned = align_next_or_prev(
idx.clone(),
al.clone(),
blocksize,
min_lba,
max_lba,
);
*self = PartTo::Index(realigned.0, realigned.1);
}
PartTo::Size(idx, Alignment::Alignment(al)) => {
*self = PartTo::Size(
Index::LBA(align_idx_to_block(idx.clone(), blocksize)),
Alignment::Alignment(Index::LBA(align_idx_to_block(
al.clone(),
blocksize,
))),
);
}
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
PartTo::Leave(idx, Alignment::Alignment(al)) => {
*self = PartTo::Leave(
Index::LBA(align_idx_to_block(idx.clone(), blocksize)),
Alignment::Alignment(Index::LBA(align_idx_to_block(
al.clone(),
blocksize,
))),
);
}
PartTo::Percent(perc, Alignment::Alignment(al)) => {
*self = PartTo::Percent(
*perc,
Alignment::Alignment(Index::LBA(align_idx_to_block(
al.clone(),
blocksize,
))),
);
}
PartTo::LeavePercent(perc, Alignment::Alignment(al)) => {
*self = PartTo::LeavePercent(
*perc,
Alignment::Alignment(Index::LBA(align_idx_to_block(
al.clone(),
blocksize,
))),
);
}
}
}
}
/// UUID of a single partition: either generated or fixed by the user.
#[derive(::serde::Deserialize, ::serde::Serialize, Clone)]
#[serde(deny_unknown_fields)]
pub enum PartUUID {
    /// generate a random UUID for the partition
    Random,
    /// use exactly this UUID
    UUID(::uuid::Uuid),
}
#[derive(::serde::Deserialize, ::serde::Serialize, Clone)]
#[serde(deny_unknown_fields)]
pub struct Partition {
pub flags: Vec<flags::Flag>,
pub from: PartFrom,
pub to: PartTo,
impl Partition {
    /// OR together the bit values of every configured flag.
    pub fn flag_bits(&self) -> u64 {
        self.flags
            .iter()
            .fold(0u64, |bits, flag| bits | flag.bits())
    }
}
impl PartialEq<Partition> for Partition {
    /// Two partitions are equal when everything that ends up in the actual
    /// GPT matches; the content targets are deliberately not compared.
    fn eq(&self, other: &Self) -> bool {
        if self.label != other.label
            || self.number != other.number
            || self.part_type != other.part_type
        {
            return false;
        }
        // part_uuid can be Random: only compare when both sides carry
        // a fixed UUID
        if let (PartUUID::UUID(own), PartUUID::UUID(theirs)) =
            (&self.part_uuid, &other.part_uuid)
        {
            if own != theirs {
                return false;
            }
        }
        // do not check the targets, we only care about the actual GPT here
        self.flag_bits() == other.flag_bits()
            && self.from == other.from
            && self.to == other.to
    }
}
/// Disk GUID of the whole GPT: either generated or fixed by the user.
#[derive(::serde::Deserialize, ::serde::Serialize, Clone)]
#[serde(deny_unknown_fields)]
pub enum GptUuid {
    /// generate a random disk GUID
    Random,
    /// use exactly this GUID
    UUID(::uuid::Uuid),
}
/// serde default for `GPT::max_partitions`: 128, the conventional size of
/// a GPT partition table.
const fn def_max_partitions() -> u32 {
    128
}
#[derive(::serde::Deserialize, ::serde::Serialize, Clone)]
#[serde(deny_unknown_fields)]
pub struct GPT {
#[serde(default = "def_max_partitions")]
pub max_partitions: u32,
fn id(&self) -> &str {
self.id.as_str()
}
fn targets(&self) -> Vec<String> {
let mut ret = Vec::with_capacity(self.partitions.len());
for p in &self.partitions {
if let trgt::TargetId::Id(t) = &p.target {
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
impl PartialEq<GPT> for GPT {
    /// Compare a configured GPT with one found on disk.
    /// `self.id` is not compared: we generate our own id for on-disk data.
    fn eq(&self, other: &Self) -> bool {
        // GPT guid can be Random, so only compare when both sides carry
        // a fixed UUID
        if let (GptUuid::UUID(own), GptUuid::UUID(theirs)) =
            (&self.guid, &other.guid)
        {
            if own != theirs {
                return false;
            }
        }
        self.max_partitions == other.max_partitions
            && self.partitions.len() == other.partitions.len()
            && self
                .partitions
                .iter()
                .zip(other.partitions.iter())
                .all(|(p, other_p)| p == other_p)
    }
}
#[derive(::serde::Deserialize, ::serde::Serialize, Clone)]
#[serde(deny_unknown_fields)]
// somehow marking this "untagged" breaks the parsing of Gpt :/
//#[serde(untagged)]
pub enum GptId {
impl crate::config::CfgVerify for GPT {
    /// Normalize every partition's type to its UUID form where possible,
    /// so later comparisons/checks only have to deal with `PartId::UUID`.
    fn standardize(&mut self) {
        for p in &mut self.partitions {
            p.part_type = match &mut p.part_type {
                // already a UUID: keep as-is
                uuid @ PartId::UUID(_) => uuid.clone(),
                // named type: convert to its canonical UUID
                PartId::Def(name) => PartId::UUID((*name).into()),
                PartId::GPTFdisk(gptfdisk) => {
                    //let name = gptfdisk as PartType;
                    //PartId::UUID(name.uuid())
                    // numeric gptfdisk code: map to a known PartType when
                    // possible, otherwise keep the raw code (later
                    // consistency checks reject unrecognized types)
                    // NOTE(review): unqualified `PartType` implies a `use`
                    // not visible in this (corrupted) file — confirm.
                    let maybe_part: Result<PartType, _> =
                        (*gptfdisk).try_into();
                    match maybe_part {
                        Ok(part) => PartId::UUID(part.into()),
                        Err(_) => PartId::GPTFdisk(*gptfdisk),
                    }
                }
            }
        }
    }
fn check_consistency(
&self,
logger: &slog::Logger,
) -> Result<(), errors::ConfigError> {
// FIXME: points of failure:
// * partitions with same self.number (done)
// * overlapping partitions
// * warn if the partitions are not aligned to 2K sector
for (p1_idx, p1) in self.partitions.iter().enumerate() {
if p1.number == 0 {
::slog::error!(
logger,
"GPT: partition number must be strictly positive. target: \
\"{}\" partition: {}",
self.id,
p1.number
);
return Err(errors::ConfigError::Consistency(
"Partition numbers must be strictly positive".to_owned(),
));
}
match p1.part_type {
PartId::UUID(_) => {}
_ => {
::slog::error!(
logger,
"GPT: partition type not recognized: \"{:?}\"",
p1.part_type
);
return Err(errors::ConfigError::Consistency(
"Partition type not recognized".to_owned(),
));
}
}
for (p2_idx, p2) in self.partitions.iter().enumerate() {
if p1_idx == p2_idx {
continue;
}
"GPT: multiple partitions with the same number ({}) \
in GPT: \"{}\"",
p1.number,
self.id
);
return Err(errors::ConfigError::Consistency(
"multiple partitions with the same number".to_owned(),
// warn used on non standard alignment
match &p1.from {
PartFrom::Index(_, al) => {
al.warn_nonstandard(logger);
}
PartFrom::SkipFree(_, al) => {
al.warn_nonstandard(logger);
}
PartFrom::SkipFreePercent(_, al) => {
al.warn_nonstandard(logger);
}
}
match &p1.to {
PartTo::Index(_, al) => {
al.warn_nonstandard(logger);
}
PartTo::Size(_, al) => {
al.warn_nonstandard(logger);
}
PartTo::Percent(_, al) => {
al.warn_nonstandard(logger);
}
PartTo::Leave(_, al) => {
al.warn_nonstandard(logger);
}
PartTo::LeavePercent(_, al) => {
al.warn_nonstandard(logger);
}
}