tartan_arch/x86_common/
protection.rs

1//! Support for protected mode operation.
2//!
3//! This includes the minimal support for segmented memory and hardware task management
4//! that is required to operate in protected mode with a flat memory model.
5
6use core::arch::asm;
7use core::fmt;
8use core::num::NonZeroU16;
9use static_assertions::const_assert_eq;
10use tartan_bitfield::{
11    bitfield, bitfield_accessors, bitfield_without_debug, get_bit, set_bit, Bitfield,
12};
13use tartan_c_enum::c_enum;
14
15#[cfg(doc)]
16use super::FlagRegister;
17#[cfg(doc)]
18use crate::x86_64::protection::TaskStateSegmentHeader;
19#[cfg(doc)]
20use crate::x86_64::ExtendedFeatureEnableRegister;
21
22
/// `GDTR`: Points to the memory range of the global descriptor table (GDT).
///
/// The packed C layout (16-bit limit immediately followed by the base address) matches
/// the memory operand format used by the `sgdt`/`lgdt` instructions in
/// [`get`](Self::get)/[`set`](Self::set).
#[repr(C, packed)]
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
pub struct GlobalDescriptorTableRegister {
    /// The inclusive maximum address offset (i.e., size - 1) of the descriptor table.
    pub limit: u16,
    /// Base address of the descriptor table
    pub address: usize,
}
32
impl GlobalDescriptorTableRegister {
    /// Retrieve the current value of this register
    pub fn get() -> Self {
        let mut value = Self::default();
        unsafe {
            // `sgdt` stores the limit and base address at the given memory operand;
            // the packed layout of `Self` matches that format.
            asm!(
                "sgdt [{0}]",
                in(reg) &raw mut value,
            );
        }
        value
    }

    /// Update the register to the given value.
    ///
    /// # Safety
    /// This register fundamentally affects memory accesses and can have an impact on
    /// memory safety.
    pub unsafe fn set(value: &Self) {
        asm!(
            "lgdt [{0}]",
            in(reg) value,
        );
    }

    /// Update the global descriptor table pointer and all segment registers.
    ///
    /// # Safety
    /// This register fundamentally affects memory accesses and can have an impact on
    /// memory safety.
    // The `.dummy_target` label appears in both asm blocks below, but only one of them
    // is ever compiled for a given target, so the name cannot collide.
    #[allow(named_asm_labels)]
    pub unsafe fn set_with_segments(
        gdt_pointer: &Self,
        code_selector: Selector,
        data_selector: Selector,
    ) {
        // TODO: Disable interrupts?

        // NOTE(review): the comments inside this 32-bit asm block refer to `rsp`, but
        // in 32-bit mode the stack pointer is `esp`. The code itself is unaffected.
        #[cfg(target_arch = "x86")]
        asm!(
            "
            // Set the global descriptor table register
            lgdt [{gdt_pointer}]

            // Update the data segment selectors
            mov ds, {data_selector:e}
            mov es, {data_selector:e}
            mov fs, {data_selector:e}
            mov gs, {data_selector:e}
            mov ss, {data_selector:e}

            // Update the code segment selector. Directly loading the CS register doesn't
            // work, so we use a far return. The 'return' is really a jump to the next
            // instruction using a far pointer we push onto the stack, consisting of:
            //   * [rsp]     = 32-bit offset
            //   * [rsp + 4] = 32-bit segment selector (zero-extended from 16 bits)
            push {code_selector:e}
            mov eax, offset .dummy_target
            push eax
            retf
        .dummy_target:
            ",
            gdt_pointer = in(reg) gdt_pointer,
            data_selector = in(reg) u16::from(data_selector),
            code_selector = in(reg) u16::from(code_selector),
            out(\"eax\") _,
        );

        #[cfg(target_arch = "x86_64")]
        asm!(
            "
            // Set the global descriptor table register
            lgdt [{gdt_pointer}]

            // Update the data segment selectors
            mov ds, {data_selector:r}
            mov es, {data_selector:r}
            mov fs, {data_selector:r}
            mov gs, {data_selector:r}
            mov ss, {data_selector:r}

            // Update the code segment selector. Directly loading the CS register doesn't
            // work, so we use a far return. The 'return' is really a jump to the next
            // instruction using a far pointer we push onto the stack, consisting of:
            //   * [rsp]     = 64-bit offset
            //   * [rsp + 8] = 64-bit segment selector (zero-extended from 16 bits)
            push {code_selector:r}
            lea rax, [rip + .dummy_target]
            push rax
            rex64 retf
        .dummy_target:
            ",
            gdt_pointer = in(reg) gdt_pointer,
            data_selector = in(reg) u16::from(data_selector),
            code_selector = in(reg) u16::from(code_selector),
            out(\"rax\") _,
        );
    }
}
132
133
/// `LDTR`: Contains a [`Selector`] referencing a [`SegmentDescriptor`] that points to
/// the local descriptor table (LDT).
///
/// Note that this type cannot be instantiated. It simply serves as a namespace for the
/// [`get`](Self::get) and [`set`](Self::set) methods, which work on a [`Selector`]
/// instance.
#[allow(clippy::empty_enum)]
pub enum LocalDescriptorTableRegister {}
142
impl LocalDescriptorTableRegister {
    /// Retrieve the current value of this register
    pub fn get() -> Selector {
        let mut value = Selector(0);
        unsafe {
            // `sldt` stores only the 16-bit selector, so write straight into the
            // selector's underlying value.
            asm!(
                "sldt {0:x}",
                out(reg) value.0,
            );
        }
        value
    }

    /// Update the register with the value in this struct.
    ///
    /// The value must point to an entry in the global descriptor table
    /// ([`local`](Selector::local) == `false`) with the
    /// [`SystemDescriptorType::LocalDescriptorTable`] type.
    ///
    /// # Safety
    /// This register fundamentally affects memory accesses and can have an impact on
    /// memory safety.
    pub unsafe fn set(selector: Selector) {
        asm!(
            "lldt {0:x}",
            in(reg) selector.0,
        );
    }

    /// Get a pointer to the [`SegmentDescriptor`] for the currently-loaded local
    /// descriptor table, using the value of this register.
    ///
    /// # Safety
    /// The LDTR must be loaded with a selector for a valid LDT segment descriptor. The
    /// returned pointer is only valid until the LDTR is modified.
    pub unsafe fn current_descriptor() -> *const SegmentDescriptor {
        let address = LocalDescriptorTableRegister::get().descriptor_address();
        address as *const SegmentDescriptor
    }
}
183
184
/// `TR`: Contains a [`Selector`] referencing a [`SegmentDescriptor`] that points to the
/// current task state segment (TSS).
///
/// Note that this type cannot be instantiated. It simply serves as a namespace for the
/// [`get`](Self::get) and [`set`](Self::set) methods, which work on a [`Selector`]
/// instance. See [`set`](Self::set) for the requirements on the selector value.
#[allow(clippy::empty_enum)]
pub enum TaskRegister {}
193
impl TaskRegister {
    /// Retrieve the current value of this register
    pub fn get() -> Selector {
        let mut value = Selector(0);
        unsafe {
            // `str` stores only the 16-bit selector, so write straight into the
            // selector's underlying value.
            asm!(
                "str {0:x}",
                out(reg) value.0,
            );
        }
        value
    }

    /// Update the register with the provided selector value.
    ///
    /// The value must point to an entry in the global descriptor table
    /// ([`local`](Selector::local) == `false`) with one of the
    /// `SystemDescriptorType::TaskState*` types.
    ///
    /// # Safety
    /// This register fundamentally affects memory accesses and can have an impact on
    /// memory safety.
    pub unsafe fn set(selector: Selector) {
        asm!(
            "ltr {0:x}",
            in(reg) selector.0,
        );
    }
}
223
224
/// Standard segment registers (`CS`, `DS`, `SS`, etc.), which contain [`Selector`]s.
pub enum SegmentRegister {
    /// `CS` register, which controls instruction loading.
    Code,
    /// `DS` register, which controls the default segment for load/store instructions.
    Data,
    /// `SS` segment register, which controls the location of the stack pointer and stack
    /// push and pop instructions.
    Stack,
    /// `ES` segment register, which can be used as an additional data segment.
    Extra,
    /// `FS` segment register, which can be used as an additional data segment.
    ExtraF,
    /// `GS` segment register, which can be used as an additional data segment.
    ExtraG,
}
241
242impl SegmentRegister {
243    /// Retrieve the current value of this register
244    pub fn get(self) -> Selector {
245        let mut value = Selector(0);
246        unsafe {
247            match self {
248                Self::Code => asm!("mov {0:x}, cs", out(reg) value.0),
249                Self::Data => asm!("mov {0:x}, ds", out(reg) value.0),
250                Self::Stack => asm!("mov {0:x}, ss", out(reg) value.0),
251                Self::Extra => asm!("mov {0:x}, es", out(reg) value.0),
252                Self::ExtraF => asm!("mov {0:x}, fs", out(reg) value.0),
253                Self::ExtraG => asm!("mov {0:x}, gs", out(reg) value.0),
254            }
255        }
256        value
257    }
258
259    /// Update the register with the provided selector value.
260    ///
261    /// # Safety
262    /// This register fundamentally affects memory accesses and can have an impact on
263    /// memory safety.
264    pub unsafe fn set(self, selector: Selector) {
265        match self {
266            Self::Code => asm!("mov cs, {0:x}", in(reg) selector.0),
267            Self::Data => asm!("mov ds, {0:x}", in(reg) selector.0),
268            Self::Stack => asm!("mov ss, {0:x}", in(reg) selector.0),
269            Self::Extra => asm!("mov es, {0:x}", in(reg) selector.0),
270            Self::ExtraF => asm!("mov fs, {0:x}", in(reg) selector.0),
271            Self::ExtraG => asm!("mov gs, {0:x}", in(reg) selector.0),
272        }
273    }
274}
275
276
bitfield! {
    /// A reference to an entry in a segment descriptor table.
    ///
    /// Used as the value of the segment registers `CS`, `DS`, etc. as well as the
    /// [`LocalDescriptorTableRegister`] and [`TaskRegister`].
    pub struct Selector(u16) {
        /// `RPL`: The privilege level "requested" when accessing the referenced segment.
        ///
        /// This may be different than the current privilege level (CPL), in which case
        /// the access must be allowed for *both* the CPL and RPL. This is intended to
        /// allow OS code to limit its privileges when executing on behalf of user code.
        [0..2] pub privilege_level: u8,

        /// Indicates that this selector references a descriptor in the local descriptor
        /// table (LDT). Otherwise, it references the global descriptor table (GDT).
        [2] pub local,
    }
}
295
296impl Selector {
297    const OFFSET_MASK: u16 = 0xfff8;
298
299    /// Create a new selector with the given field values
300    pub const fn new(offset: u16, privilege_level: u8, local: bool) -> Self {
301        // TODO: Rework bitfield crate to allow creating in const contexts
302        let value = (offset & Self::OFFSET_MASK)
303            | ((privilege_level & 0b11) as u16)
304            | ((local as u16) << 2);
305        Self(value)
306    }
307
308    /// Create a null selector
309    pub const fn null() -> Self {
310        Self(0)
311    }
312
313    /// The offset of the referenced segment entry in the descriptor table.
314    pub fn offset(self) -> u16 {
315        self.0 & Self::OFFSET_MASK
316    }
317
318    /// Update the offset of the referenced entry in the descriptor table.
319    ///
320    /// # Panics
321    /// Panics if the new offset is not aligned on an 8-byte boundary.
322    pub fn set_offset(&mut self, offset: u16) {
323        assert!(
324            offset & !Self::OFFSET_MASK == 0,
325            "Descriptor offset {offset} is not aligned on an 8-byte boundary"
326        );
327        self.0 &= !Self::OFFSET_MASK;
328        self.0 |= offset;
329    }
330
331    /// Calculate the address of the descriptor referenced by this selector
332    pub fn descriptor_address(self) -> usize {
333        let table_address = if self.local() {
334            let local_table =
335                unsafe { &*(LocalDescriptorTableRegister::current_descriptor()) };
336            local_table.address()
337        } else {
338            GlobalDescriptorTableRegister::get().address
339        };
340        table_address + self.offset() as usize
341    }
342}
343
344
/// Settings common to [`SegmentDescriptor`]s and [`GateDescriptor`]s.
pub trait DescriptorFlags: Bitfield<u32> {
    bitfield_accessors! {
        /// If this is a system descriptor, indicates which type.
        ///
        /// Only applies if [`is_application`](Self::is_application) is false.
        [ 8..12] system_type: u8 as SystemDescriptorType,

        /// The processor sets this bit whenever a segment register points to this
        /// segment.
        ///
        /// The processor never resets this itself, but it can be cleared manually.
        ///
        /// Only applies if [`is_application`](Self::is_application) is true.
        [ 8] application_accessed,

        /// If this is a code descriptor, indicates that the segment can be read.
        /// Otherwise, it is execute-only.
        ///
        /// Only applies if [`is_application`](Self::is_application) and
        /// [`is_code`](Self::is_code) are true.
        [ 9] code_readable,

        /// If this is a data descriptor, indicates that this segment is writable.
        ///
        /// Required for stack segments.
        ///
        /// Only applies if [`is_application`](Self::is_application) is true and
        /// [`is_code`](Self::is_code) is false.
        [ 9] data_writable,

        /// If this is a code descriptor, indicates that this segment can be executed
        /// with lower privileges than [`privilege_level`](Self::privilege_level).
        ///
        /// Only applies if [`is_application`](Self::is_application) and
        /// [`is_code`](Self::is_code) are true.
        [10] code_conforming,

        /// If this is a data descriptor, indicates that the segment expands toward lower
        /// addresses (stack-like) if its limit is changed.
        ///
        /// Only applies if [`is_application`](Self::is_application) is true and
        /// [`is_code`](Self::is_code) is false.
        [10] data_expand_down,

        /// Indicates whether this is a code (true) or data (false) descriptor.
        ///
        /// Only applies if [`is_application`](Self::is_application) is true.
        [11] is_code,

        /// `S`: Indicates that this is an application section descriptor if true.
        /// Otherwise, this is a system descriptor.
        [12] is_application,

        /// `DPL`: The privilege level associated with the segment.
        ///
        /// This has several meanings depending on the segment type:
        ///   * For stack segments, this is the *exact* privilege level required to use
        ///     the segment as a stack.
        ///   * For data segments, this is the minimum privilege (maximum number) required
        ///     to access the segment.
        ///   * For call gates, this is the minimum privilege (maximum number) required to
        ///     use the gate.
        ///   * For code segments:
        ///     * If the segment is accessed through a call gate _or_
        ///       [`code_conforming`](Self::code_conforming) is true, this is the
        ///       *maximum* privilege level (minimum number) that can execute the code.
        ///     * Otherwise, it is the *exact* privilege level required to execute the
        ///       code.
        [13..15] privilege_level: u8,

        /// `P`: Indicates that the segment is defined.
        [15] present,

        /// `L`: If this is a code segment, indicates that it should be executed in 64-bit
        /// mode.
        ///
        /// Mutually exclusive with [`mode_32`](Self::mode_32).
        ///
        /// Only applies if all of the following are true:
        ///   * [`is_application`](Self::is_application)
        ///   * [`is_code`](Self::is_code)
        ///   * [`ExtendedFeatureEnableRegister::long_mode_active`]
        #[cfg(any(target_arch = "x86_64", doc))]
        #[doc(cfg(target_arch = "x86_64"))]
        [21] code_mode_64,

        /// `D`/`B`: Indicates that the segment uses 32-bit mode. Otherwise, it is 16-bit,
        /// unless [`code_mode_64`](Self::code_mode_64) is set.
        ///
        /// Besides the address/operand sizes for instructions in code segments, this also
        /// affects the upper bound of stack-like data segments with the
        /// [`data_expand_down`](Self::data_expand_down) flag set.
        ///
        /// Only applies if [`is_application`](Self::is_application) is true.
        [22] application_mode_32,
    }

    /// Indicates that this is a [`GateDescriptor`].
    fn is_gate(&self) -> bool {
        !self.is_application() && self.system_type().is_gate()
    }
}
448
449
c_enum! {
    /// Discriminate types of segment descriptors that are not code or data.
    pub enum SystemDescriptorType(u8) {
        /// A [`SegmentDescriptor`] for a 16-bit task state segment (TSS) that is not
        /// currently running or waiting on a call to another task.
        #[cfg(any(target_arch = "x86", doc))]
        #[doc(cfg(target_arch = "x86"))]
        TaskStateAvailable16Bit = 1,

        /// A [`SegmentDescriptor`] for a segment that contains a local descriptor table.
        LocalDescriptorTable = 2,

        /// A [`SegmentDescriptor`] for a 16-bit task state segment (TSS) that is either
        /// running or waiting on a call to another task.
        #[cfg(any(target_arch = "x86", doc))]
        #[doc(cfg(target_arch = "x86"))]
        TaskStateBusy16Bit = 3,

        /// A [`GateDescriptor`] for a call to 16-bit code.
        #[cfg(any(target_arch = "x86", doc))]
        #[doc(cfg(target_arch = "x86"))]
        CallGate16Bit = 4,

        /// A [`GateDescriptor`] for task switching.
        #[cfg(any(target_arch = "x86", doc))]
        #[doc(cfg(target_arch = "x86"))]
        TaskGate = 5,

        /// A [`GateDescriptor`] for a 16-bit interrupt handler.
        #[cfg(any(target_arch = "x86", doc))]
        #[doc(cfg(target_arch = "x86"))]
        InterruptGate16Bit = 6,

        /// A [`GateDescriptor`] for a 16-bit trap handler.
        #[cfg(any(target_arch = "x86", doc))]
        #[doc(cfg(target_arch = "x86"))]
        TrapGate16Bit = 7,

        /// A [`SegmentDescriptor`] for a 32/64-bit task state segment (TSS) that is not
        /// currently running or waiting on a call to another task.
        TaskStateAvailable = 9,

        /// A [`SegmentDescriptor`] for a 32/64-bit task state segment (TSS) that is
        /// either running or waiting on a call to another task.
        TaskStateBusy = 11,

        /// A [`GateDescriptor`] for a call to 32/64-bit code.
        CallGate = 12,

        /// A [`GateDescriptor`] for a 32/64-bit interrupt handler.
        InterruptGate = 14,

        /// A [`GateDescriptor`] for a 32/64-bit trap handler.
        ///
        /// A trap gate works identically to an interrupt gate, except that the processor
        /// does not automatically clear [`FlagRegister::interrupt_enabled`] when it
        /// invokes the handler through a trap gate.
        TrapGate = 15,
    }
}
510
511impl SystemDescriptorType {
512    /// Indicates that this is a [`GateDescriptor`].
513    pub fn is_gate(self) -> bool {
514        #[cfg(target_arch = "x86")]
515        {
516            matches!(
517                self,
518                Self::CallGate
519                    | Self::CallGate16Bit
520                    | Self::InterruptGate
521                    | Self::InterruptGate16Bit
522                    | Self::TrapGate
523                    | Self::TrapGate16Bit
524                    | Self::TaskGate
525            )
526        }
527
528        #[cfg(target_arch = "x86_64")]
529        {
530            matches!(self, Self::CallGate | Self::InterruptGate | Self::TrapGate)
531        }
532    }
533}
534
535
536
/// Generic entry in a global/local/interrupt descriptor table. Can be a
/// [`SegmentDescriptor`] or [`GateDescriptor`], depending on the type flags.
#[repr(C)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct GenericDescriptor {
    // Meaning of these bits depends on the descriptor type; see the accessors on
    // `SegmentDescriptor` and `GateDescriptor`.
    lower: u32,

    /// Common descriptor settings.
    pub flags: GenericDescriptorFlags,

    #[cfg(any(target_arch = "x86_64", doc))]
    #[doc(cfg(target_arch = "x86_64"))]
    upper: u32,

    // NOTE: In some cases, The processor verifies that this isn't a 32-bit descriptor by
    // looking for the type field (bits 8..13) in this DWord and making sure it is 0.
    #[cfg(any(target_arch = "x86_64", doc))]
    #[doc(cfg(target_arch = "x86_64"))]
    _reserved: u32,
}

// Verify the cfg-dependent layout: 8 bytes in 32-bit mode, 16 bytes in 64-bit mode.
#[cfg(target_arch = "x86")]
const_assert_eq!(8, core::mem::size_of::<GenericDescriptor>());

#[cfg(target_arch = "x86_64")]
const_assert_eq!(16, core::mem::size_of::<GenericDescriptor>());
563
564
bitfield_without_debug! {
    /// Settings for [`GenericDescriptor`]s.
    ///
    /// Defines no fields of its own; all accessors come from the [`DescriptorFlags`]
    /// trait implemented below.
    pub struct GenericDescriptorFlags(u32) {}
}

impl fmt::Debug for GenericDescriptorFlags {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut struct_fmt = f.debug_struct("GenericDescriptorFlags");
        struct_fmt.field("<value>", &self.0);
        // Emit the macro-generated fields first, then the fields shared through the
        // `DescriptorFlags` trait.
        self.fmt_fields(&mut struct_fmt);
        <Self as DescriptorFlags>::fmt_fields(self, &mut struct_fmt);
        struct_fmt.finish()
    }
}

impl DescriptorFlags for GenericDescriptorFlags {}
581
/// An entry in a segment descriptor table that defines a new segment. This includes code,
/// data, task state (TSS), and local descriptor table (LDT) segments.
///
/// Notes on the size of this structure:
///   * In 32-bit mode, this structure is always 8 bytes and contains a 32-bit base
///     address.
///   * In 64-bit mode:
///     * Task state and local descriptor table segment descriptors are expanded to 16
///       bytes with 64-bit base addresses.
///     * From the processor's perspective, code and data segments remain 8 bytes and the
///       address and limit are both ignored. However, this structure is still defined as
///       16 bytes. This is fine, since the first 8 bytes are compatible and the rest will
///       be ignored.
#[repr(C)]
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct SegmentDescriptor {
    // Holds the low 16 bits of the limit and bits 0..16 of the base address; accessed
    // through the `address`/`limit` methods below.
    lower: u32,

    /// Common segment descriptor settings.
    pub flags: SegmentDescriptorFlags,

    #[cfg(any(target_arch = "x86_64", doc))]
    #[doc(cfg(target_arch = "x86_64"))]
    upper: u32,

    // NOTE: In some cases, The processor verifies that this isn't a 32-bit descriptor by
    // looking for the type field (bits 8..13) in this DWord and making sure it is 0.
    #[cfg(any(target_arch = "x86_64", doc))]
    #[doc(cfg(target_arch = "x86_64"))]
    _reserved: u32,
}

// Verify the cfg-dependent layout: 8 bytes in 32-bit mode, 16 bytes in 64-bit mode.
#[cfg(target_arch = "x86")]
const_assert_eq!(8, core::mem::size_of::<SegmentDescriptor>());

#[cfg(target_arch = "x86_64")]
const_assert_eq!(16, core::mem::size_of::<SegmentDescriptor>());
619
620impl SegmentDescriptor {
621    /// The maximum supported value of the [`limit`](Self::limit) field (20 bits).
622    pub const LIMIT_MAX: u32 = 0x000f_ffff;
623    const LIMIT_MASK_LOWER: u32 = 0x0000_ffff;
624    const LIMIT_MASK_FLAGS: u32 = 0x000f_0000;
625
626    const ADDRESS_MASK_LOWER: u32 = 0xffff_0000;
627    const ADDRESS_MASK_FLAGS_LOWER: u32 = 0x0000_00ff;
628    const ADDRESS_MASK_FLAGS_UPPER: u32 = 0xff00_0000;
629
630    /// Create a zero-initialized descriptor
631    pub const fn new() -> Self {
632        #[cfg(target_arch = "x86")]
633        let value = Self { lower: 0, flags: SegmentDescriptorFlags(0) };
634
635        #[cfg(target_arch = "x86_64")]
636        let value =
637            Self { lower: 0, flags: SegmentDescriptorFlags(0), upper: 0, _reserved: 0 };
638
639        value
640    }
641
642    /// Base virtual address of the segment, to which offsets are added.
643    ///
644    /// In 64-bit mode, this is ignored and assumed to be 0 for code and data segments,
645    /// but it still applies to task state and local descriptor table segments.
646    pub fn address(self) -> usize {
647        let mut address = ((self.lower & Self::ADDRESS_MASK_LOWER) >> 16) as usize;
648        address |= ((Self::ADDRESS_MASK_FLAGS_LOWER & self.flags.value()) << 16) as usize;
649        address |= (Self::ADDRESS_MASK_FLAGS_UPPER & self.flags.value()) as usize;
650        #[cfg(target_arch = "x86_64")]
651        {
652            address |= (self.upper as usize) << 32;
653        }
654        address
655    }
656
657    /// Update the base address.
658    pub fn set_address(&mut self, address: usize) {
659        #![allow(clippy::cast_possible_truncation)]
660
661        self.lower &= !Self::ADDRESS_MASK_LOWER;
662        self.lower |= (address << 16) as u32 & Self::ADDRESS_MASK_LOWER;
663
664        self.flags.0 &= !Self::ADDRESS_MASK_FLAGS_LOWER & !Self::ADDRESS_MASK_FLAGS_UPPER;
665        self.flags.0 |= Self::ADDRESS_MASK_FLAGS_LOWER & (address >> 16) as u32;
666        self.flags.0 |= Self::ADDRESS_MASK_FLAGS_UPPER & address as u32;
667
668        #[cfg(target_arch = "x86_64")]
669        {
670            self.upper = (address >> 32) as u32;
671        }
672    }
673
674    /// The "limit" of the segment, which is a maximum or minimum offset from the base
675    /// address.
676    ///
677    /// If this is a stack-like data segment
678    /// ([`data_expand_down`](DescriptorFlags::data_expand_down)), then this value is the
679    /// *exclusive minimum* offset value. Otherwise, this is the *inclusive maximum*
680    /// offset value (i.e., size - 1).
681    ///
682    /// This value may be in bytes or in 4KB units, depending on
683    /// [`flags.granularity`](SegmentDescriptorFlags::granularity).
684    ///
685    /// In 64-bit mode, this is ignored (all limit checks are disabled) for code and data
686    /// segments, but it still applies to task state and local descriptor table segments.
687    pub fn limit(self) -> u32 {
688        let mut limit = self.lower & Self::LIMIT_MASK_LOWER;
689        limit |= self.flags.value() & Self::LIMIT_MASK_FLAGS;
690        limit
691    }
692
693    /// Update the segment limit.
694    ///
695    /// # Panics
696    /// Panics if the limit is greater than [`LIMIT_MAX`](Self::LIMIT_MAX).
697    pub fn set_limit(&mut self, limit: u32) {
698        assert!(limit <= Self::LIMIT_MAX, "Segment limit too large: {limit:#x}");
699
700        self.lower &= !Self::LIMIT_MASK_LOWER;
701        self.lower |= Self::LIMIT_MASK_LOWER & limit;
702
703        self.flags.0 &= !Self::LIMIT_MASK_FLAGS;
704        self.flags.0 |= Self::LIMIT_MASK_FLAGS & limit;
705    }
706}
707
708impl fmt::Debug for SegmentDescriptor {
709    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
710        f.debug_struct("SegmentDescriptor")
711            .field("address", &self.address())
712            .field("limit", &self.limit())
713            .field("flags", &self.flags)
714            .finish_non_exhaustive()
715    }
716}
717
impl Default for SegmentDescriptor {
    // Delegates to the zero-initializing `new` constructor.
    fn default() -> Self {
        Self::new()
    }
}
723
724
bitfield_without_debug! {
    /// Settings for [`SegmentDescriptor`]s.
    ///
    /// Some bits of this word are not flags at all: they hold pieces of the segment's
    /// base address and limit, which are accessed through the [`SegmentDescriptor`]
    /// methods rather than defined here.
    pub struct SegmentDescriptorFlags(u32) {
        /// `AVL`: Ignored bit that can be used by the operating system.
        ///
        /// Does not apply to call gates.
        [20] pub os_defined,

        /// `G`: Indicates that the segment limit is in units of 4KB. Otherwise, it is in
        /// bytes.
        [23] pub granularity,
    }
}
738
impl fmt::Debug for SegmentDescriptorFlags {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut struct_fmt = f.debug_struct("SegmentDescriptorFlags");
        struct_fmt.field("<value>", &self.0);
        // Emit the macro-generated fields first, then the fields shared through the
        // `DescriptorFlags` trait.
        self.fmt_fields(&mut struct_fmt);
        <Self as DescriptorFlags>::fmt_fields(self, &mut struct_fmt);
        struct_fmt.finish()
    }
}

impl DescriptorFlags for SegmentDescriptorFlags {}
750
751
/// An entry in a segment descriptor table that points to an existing segment rather than
/// defining a new one.
///
/// This includes:
///   * Call gates, which allow controlled access to routines defined in a code segment
///     with a different priority level or word size.
///   * Interrupt and trap gates, which define handlers for interrupt vectors.
///   * Task gates, which support hardware task switching. These are not supported in
///     64-bit mode.
#[repr(C)]
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct GateDescriptor {
    // Holds the low 16 bits of the entry-point offset and the segment selector;
    // accessed through the `selector`/`entry_point_offset` methods below.
    lower: u32,

    /// Common gate descriptor settings.
    pub flags: GateDescriptorFlags,

    #[cfg(any(target_arch = "x86_64", doc))]
    #[doc(cfg(target_arch = "x86_64"))]
    upper: u32,

    // NOTE: In some cases, The processor verifies that this isn't a 32-bit descriptor by
    // looking for the type field (bits 8..13) in this DWord and making sure it is 0.
    #[cfg(any(target_arch = "x86_64", doc))]
    #[doc(cfg(target_arch = "x86_64"))]
    _reserved: u32,
}

// Verify the cfg-dependent layout: 8 bytes in 32-bit mode, 16 bytes in 64-bit mode.
#[cfg(target_arch = "x86")]
const_assert_eq!(8, core::mem::size_of::<GateDescriptor>());

#[cfg(target_arch = "x86_64")]
const_assert_eq!(16, core::mem::size_of::<GateDescriptor>());
785
786impl GateDescriptor {
787    const SELECTOR_MASK: u32 = 0xffff_0000;
788    const OFFSET_MASK_LOWER: u32 = 0x0000_ffff;
789    const OFFSET_MASK_UPPER: u32 = 0xffff_0000;
790
791    /// Create a zero-initialized descriptor
792    pub const fn new() -> Self {
793        #[cfg(target_arch = "x86")]
794        let value = Self { lower: 0, flags: GateDescriptorFlags(0) };
795
796        #[cfg(target_arch = "x86_64")]
797        let value =
798            Self { lower: 0, flags: GateDescriptorFlags(0), upper: 0, _reserved: 0 };
799
800        value
801    }
802
803    /// Selector that points to the code or task state segment to be accessed through this
804    /// gate.
805    ///
806    /// For call, interrupt, and trap gates, this points to the code segment that contains
807    /// the routine to be executed. For task gates, this points to a task state segment
808    /// representing the task to activate.
809    pub fn selector(self) -> Selector {
810        #![allow(clippy::cast_possible_truncation)]
811        Selector(((self.lower & Self::SELECTOR_MASK) >> 16) as u16)
812    }
813
814    /// Update the selector pointing to the segment to be accessed through this gate.
815    pub fn set_selector(&mut self, selector: Selector) {
816        self.lower &= !Self::SELECTOR_MASK;
817        self.lower |= Self::SELECTOR_MASK & (u32::from(selector.value()) << 16);
818    }
819
820    /// Offset of the entry point in code segment referenced by
821    /// [`selector`](Self::selector).
822    ///
823    /// Only applies to call, interrupt, and trap gates.
824    pub fn entry_point_offset(self) -> usize {
825        let mut offset = (self.lower & Self::OFFSET_MASK_LOWER) as usize;
826        offset |= (self.flags.value() & Self::OFFSET_MASK_UPPER) as usize;
827        #[cfg(target_arch = "x86_64")]
828        {
829            offset |= (self.upper as usize) << 32;
830        }
831        offset
832    }
833
834    /// Update the offset of the entry point within the referenced code segment.
835    pub fn set_entry_point_offset(&mut self, offset: usize) {
836        #![allow(clippy::cast_possible_truncation)]
837
838        self.lower &= !Self::OFFSET_MASK_LOWER;
839        self.lower |= Self::OFFSET_MASK_LOWER & (offset as u32);
840
841        self.flags.0 &= !Self::OFFSET_MASK_UPPER;
842        self.flags.0 |= Self::OFFSET_MASK_UPPER & (offset as u32);
843
844        #[cfg(target_arch = "x86_64")]
845        {
846            self.upper = (offset >> 32) as u32;
847        }
848    }
849}
850
851impl fmt::Debug for GateDescriptor {
852    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
853        f.debug_struct("GateDescriptor")
854            .field("selector", &self.selector())
855            .field("entry_point_offset", &self.entry_point_offset())
856            .field("flags", &self.flags)
857            .finish_non_exhaustive()
858    }
859}
860
861impl Default for GateDescriptor {
862    fn default() -> Self {
863        Self::new()
864    }
865}
866
867
bitfield_without_debug! {
    /// Settings for [`GateDescriptor`]s.
    ///
    /// Fields common to all descriptor types are provided through the
    /// [`DescriptorFlags`] trait implementation; only the gate-specific fields are
    /// defined here. Note that the two fields below deliberately overlap: each one is
    /// only meaningful for the gate types (and architecture) noted in its docs.
    pub struct GateDescriptorFlags(u32) {
        /// Number of stack parameters to copy if the code segment referenced by a call
        /// gate uses a different stack segment.
        ///
        /// Only applies to call gates.
        [0..4] pub call_param_count: u8,

        /// One-based index of the
        /// [`interrupt_stack`](crate::x86_64::protection::TaskStateSegment::interrupt_stack)
        /// pointer to use when handling an interrupt through this gate.
        ///
        /// If this value is zero, then the stack segment is set to null.
        ///
        /// Only applies to interrupt and trap gates.
        #[cfg(any(target_arch = "x86_64", doc))]
        #[doc(cfg(target_arch = "x86_64"))]
        [0..2] pub interrupt_stack_index: u8,
    }
}
889
890impl fmt::Debug for GateDescriptorFlags {
891    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
892        let mut struct_fmt = f.debug_struct("GateDescriptorFlags");
893        struct_fmt.field("<value>", &self.0);
894        self.fmt_fields(&mut struct_fmt);
895        <Self as DescriptorFlags>::fmt_fields(self, &mut struct_fmt);
896        struct_fmt.finish()
897    }
898}
899
900impl DescriptorFlags for GateDescriptorFlags {}
901
902
/// Bitmap that controls which I/O port addresses are covered by privilege checks.
///
/// For bit `N` of this structure (bit `N mod 8` of byte `floor(N / 8)`), up to
/// [`max_port`](Self::max_port):
///   * If the bit is set, then access to I/O port `N` is not allowed at privilege levels
///     below [`FlagRegister::io_privilege_level`] (greater numbers).
///   * If the bit is unset, then access to I/O port `N` is allowed from any privilege
///     level.
///
/// This structure is stored in the task state segment (TSS) at a variable location
/// indicated by its
/// [`io_permission_map_offset`](TaskStateSegmentHeader::io_permission_map_offset). It is
/// also variable-sized: if there are fewer than [`MAX_SIZE`](Self::MAX_SIZE) bytes
/// between the start of the permission map and the [`limit`](SegmentDescriptor::limit) of
/// the containing TSS, then the processor acts as though the bits for all ports past
/// `max_port_for_size(limit - offset)` are set.
///
/// The wrapped storage defaults to an unsized byte slice (`[u8]`), but any byte buffer
/// that can be borrowed as `[u8]` (e.g. a fixed-size array) may be used instead.
#[derive(Debug, PartialEq, Eq)]
pub struct IOPermissionBitmap<T = [u8]>(T)
where
    T: AsRef<[u8]> + AsMut<[u8]> + Eq + ?Sized;
923
924impl IOPermissionBitmap {
925    /// Size in bytes required to map all I/O ports
926    pub const MAX_SIZE: usize = Self::required_size(u16::MAX);
927
928    /// Calculate the size in bytes of a map that has bits for ports up to and including
929    /// `max_port`.
930    ///
931    /// Note that because of the way the processor reads this map, it requires an extra
932    /// byte at the end that does not map any ports.
933    ///
934    /// ```
935    /// # use tartan_arch::x86_common::protection::IOPermissionBitmap;
936    /// #
937    /// assert_eq!(IOPermissionBitmap::required_size(0), 2);
938    /// assert_eq!(IOPermissionBitmap::required_size(7), 2);
939    /// assert_eq!(IOPermissionBitmap::required_size(8), 3);
940    /// assert_eq!(IOPermissionBitmap::required_size(0xa587), 0x14b2);
941    /// assert_eq!(IOPermissionBitmap::required_size(0xffff), 0x2001);
942    /// ```
943    pub const fn required_size(max_port: u16) -> usize {
944        max_port as usize / 8 + 2
945    }
946
947    /// Calculate the last I/O port that is covered by a map of the given size.
948    ///
949    /// Note that because of the way the processor reads this map, it requires an extra
950    /// byte at the end that does not map any ports. Therefore, empty or single-byte maps
951    /// cannot represent _any_ ports, and this function returns `None` in those cases.
952    ///
953    /// ```
954    /// # use core::num::NonZeroU16;
955    /// # use tartan_arch::x86_common::protection::IOPermissionBitmap;
956    /// #
957    /// assert_eq!(IOPermissionBitmap::max_port_for_size(0), None);
958    /// assert_eq!(IOPermissionBitmap::max_port_for_size(1), None);
959    /// assert_eq!(IOPermissionBitmap::max_port_for_size(2), NonZeroU16::new(7));
960    /// assert_eq!(IOPermissionBitmap::max_port_for_size(4), NonZeroU16::new(23));
961    /// assert_eq!(IOPermissionBitmap::max_port_for_size(0x2000), NonZeroU16::new(0xfff7));
962    /// assert_eq!(IOPermissionBitmap::max_port_for_size(0x2001), NonZeroU16::new(0xffff));
963    /// assert_eq!(IOPermissionBitmap::max_port_for_size(0x2002), NonZeroU16::new(0xffff));
964    /// assert_eq!(IOPermissionBitmap::max_port_for_size(usize::MAX), NonZeroU16::new(0xffff));
965    /// ```
966    pub const fn max_port_for_size(size: usize) -> Option<NonZeroU16> {
967        #[allow(clippy::cast_possible_truncation)]
968        if size <= 1 {
969            None
970        } else if size >= Self::MAX_SIZE {
971            NonZeroU16::new(u16::MAX)
972        } else {
973            NonZeroU16::new((((size - 1) * 8) - 1) as u16)
974        }
975    }
976}
977
978impl<T> IOPermissionBitmap<T>
979where
980    T: AsRef<[u8]> + AsMut<[u8]> + Eq + ?Sized,
981{
982    /// The number of bytes in this structure.
983    pub fn size(&self) -> usize {
984        self.0.as_ref().len()
985    }
986
987    /// The last I/O port that is covered by this map. The bits for all ports greater than
988    /// this value are assumed to be set.
989    pub fn max_port(&self) -> Option<NonZeroU16> {
990        IOPermissionBitmap::max_port_for_size(self.size())
991    }
992
993    fn is_port_mapped(&self, port: u16) -> bool {
994        match self.max_port() {
995            None => false,
996            Some(max) => port <= max.get(),
997        }
998    }
999
1000    /// Get the value in this bitmap that indicates whether the port should be subject to
1001    /// privilege level checks.
1002    ///
1003    /// If the given port is beyond the range covered by this map
1004    /// [`max_port`](Self::max_port), this will return true, in line with the processor's
1005    /// behavior.
1006    pub fn is_port_checked(&self, port: u16) -> bool {
1007        if self.is_port_mapped(port) {
1008            let byte = (port / 8) as usize;
1009            #[allow(clippy::cast_possible_truncation)]
1010            let bit = (port % 8) as u8;
1011            get_bit(self.0.as_ref()[byte], bit)
1012        } else {
1013            // All remaining ports are assumed to be covered by privilege checks
1014            true
1015        }
1016    }
1017
1018    /// Set the value in this bitmap that indicates whether the port should be subject
1019    /// to privilege level checks.
1020    ///
1021    /// # Panics
1022    /// Panics if the given port is greater than [`max_port`](Self::max_port).
1023    pub fn set_port_checked(&mut self, port: u16, value: bool) {
1024        assert!(
1025            self.is_port_mapped(port),
1026            "Port {:x} is beyond the maximum value {:x?} supported by this map",
1027            port,
1028            self.max_port(),
1029        );
1030        let byte = (port / 8) as usize;
1031        #[allow(clippy::cast_possible_truncation)]
1032        let bit = (port % 8) as u8;
1033        let map = self.0.as_mut();
1034        map[byte] = set_bit(map[byte], bit, value);
1035    }
1036}
1037
1038
#[cfg(test)]
mod test {
    use super::*;
    use core::mem;

    // Reinterpret raw descriptor-table bytes as a SegmentDescriptor, so tests can pin
    // the exact in-memory bit layout.
    #[cfg(target_arch = "x86_64")]
    fn seg_desc_from_bytes(bytes: [u8; 16]) -> SegmentDescriptor {
        unsafe { mem::transmute(bytes) }
    }

    // Inverse of `seg_desc_from_bytes`: expose a descriptor's raw bytes for comparison.
    #[cfg(target_arch = "x86_64")]
    fn bytes_from_seg_desc(desc: SegmentDescriptor) -> [u8; 16] {
        unsafe { mem::transmute(desc) }
    }

    // Verify that the segment limit is read from/written to its split location: the low
    // 16 bits in bytes 0..2 and the high 4 bits in the low nibble of byte 6, without
    // disturbing any surrounding bits.
    #[test]
    #[cfg(target_arch = "x86_64")]
    #[rustfmt::skip]
    fn test_descriptor_limit() {
        assert_eq!(0x0000_0000, seg_desc_from_bytes([
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]).limit());
        assert_eq!(0x0000_0000, seg_desc_from_bytes([
            0x00, 0x00, 0xff, 0xff,
            0xff, 0xff, 0xf0, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
        ]).limit());
        assert_eq!(0x000f_ffff, seg_desc_from_bytes([
            0xff, 0xff, 0x00, 0x00,
            0x00, 0x00, 0x0f, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]).limit());
        assert_eq!(0x000a_bcde, seg_desc_from_bytes([
            0xde, 0xbc, 0x74, 0x48,
            0x52, 0x93, 0x8a, 0x51,
            0x72, 0x89, 0x73, 0x21,
            0x28, 0x05, 0x86, 0x85,
        ]).limit());

        let mut desc = seg_desc_from_bytes([
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]);
        desc.set_limit(0x000f_ffff);
        assert_eq!(bytes_from_seg_desc(desc), [
            0xff, 0xff, 0x00, 0x00,
            0x00, 0x00, 0x0f, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]);

        let mut desc = seg_desc_from_bytes([
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]);
        desc.set_limit(0x000a_bcde);
        assert_eq!(bytes_from_seg_desc(desc), [
            0xde, 0xbc, 0x00, 0x00,
            0x00, 0x00, 0x0a, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]);

        let mut desc = seg_desc_from_bytes([
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
        ]);
        desc.set_limit(0x0000_0000);
        assert_eq!(bytes_from_seg_desc(desc), [
            0x00, 0x00, 0xff, 0xff,
            0xff, 0xff, 0xf0, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
        ]);

        let mut desc = seg_desc_from_bytes([
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
        ]);
        desc.set_limit(0x000a_bcde);
        assert_eq!(bytes_from_seg_desc(desc), [
            0xde, 0xbc, 0xff, 0xff,
            0xff, 0xff, 0xfa, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
        ]);
    }

    // A limit above the 20-bit maximum must be rejected rather than silently truncated.
    #[test]
    #[should_panic(expected = "Segment limit too large: 0x100000")]
    #[cfg(target_arch = "x86_64")]
    #[rustfmt::skip]
    fn test_descriptor_limit_out_of_range() {
        let mut desc = seg_desc_from_bytes([
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]);
        desc.set_limit(0x0010_0000);
    }

    // Verify that the (64-bit) base address is assembled from/split into its four
    // non-contiguous locations: bytes 2..5, byte 7, and bytes 8..12, leaving all other
    // bits untouched.
    #[test]
    #[cfg(target_arch = "x86_64")]
    #[rustfmt::skip]
    fn test_descriptor_address() {
        assert_eq!(0x0000_0000_0000_0000_usize, seg_desc_from_bytes([
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]).address());
        assert_eq!(0x0000_0000_0000_0000_usize, seg_desc_from_bytes([
            0xff, 0xff, 0x00, 0x00,
            0x00, 0xff, 0xff, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0xff, 0xff, 0xff, 0xff,
        ]).address());
        assert_eq!(0xffff_ffff_ffff_ffff_usize, seg_desc_from_bytes([
            0x00, 0x00, 0xff, 0xff,
            0xff, 0x00, 0x00, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0x00, 0x00, 0x00, 0x00,
        ]).address());
        assert_eq!(0xaabb_ccdd_1122_3344_usize, seg_desc_from_bytes([
            0x8f, 0x97, 0x44, 0x33,
            0x22, 0x68, 0x5e, 0x11,
            0xdd, 0xcc, 0xbb, 0xaa,
            0xf8, 0x76, 0x89, 0xe5,
        ]).address());

        let mut desc = seg_desc_from_bytes([
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]);
        desc.set_address(0xffff_ffff_ffff_ffff_usize);
        assert_eq!(bytes_from_seg_desc(desc), [
            0x00, 0x00, 0xff, 0xff,
            0xff, 0x00, 0x00, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0x00, 0x00, 0x00, 0x00,
        ]);

        let mut desc = seg_desc_from_bytes([
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]);
        desc.set_address(0xaabb_ccdd_1122_3344_usize);
        assert_eq!(bytes_from_seg_desc(desc), [
            0x00, 0x00, 0x44, 0x33,
            0x22, 0x00, 0x00, 0x11,
            0xdd, 0xcc, 0xbb, 0xaa,
            0x00, 0x00, 0x00, 0x00,
        ]);

        let mut desc = seg_desc_from_bytes([
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
        ]);
        desc.set_address(0x0000_0000_0000_0000_usize);
        assert_eq!(bytes_from_seg_desc(desc), [
            0xff, 0xff, 0x00, 0x00,
            0x00, 0xff, 0xff, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0xff, 0xff, 0xff, 0xff,
        ]);

        let mut desc = seg_desc_from_bytes([
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
        ]);
        desc.set_address(0xaabb_ccdd_1122_3344_usize);
        assert_eq!(bytes_from_seg_desc(desc), [
            0xff, 0xff, 0x44, 0x33,
            0x22, 0xff, 0xff, 0x11,
            0xdd, 0xcc, 0xbb, 0xaa,
            0xff, 0xff, 0xff, 0xff,
        ]);
    }

    // Raw-byte helpers for GateDescriptor, mirroring the SegmentDescriptor helpers above.
    #[cfg(target_arch = "x86_64")]
    fn gate_desc_from_bytes(bytes: [u8; 16]) -> GateDescriptor {
        unsafe { mem::transmute(bytes) }
    }

    #[cfg(target_arch = "x86_64")]
    fn bytes_from_gate_desc(desc: GateDescriptor) -> [u8; 16] {
        unsafe { mem::transmute(desc) }
    }

    // Verify that the 64-bit entry point offset is split across bytes 0..2, 6..8, and
    // 8..12 of the gate descriptor, with getters and setters leaving other bits intact.
    #[test]
    #[cfg(target_arch = "x86_64")]
    #[rustfmt::skip]
    fn test_descriptor_offset() {
        assert_eq!(0x0000_0000_0000_0000_usize, gate_desc_from_bytes([
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]).entry_point_offset());
        assert_eq!(0x0000_0000_0000_0000_usize, gate_desc_from_bytes([
            0x00, 0x00, 0xff, 0xff,
            0xff, 0xff, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0xff, 0xff, 0xff, 0xff,
        ]).entry_point_offset());
        assert_eq!(0xffff_ffff_ffff_ffff_usize, gate_desc_from_bytes([
            0xff, 0xff, 0x00, 0x00,
            0x00, 0x00, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0x00, 0x00, 0x00, 0x00,
        ]).entry_point_offset());
        assert_eq!(0xaabb_ccdd_1122_3344_usize, gate_desc_from_bytes([
            0x44, 0x33, 0x8f, 0x97,
            0x68, 0x5e, 0x22, 0x11,
            0xdd, 0xcc, 0xbb, 0xaa,
            0xf8, 0x76, 0x89, 0xe5,
        ]).entry_point_offset());

        let mut desc = gate_desc_from_bytes([
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]);
        desc.set_entry_point_offset(0xffff_ffff_ffff_ffff_usize);
        assert_eq!(bytes_from_gate_desc(desc), [
            0xff, 0xff, 0x00, 0x00,
            0x00, 0x00, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0x00, 0x00, 0x00, 0x00,
        ]);

        let mut desc = gate_desc_from_bytes([
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]);
        desc.set_entry_point_offset(0xaabb_ccdd_1122_3344_usize);
        assert_eq!(bytes_from_gate_desc(desc), [
            0x44, 0x33, 0x00, 0x00,
            0x00, 0x00, 0x22, 0x11,
            0xdd, 0xcc, 0xbb, 0xaa,
            0x00, 0x00, 0x00, 0x00,
        ]);

        let mut desc = gate_desc_from_bytes([
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
        ]);
        desc.set_entry_point_offset(0x0000_0000_0000_0000_usize);
        assert_eq!(bytes_from_gate_desc(desc), [
            0x00, 0x00, 0xff, 0xff,
            0xff, 0xff, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0xff, 0xff, 0xff, 0xff,
        ]);

        let mut desc = gate_desc_from_bytes([
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
        ]);
        desc.set_entry_point_offset(0xaabb_ccdd_1122_3344_usize);
        assert_eq!(bytes_from_gate_desc(desc), [
            0x44, 0x33, 0xff, 0xff,
            0xff, 0xff, 0x22, 0x11,
            0xdd, 0xcc, 0xbb, 0xaa,
            0xff, 0xff, 0xff, 0xff,
        ]);
    }

    // Verify that the gate's segment selector lives in bytes 2..4 and that accessors
    // round-trip it without disturbing neighboring bits.
    #[test]
    #[cfg(target_arch = "x86_64")]
    #[rustfmt::skip]
    fn test_descriptor_selector() {
        assert_eq!(Selector(0x0000), gate_desc_from_bytes([
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]).selector());
        assert_eq!(Selector(0x0000), gate_desc_from_bytes([
            0xff, 0xff, 0x00, 0x00,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
        ]).selector());
        assert_eq!(Selector(0xffff), gate_desc_from_bytes([
            0x00, 0x00, 0xff, 0xff,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]).selector());
        assert_eq!(Selector(0xabcd), gate_desc_from_bytes([
            0x94, 0x82, 0xcd, 0xab,
            0x52, 0x93, 0x83, 0x51,
            0x72, 0x89, 0x73, 0x21,
            0x28, 0x05, 0x86, 0x85,
        ]).selector());

        let mut desc = gate_desc_from_bytes([
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]);
        desc.set_selector(0xffff.into());
        assert_eq!(bytes_from_gate_desc(desc), [
            0x00, 0x00, 0xff, 0xff,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]);

        let mut desc = gate_desc_from_bytes([
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]);
        desc.set_selector(0xabcd.into());
        assert_eq!(bytes_from_gate_desc(desc), [
            0x00, 0x00, 0xcd, 0xab,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]);

        let mut desc = gate_desc_from_bytes([
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
        ]);
        desc.set_selector(0x0000.into());
        assert_eq!(bytes_from_gate_desc(desc), [
            0xff, 0xff, 0x00, 0x00,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
        ]);

        let mut desc = gate_desc_from_bytes([
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
        ]);
        desc.set_selector(0xabcd.into());
        assert_eq!(bytes_from_gate_desc(desc), [
            0xff, 0xff, 0xcd, 0xab,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
        ]);
    }

    // Verify bit lookup (LSB-first within each byte) and that ports past the end of the
    // map — including the mandatory trailing byte — always report as checked.
    #[test]
    #[allow(clippy::bool_assert_comparison)]
    fn test_is_port_checked() {
        let map = IOPermissionBitmap([]);
        assert_eq!(map.is_port_checked(0), true);
        assert_eq!(map.is_port_checked(1), true);
        assert_eq!(map.is_port_checked(u16::MAX), true);

        let map = IOPermissionBitmap([0xff]);
        assert_eq!(map.is_port_checked(0), true);
        assert_eq!(map.is_port_checked(1), true);
        assert_eq!(map.is_port_checked(8), true);
        assert_eq!(map.is_port_checked(u16::MAX), true);

        let map = IOPermissionBitmap([0x00, 0xff]);
        assert_eq!(map.is_port_checked(0), false);
        assert_eq!(map.is_port_checked(1), false);
        assert_eq!(map.is_port_checked(7), false);
        assert_eq!(map.is_port_checked(8), true);
        assert_eq!(map.is_port_checked(u16::MAX), true);

        let map = IOPermissionBitmap([0xaa, 0xff]);
        assert_eq!(map.is_port_checked(0), false);
        assert_eq!(map.is_port_checked(1), true);
        assert_eq!(map.is_port_checked(2), false);
        assert_eq!(map.is_port_checked(3), true);
        assert_eq!(map.is_port_checked(4), false);
        assert_eq!(map.is_port_checked(5), true);
        assert_eq!(map.is_port_checked(6), false);
        assert_eq!(map.is_port_checked(7), true);
        assert_eq!(map.is_port_checked(8), true);
        assert_eq!(map.is_port_checked(u16::MAX), true);

        let map = IOPermissionBitmap([0x55, 0xff]);
        assert_eq!(map.is_port_checked(0), true);
        assert_eq!(map.is_port_checked(1), false);
        assert_eq!(map.is_port_checked(2), true);
        assert_eq!(map.is_port_checked(3), false);
        assert_eq!(map.is_port_checked(4), true);
        assert_eq!(map.is_port_checked(5), false);
        assert_eq!(map.is_port_checked(6), true);
        assert_eq!(map.is_port_checked(7), false);
        assert_eq!(map.is_port_checked(8), true);
        assert_eq!(map.is_port_checked(u16::MAX), true);

        let map = IOPermissionBitmap([0xff, 0x00, 0xff]);
        assert_eq!(map.is_port_checked(0), true);
        assert_eq!(map.is_port_checked(7), true);
        assert_eq!(map.is_port_checked(8), false);
        assert_eq!(map.is_port_checked(15), false);
        assert_eq!(map.is_port_checked(16), true);
        assert_eq!(map.is_port_checked(u16::MAX), true);

        // The processor expects the last byte to be 0xff, and it doesn't map any ports.
        // For our purposes, we ignore it and treat it as though it were 0xff.
        let map = IOPermissionBitmap([0x00]);
        assert_eq!(map.is_port_checked(0), true);
        assert_eq!(map.is_port_checked(1), true);
        assert_eq!(map.is_port_checked(7), true);
        assert_eq!(map.is_port_checked(8), true);
        assert_eq!(map.is_port_checked(u16::MAX), true);

        let map = IOPermissionBitmap([0x00, 0x00]);
        assert_eq!(map.is_port_checked(0), false);
        assert_eq!(map.is_port_checked(1), false);
        assert_eq!(map.is_port_checked(7), false);
        assert_eq!(map.is_port_checked(8), true);
        assert_eq!(map.is_port_checked(15), true);
        assert_eq!(map.is_port_checked(16), true);
        assert_eq!(map.is_port_checked(u16::MAX), true);
    }

    // Verify that setting/clearing a port's bit touches exactly one bit of one byte.
    #[test]
    fn test_set_port_checked() {
        let mut map = IOPermissionBitmap([0x00, 0x00, 0xff]);
        map.set_port_checked(0, true);
        assert_eq!(map.0, [0x01, 0x00, 0xff]);

        let mut map = IOPermissionBitmap([0x00, 0x00, 0xff]);
        map.set_port_checked(15, true);
        assert_eq!(map.0, [0x00, 0x80, 0xff]);

        let mut map = IOPermissionBitmap([0xff, 0xff, 0xff]);
        map.set_port_checked(0, false);
        assert_eq!(map.0, [0xfe, 0xff, 0xff]);

        let mut map = IOPermissionBitmap([0xff, 0xff, 0xff]);
        map.set_port_checked(15, false);
        assert_eq!(map.0, [0xff, 0x7f, 0xff]);
    }

    // Out-of-range writes must panic; these cases pin the exact panic message for maps
    // of various sizes, including sizes too small to map any port at all.
    #[test]
    #[should_panic(
        expected = "Port 10 is beyond the maximum value Some(f) supported by this map"
    )]
    fn test_set_port_checked_out_of_range_3byte() {
        let mut map = IOPermissionBitmap([0x00, 0x00, 0xff]);
        map.set_port_checked(16, true);
    }

    #[test]
    #[should_panic(
        expected = "Port 8 is beyond the maximum value Some(7) supported by this map"
    )]
    fn test_set_port_checked_out_of_range_2byte() {
        let mut map = IOPermissionBitmap([0x00, 0xff]);
        map.set_port_checked(8, true);
    }

    #[test]
    #[should_panic(
        expected = "Port 0 is beyond the maximum value None supported by this map"
    )]
    fn test_set_port_checked_out_of_range_1byte() {
        let mut map = IOPermissionBitmap([0xff]);
        map.set_port_checked(0, true);
    }

    #[test]
    #[should_panic(
        expected = "Port 0 is beyond the maximum value None supported by this map"
    )]
    fn test_set_port_checked_out_of_range_empty() {
        let mut map = IOPermissionBitmap([]);
        map.set_port_checked(0, true);
    }
}