1use core::arch::asm;
7use core::fmt;
8use core::num::NonZeroU16;
9use static_assertions::const_assert_eq;
10use tartan_bitfield::{
11 bitfield, bitfield_accessors, bitfield_without_debug, get_bit, set_bit, Bitfield,
12};
13use tartan_c_enum::c_enum;
14
15#[cfg(doc)]
16use super::FlagRegister;
17#[cfg(doc)]
18use crate::x86_64::protection::TaskStateSegmentHeader;
19#[cfg(doc)]
20use crate::x86_64::ExtendedFeatureEnableRegister;
21
22
/// In-memory image of the global descriptor table register, as read and
/// written by the `SGDT`/`LGDT` instructions.
///
/// `repr(C, packed)` is required: the processor expects the 16-bit limit
/// immediately followed by the base address with no padding between them.
#[repr(C, packed)]
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
pub struct GlobalDescriptorTableRegister {
    /// Byte-size bound of the descriptor table (per the GDTR format this is
    /// the table size minus one — confirm against how callers fill it in).
    pub limit: u16,
    /// Linear base address of the first descriptor in the table.
    pub address: usize,
}
32
impl GlobalDescriptorTableRegister {
    /// Read the current limit and base of the global descriptor table using
    /// the `SGDT` instruction.
    pub fn get() -> Self {
        let mut value = Self::default();
        // SAFETY: SGDT only writes the limit+base image, which is exactly
        // what `Self`'s packed layout describes; the pointer is valid and
        // writable for that many bytes.
        unsafe {
            asm!(
                "sgdt [{0}]",
                in(reg) &raw mut value,
            );
        }
        value
    }

    /// Install a new global descriptor table using the `LGDT` instruction.
    ///
    /// # Safety
    /// `value` must describe a valid descriptor table that outlives its
    /// installation. The segment registers are *not* reloaded; use
    /// [`Self::set_with_segments`] to update them as well.
    pub unsafe fn set(value: &Self) {
        asm!(
            "lgdt [{0}]",
            in(reg) value,
        );
    }

    /// Install a new global descriptor table and immediately reload all
    /// segment registers from it: the data/stack segments with plain moves,
    /// and CS via a far return (CS cannot be the destination of a `mov`).
    ///
    /// # Safety
    /// `gdt_pointer` must describe a valid, live descriptor table, and both
    /// selectors must reference valid, present descriptors of the
    /// appropriate type in it; otherwise the processor will fault.
    ///
    /// NOTE(review): `.dummy_target` is a named assembly label; if this body
    /// were ever duplicated into one codegen unit (e.g. inlined twice) the
    /// label would collide. Numeric local labels would be more robust —
    /// confirm this function is never inlined.
    #[allow(named_asm_labels)]
    pub unsafe fn set_with_segments(
        gdt_pointer: &Self,
        code_selector: Selector,
        data_selector: Selector,
    ) {
        #[cfg(target_arch = "x86")]
        asm!(
            "
            // Set the global descriptor table register
            lgdt [{gdt_pointer}]

            // Update the data segment selectors
            mov ds, {data_selector:e}
            mov es, {data_selector:e}
            mov fs, {data_selector:e}
            mov gs, {data_selector:e}
            mov ss, {data_selector:e}

            // Update the code segment selector. Directly loading the CS register doesn't
            // work, so we use a far return. The 'return' is really a jump to the next
            // instruction using a far pointer we push onto the stack, consisting of:
            //   * [rsp]     = 32-bit offset
            //   * [rsp + 4] = 32-bit segment selector (zero-extended from 16 bits)
            push {code_selector:e}
            mov eax, offset .dummy_target
            push eax
            retf
            .dummy_target:
            ",
            gdt_pointer = in(reg) gdt_pointer,
            data_selector = in(reg) u16::from(data_selector),
            code_selector = in(reg) u16::from(code_selector),
            out("eax") _,
        );

        #[cfg(target_arch = "x86_64")]
        asm!(
            "
            // Set the global descriptor table register
            lgdt [{gdt_pointer}]

            // Update the data segment selectors
            mov ds, {data_selector:r}
            mov es, {data_selector:r}
            mov fs, {data_selector:r}
            mov gs, {data_selector:r}
            mov ss, {data_selector:r}

            // Update the code segment selector. Directly loading the CS register doesn't
            // work, so we use a far return. The 'return' is really a jump to the next
            // instruction using a far pointer we push onto the stack, consisting of:
            //   * [rsp]     = 64-bit offset
            //   * [rsp + 8] = 64-bit segment selector (zero-extended from 16 bits)
            push {code_selector:r}
            lea rax, [rip + .dummy_target]
            push rax
            rex64 retf
            .dummy_target:
            ",
            gdt_pointer = in(reg) gdt_pointer,
            data_selector = in(reg) u16::from(data_selector),
            code_selector = in(reg) u16::from(code_selector),
            out("rax") _,
        );
    }
}
132
133
/// Access to the local descriptor table register (`SLDT`/`LLDT`).
///
/// The register is only reachable through the associated functions, so this
/// is an uninhabited enum rather than an instantiable type.
#[allow(clippy::empty_enum)]
pub enum LocalDescriptorTableRegister {}

impl LocalDescriptorTableRegister {
    /// Read the selector that locates the current LDT within the GDT, using
    /// the `SLDT` instruction.
    pub fn get() -> Selector {
        let mut value = Selector(0);
        // SAFETY: SLDT only stores a 16-bit selector into the given register
        // operand; no memory is touched.
        unsafe {
            asm!(
                "sldt {0:x}",
                out(reg) value.0,
            );
        }
        value
    }

    /// Load the local descriptor table register with `LLDT`.
    ///
    /// # Safety
    /// `selector` must reference a valid LDT descriptor in the current GDT
    /// (or be null to disable the LDT); otherwise the processor will fault.
    pub unsafe fn set(selector: Selector) {
        asm!(
            "lldt {0:x}",
            in(reg) selector.0,
        );
    }

    /// Pointer to the GDT entry that describes the current local descriptor
    /// table.
    ///
    /// # Safety
    /// The returned pointer is only meaningful while the current GDT and LDT
    /// selector remain installed; it dereferences state read straight from
    /// the descriptor-table registers.
    pub unsafe fn current_descriptor() -> *const SegmentDescriptor {
        let address = LocalDescriptorTableRegister::get().descriptor_address();
        address as *const SegmentDescriptor
    }
}
183
184
/// Access to the task register (`STR`/`LTR`), which locates the current task
/// state segment within the GDT.
///
/// Uninhabited: the register is only reachable through associated functions.
#[allow(clippy::empty_enum)]
pub enum TaskRegister {}

impl TaskRegister {
    /// Read the selector of the current task state segment with `STR`.
    pub fn get() -> Selector {
        let mut value = Selector(0);
        // SAFETY: STR only stores a 16-bit selector into the register
        // operand; no memory is touched.
        unsafe {
            asm!(
                "str {0:x}",
                out(reg) value.0,
            );
        }
        value
    }

    /// Load the task register with `LTR`.
    ///
    /// # Safety
    /// `selector` must reference a valid, available TSS descriptor in the
    /// current GDT; otherwise the processor will fault.
    pub unsafe fn set(selector: Selector) {
        asm!(
            "ltr {0:x}",
            in(reg) selector.0,
        );
    }
}
223
224
/// The six segment registers whose selectors can be read or written.
pub enum SegmentRegister {
    /// CS — code segment.
    Code,
    /// DS — default data segment.
    Data,
    /// SS — stack segment.
    Stack,
    /// ES — extra (data) segment.
    Extra,
    /// FS — second extra segment.
    ExtraF,
    /// GS — third extra segment.
    ExtraG,
}
241
impl SegmentRegister {
    /// Read the selector currently loaded in this segment register.
    pub fn get(self) -> Selector {
        let mut value = Selector(0);
        // SAFETY: moving a segment register into a general-purpose register
        // has no side effects.
        unsafe {
            match self {
                Self::Code => asm!("mov {0:x}, cs", out(reg) value.0),
                Self::Data => asm!("mov {0:x}, ds", out(reg) value.0),
                Self::Stack => asm!("mov {0:x}, ss", out(reg) value.0),
                Self::Extra => asm!("mov {0:x}, es", out(reg) value.0),
                Self::ExtraF => asm!("mov {0:x}, fs", out(reg) value.0),
                Self::ExtraG => asm!("mov {0:x}, gs", out(reg) value.0),
            }
        }
        value
    }

    /// Load a new selector into this segment register.
    ///
    /// # Safety
    /// `selector` must reference a valid, present descriptor of a type
    /// appropriate for this register; otherwise the processor will fault.
    ///
    /// NOTE(review): the `Self::Code` arm emits `mov cs, …`, but CS is not a
    /// legal destination for MOV on x86 (the instruction form is undefined
    /// and raises #UD); reloading CS requires a far jump/return as done in
    /// `GlobalDescriptorTableRegister::set_with_segments`. Confirm whether
    /// this arm is intentional or should be rejected.
    pub unsafe fn set(self, selector: Selector) {
        match self {
            Self::Code => asm!("mov cs, {0:x}", in(reg) selector.0),
            Self::Data => asm!("mov ds, {0:x}", in(reg) selector.0),
            Self::Stack => asm!("mov ss, {0:x}", in(reg) selector.0),
            Self::Extra => asm!("mov es, {0:x}", in(reg) selector.0),
            Self::ExtraF => asm!("mov fs, {0:x}", in(reg) selector.0),
            Self::ExtraG => asm!("mov gs, {0:x}", in(reg) selector.0),
        }
    }
}
275
276
// A 16-bit segment selector: a descriptor-table byte offset in the top 13
// bits (see `Selector::offset`), plus the two fields declared here.
bitfield! {
    pub struct Selector(u16) {
        // Requested privilege level (0 = most privileged).
        [0..2] pub privilege_level: u8,
        // Table indicator: set = LDT, clear = GDT.
        [2] pub local,
    }
}
295
296impl Selector {
297 const OFFSET_MASK: u16 = 0xfff8;
298
299 pub const fn new(offset: u16, privilege_level: u8, local: bool) -> Self {
301 let value = (offset & Self::OFFSET_MASK)
303 | ((privilege_level & 0b11) as u16)
304 | ((local as u16) << 2);
305 Self(value)
306 }
307
308 pub const fn null() -> Self {
310 Self(0)
311 }
312
313 pub fn offset(self) -> u16 {
315 self.0 & Self::OFFSET_MASK
316 }
317
318 pub fn set_offset(&mut self, offset: u16) {
323 assert!(
324 offset & !Self::OFFSET_MASK == 0,
325 "Descriptor offset {offset} is not aligned on an 8-byte boundary"
326 );
327 self.0 &= !Self::OFFSET_MASK;
328 self.0 |= offset;
329 }
330
331 pub fn descriptor_address(self) -> usize {
333 let table_address = if self.local() {
334 let local_table =
335 unsafe { &*(LocalDescriptorTableRegister::current_descriptor()) };
336 local_table.address()
337 } else {
338 GlobalDescriptorTableRegister::get().address
339 };
340 table_address + self.offset() as usize
341 }
342}
343
344
/// Accessors for the flag bits shared by every descriptor kind, defined once
/// and reused by each concrete `*DescriptorFlags` bitfield type.
pub trait DescriptorFlags: Bitfield<u32> {
    bitfield_accessors! {
        // System descriptors only (when `is_application` is clear): which
        // kind of system descriptor or gate this is.
        [ 8..12] system_type: u8 as SystemDescriptorType,

        // Application (code/data) descriptors only: set by the processor
        // when the segment is accessed.
        [ 8] application_accessed,

        // Code segments: whether the segment may also be read (not just
        // executed).
        [ 9] code_readable,

        // Data segments: whether the segment may be written.
        [ 9] data_writable,

        // Code segments: whether lower-privilege callers may execute here
        // without a privilege change.
        [10] code_conforming,

        // Data segments: whether the limit grows downward (stack-style).
        [10] data_expand_down,

        // Application descriptors: code segment when set, data when clear.
        [11] is_code,

        // Application (code/data) descriptor when set; system descriptor or
        // gate when clear.
        [12] is_application,

        // Descriptor privilege level (0 = most privileged).
        [13..15] privilege_level: u8,

        // Whether the descriptor refers to a usable segment/gate.
        [15] present,

        // 64-bit code segment (long mode only).
        #[cfg(any(target_arch = "x86_64", doc))]
        #[doc(cfg(target_arch = "x86_64"))]
        [21] code_mode_64,

        // 32-bit (vs. 16-bit) default operand/address size for application
        // segments.
        [22] application_mode_32,
    }

    /// True if this is a system descriptor whose type is one of the gate
    /// kinds.
    fn is_gate(&self) -> bool {
        !self.is_application() && self.system_type().is_gate()
    }
}
448
449
// The 4-bit type code of a system descriptor (meaningful when the
// `is_application` flag is clear). The 16-bit and task-gate variants only
// exist in 32-bit protected mode.
c_enum! {
    pub enum SystemDescriptorType(u8) {
        #[cfg(any(target_arch = "x86", doc))]
        #[doc(cfg(target_arch = "x86"))]
        TaskStateAvailable16Bit = 1,

        LocalDescriptorTable = 2,

        #[cfg(any(target_arch = "x86", doc))]
        #[doc(cfg(target_arch = "x86"))]
        TaskStateBusy16Bit = 3,

        #[cfg(any(target_arch = "x86", doc))]
        #[doc(cfg(target_arch = "x86"))]
        CallGate16Bit = 4,

        #[cfg(any(target_arch = "x86", doc))]
        #[doc(cfg(target_arch = "x86"))]
        TaskGate = 5,

        #[cfg(any(target_arch = "x86", doc))]
        #[doc(cfg(target_arch = "x86"))]
        InterruptGate16Bit = 6,

        #[cfg(any(target_arch = "x86", doc))]
        #[doc(cfg(target_arch = "x86"))]
        TrapGate16Bit = 7,

        // Task state segment not currently executing/linked.
        TaskStateAvailable = 9,

        // Task state segment that is running or waiting on a call.
        TaskStateBusy = 11,

        CallGate = 12,

        InterruptGate = 14,

        TrapGate = 15,
    }
}
510
511impl SystemDescriptorType {
512 pub fn is_gate(self) -> bool {
514 #[cfg(target_arch = "x86")]
515 {
516 matches!(
517 self,
518 Self::CallGate
519 | Self::CallGate16Bit
520 | Self::InterruptGate
521 | Self::InterruptGate16Bit
522 | Self::TrapGate
523 | Self::TrapGate16Bit
524 | Self::TaskGate
525 )
526 }
527
528 #[cfg(target_arch = "x86_64")]
529 {
530 matches!(self, Self::CallGate | Self::InterruptGate | Self::TrapGate)
531 }
532 }
533}
534
535
536
/// A descriptor-table entry whose concrete kind (segment vs. gate) is not
/// known; only the flag bits common to all kinds are exposed.
#[repr(C)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct GenericDescriptor {
    // First doubleword; its interpretation depends on the descriptor kind.
    lower: u32,

    /// Flag bits shared by all descriptor kinds.
    pub flags: GenericDescriptorFlags,

    // Long mode widens system descriptors to 16 bytes; the extra dwords are
    // kind-specific, so they stay opaque here.
    #[cfg(any(target_arch = "x86_64", doc))]
    #[doc(cfg(target_arch = "x86_64"))]
    upper: u32,

    #[cfg(any(target_arch = "x86_64", doc))]
    #[doc(cfg(target_arch = "x86_64"))]
    _reserved: u32,
}

// Descriptors are 8 bytes in 32-bit mode and 16 bytes (for system
// descriptors) in long mode; verify the repr(C) layout matches.
#[cfg(target_arch = "x86")]
const_assert_eq!(8, core::mem::size_of::<GenericDescriptor>());

#[cfg(target_arch = "x86_64")]
const_assert_eq!(16, core::mem::size_of::<GenericDescriptor>());
563
564
// Flags dword of a generic descriptor. It declares no fields of its own:
// every accessor comes from the blanket `DescriptorFlags` impl, and `Debug`
// is written by hand so those trait-provided fields can be printed too.
bitfield_without_debug! {
    pub struct GenericDescriptorFlags(u32) {}
}
569
570impl fmt::Debug for GenericDescriptorFlags {
571 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
572 let mut struct_fmt = f.debug_struct("GenericDescriptorFlags");
573 struct_fmt.field("<value>", &self.0);
574 self.fmt_fields(&mut struct_fmt);
575 <Self as DescriptorFlags>::fmt_fields(self, &mut struct_fmt);
576 struct_fmt.finish()
577 }
578}
579
580impl DescriptorFlags for GenericDescriptorFlags {}
581
/// A descriptor describing a memory segment (code, data, TSS, or LDT): a
/// base address and limit scattered across the entry, plus flags.
#[repr(C)]
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct SegmentDescriptor {
    // Limit bits 0–15 (low word) and base-address bits 0–15 (high word).
    lower: u32,

    /// Flag bits, which also embed base bits 16–31 and limit bits 16–19
    /// (see the masks in the `impl`).
    pub flags: SegmentDescriptorFlags,

    // Long mode only: base-address bits 32–63.
    #[cfg(any(target_arch = "x86_64", doc))]
    #[doc(cfg(target_arch = "x86_64"))]
    upper: u32,

    #[cfg(any(target_arch = "x86_64", doc))]
    #[doc(cfg(target_arch = "x86_64"))]
    _reserved: u32,
}

// Verify the repr(C) layout matches the architectural descriptor size.
#[cfg(target_arch = "x86")]
const_assert_eq!(8, core::mem::size_of::<SegmentDescriptor>());

#[cfg(target_arch = "x86_64")]
const_assert_eq!(16, core::mem::size_of::<SegmentDescriptor>());
619
620impl SegmentDescriptor {
621 pub const LIMIT_MAX: u32 = 0x000f_ffff;
623 const LIMIT_MASK_LOWER: u32 = 0x0000_ffff;
624 const LIMIT_MASK_FLAGS: u32 = 0x000f_0000;
625
626 const ADDRESS_MASK_LOWER: u32 = 0xffff_0000;
627 const ADDRESS_MASK_FLAGS_LOWER: u32 = 0x0000_00ff;
628 const ADDRESS_MASK_FLAGS_UPPER: u32 = 0xff00_0000;
629
630 pub const fn new() -> Self {
632 #[cfg(target_arch = "x86")]
633 let value = Self { lower: 0, flags: SegmentDescriptorFlags(0) };
634
635 #[cfg(target_arch = "x86_64")]
636 let value =
637 Self { lower: 0, flags: SegmentDescriptorFlags(0), upper: 0, _reserved: 0 };
638
639 value
640 }
641
642 pub fn address(self) -> usize {
647 let mut address = ((self.lower & Self::ADDRESS_MASK_LOWER) >> 16) as usize;
648 address |= ((Self::ADDRESS_MASK_FLAGS_LOWER & self.flags.value()) << 16) as usize;
649 address |= (Self::ADDRESS_MASK_FLAGS_UPPER & self.flags.value()) as usize;
650 #[cfg(target_arch = "x86_64")]
651 {
652 address |= (self.upper as usize) << 32;
653 }
654 address
655 }
656
657 pub fn set_address(&mut self, address: usize) {
659 #![allow(clippy::cast_possible_truncation)]
660
661 self.lower &= !Self::ADDRESS_MASK_LOWER;
662 self.lower |= (address << 16) as u32 & Self::ADDRESS_MASK_LOWER;
663
664 self.flags.0 &= !Self::ADDRESS_MASK_FLAGS_LOWER & !Self::ADDRESS_MASK_FLAGS_UPPER;
665 self.flags.0 |= Self::ADDRESS_MASK_FLAGS_LOWER & (address >> 16) as u32;
666 self.flags.0 |= Self::ADDRESS_MASK_FLAGS_UPPER & address as u32;
667
668 #[cfg(target_arch = "x86_64")]
669 {
670 self.upper = (address >> 32) as u32;
671 }
672 }
673
674 pub fn limit(self) -> u32 {
688 let mut limit = self.lower & Self::LIMIT_MASK_LOWER;
689 limit |= self.flags.value() & Self::LIMIT_MASK_FLAGS;
690 limit
691 }
692
693 pub fn set_limit(&mut self, limit: u32) {
698 assert!(limit <= Self::LIMIT_MAX, "Segment limit too large: {limit:#x}");
699
700 self.lower &= !Self::LIMIT_MASK_LOWER;
701 self.lower |= Self::LIMIT_MASK_LOWER & limit;
702
703 self.flags.0 &= !Self::LIMIT_MASK_FLAGS;
704 self.flags.0 |= Self::LIMIT_MASK_FLAGS & limit;
705 }
706}
707
708impl fmt::Debug for SegmentDescriptor {
709 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
710 f.debug_struct("SegmentDescriptor")
711 .field("address", &self.address())
712 .field("limit", &self.limit())
713 .field("flags", &self.flags)
714 .finish_non_exhaustive()
715 }
716}
717
718impl Default for SegmentDescriptor {
719 fn default() -> Self {
720 Self::new()
721 }
722}
723
724
// Flags dword of a segment descriptor. The common accessors come from the
// `DescriptorFlags` impl below; only segment-specific bits are declared
// here. (Other bits of this dword hold address/limit pieces — see the masks
// in `SegmentDescriptor`.)
bitfield_without_debug! {
    pub struct SegmentDescriptorFlags(u32) {
        // Free for operating-system use.
        [20] pub os_defined,
        // Limit granularity flag (byte vs. page units — see the processor
        // manual for exact semantics).
        [23] pub granularity,
    }
}
738
739impl fmt::Debug for SegmentDescriptorFlags {
740 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
741 let mut struct_fmt = f.debug_struct("SegmentDescriptorFlags");
742 struct_fmt.field("<value>", &self.0);
743 self.fmt_fields(&mut struct_fmt);
744 <Self as DescriptorFlags>::fmt_fields(self, &mut struct_fmt);
745 struct_fmt.finish()
746 }
747}
748
749impl DescriptorFlags for SegmentDescriptorFlags {}
750
751
/// A gate descriptor (call/interrupt/trap gate): a target code-segment
/// selector plus an entry-point offset scattered across the entry.
#[repr(C)]
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct GateDescriptor {
    // Entry-point offset bits 0–15 (low word) and the target selector
    // (high word).
    lower: u32,

    /// Flag bits, which also embed entry-point offset bits 16–31 (see the
    /// masks in the `impl`).
    pub flags: GateDescriptorFlags,

    // Long mode only: entry-point offset bits 32–63.
    #[cfg(any(target_arch = "x86_64", doc))]
    #[doc(cfg(target_arch = "x86_64"))]
    upper: u32,

    #[cfg(any(target_arch = "x86_64", doc))]
    #[doc(cfg(target_arch = "x86_64"))]
    _reserved: u32,
}

// Verify the repr(C) layout matches the architectural descriptor size.
#[cfg(target_arch = "x86")]
const_assert_eq!(8, core::mem::size_of::<GateDescriptor>());

#[cfg(target_arch = "x86_64")]
const_assert_eq!(16, core::mem::size_of::<GateDescriptor>());
785
786impl GateDescriptor {
787 const SELECTOR_MASK: u32 = 0xffff_0000;
788 const OFFSET_MASK_LOWER: u32 = 0x0000_ffff;
789 const OFFSET_MASK_UPPER: u32 = 0xffff_0000;
790
791 pub const fn new() -> Self {
793 #[cfg(target_arch = "x86")]
794 let value = Self { lower: 0, flags: GateDescriptorFlags(0) };
795
796 #[cfg(target_arch = "x86_64")]
797 let value =
798 Self { lower: 0, flags: GateDescriptorFlags(0), upper: 0, _reserved: 0 };
799
800 value
801 }
802
803 pub fn selector(self) -> Selector {
810 #![allow(clippy::cast_possible_truncation)]
811 Selector(((self.lower & Self::SELECTOR_MASK) >> 16) as u16)
812 }
813
814 pub fn set_selector(&mut self, selector: Selector) {
816 self.lower &= !Self::SELECTOR_MASK;
817 self.lower |= Self::SELECTOR_MASK & (u32::from(selector.value()) << 16);
818 }
819
820 pub fn entry_point_offset(self) -> usize {
825 let mut offset = (self.lower & Self::OFFSET_MASK_LOWER) as usize;
826 offset |= (self.flags.value() & Self::OFFSET_MASK_UPPER) as usize;
827 #[cfg(target_arch = "x86_64")]
828 {
829 offset |= (self.upper as usize) << 32;
830 }
831 offset
832 }
833
834 pub fn set_entry_point_offset(&mut self, offset: usize) {
836 #![allow(clippy::cast_possible_truncation)]
837
838 self.lower &= !Self::OFFSET_MASK_LOWER;
839 self.lower |= Self::OFFSET_MASK_LOWER & (offset as u32);
840
841 self.flags.0 &= !Self::OFFSET_MASK_UPPER;
842 self.flags.0 |= Self::OFFSET_MASK_UPPER & (offset as u32);
843
844 #[cfg(target_arch = "x86_64")]
845 {
846 self.upper = (offset >> 32) as u32;
847 }
848 }
849}
850
851impl fmt::Debug for GateDescriptor {
852 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
853 f.debug_struct("GateDescriptor")
854 .field("selector", &self.selector())
855 .field("entry_point_offset", &self.entry_point_offset())
856 .field("flags", &self.flags)
857 .finish_non_exhaustive()
858 }
859}
860
861impl Default for GateDescriptor {
862 fn default() -> Self {
863 Self::new()
864 }
865}
866
867
// Flags dword of a gate descriptor. The common accessors come from the
// `DescriptorFlags` impl below. Note that the two fields declared here
// overlap (both start at bit 0); which one is meaningful depends on the
// gate type.
bitfield_without_debug! {
    pub struct GateDescriptorFlags(u32) {
        // Call gates: number of parameters to copy on a stack switch.
        [0..4] pub call_param_count: u8,

        // Long-mode interrupt/trap gates: interrupt-stack-table slot to
        // switch to (0 = no IST switch).
        #[cfg(any(target_arch = "x86_64", doc))]
        #[doc(cfg(target_arch = "x86_64"))]
        [0..2] pub interrupt_stack_index: u8,
    }
}
889
890impl fmt::Debug for GateDescriptorFlags {
891 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
892 let mut struct_fmt = f.debug_struct("GateDescriptorFlags");
893 struct_fmt.field("<value>", &self.0);
894 self.fmt_fields(&mut struct_fmt);
895 <Self as DescriptorFlags>::fmt_fields(self, &mut struct_fmt);
896 struct_fmt.finish()
897 }
898}
899
900impl DescriptorFlags for GateDescriptorFlags {}
901
902
/// An I/O permission bitmap, one bit per port: a set bit means access to
/// that port is checked (denied at insufficient privilege).
///
/// The sizing math in the `impl` reserves one extra byte beyond the last
/// port's byte — the processor requires a terminator byte after the map
/// (see `required_size`). The generic parameter lets the map be backed by a
/// fixed array or an unsized slice (the default).
#[derive(Debug, PartialEq, Eq)]
pub struct IOPermissionBitmap<T = [u8]>(T)
where
    T: AsRef<[u8]> + AsMut<[u8]> + Eq + ?Sized;
923
924impl IOPermissionBitmap {
925 pub const MAX_SIZE: usize = Self::required_size(u16::MAX);
927
928 pub const fn required_size(max_port: u16) -> usize {
944 max_port as usize / 8 + 2
945 }
946
947 pub const fn max_port_for_size(size: usize) -> Option<NonZeroU16> {
967 #[allow(clippy::cast_possible_truncation)]
968 if size <= 1 {
969 None
970 } else if size >= Self::MAX_SIZE {
971 NonZeroU16::new(u16::MAX)
972 } else {
973 NonZeroU16::new((((size - 1) * 8) - 1) as u16)
974 }
975 }
976}
977
978impl<T> IOPermissionBitmap<T>
979where
980 T: AsRef<[u8]> + AsMut<[u8]> + Eq + ?Sized,
981{
982 pub fn size(&self) -> usize {
984 self.0.as_ref().len()
985 }
986
987 pub fn max_port(&self) -> Option<NonZeroU16> {
990 IOPermissionBitmap::max_port_for_size(self.size())
991 }
992
993 fn is_port_mapped(&self, port: u16) -> bool {
994 match self.max_port() {
995 None => false,
996 Some(max) => port <= max.get(),
997 }
998 }
999
1000 pub fn is_port_checked(&self, port: u16) -> bool {
1007 if self.is_port_mapped(port) {
1008 let byte = (port / 8) as usize;
1009 #[allow(clippy::cast_possible_truncation)]
1010 let bit = (port % 8) as u8;
1011 get_bit(self.0.as_ref()[byte], bit)
1012 } else {
1013 true
1015 }
1016 }
1017
1018 pub fn set_port_checked(&mut self, port: u16, value: bool) {
1024 assert!(
1025 self.is_port_mapped(port),
1026 "Port {:x} is beyond the maximum value {:x?} supported by this map",
1027 port,
1028 self.max_port(),
1029 );
1030 let byte = (port / 8) as usize;
1031 #[allow(clippy::cast_possible_truncation)]
1032 let bit = (port % 8) as u8;
1033 let map = self.0.as_mut();
1034 map[byte] = set_bit(map[byte], bit, value);
1035 }
1036}
1037
1038
#[cfg(test)]
mod test {
    //! Tests for the descriptor bit layouts (via byte-level round trips) and
    //! the I/O permission bitmap. The descriptor tests are x86-64-only since
    //! they fix the descriptor size at 16 bytes.
    use super::*;
    use core::mem;

    // Reinterpret raw descriptor-table bytes as a SegmentDescriptor.
    #[cfg(target_arch = "x86_64")]
    fn seg_desc_from_bytes(bytes: [u8; 16]) -> SegmentDescriptor {
        unsafe { mem::transmute(bytes) }
    }

    // Inverse of `seg_desc_from_bytes`, to check in-memory layout.
    #[cfg(target_arch = "x86_64")]
    fn bytes_from_seg_desc(desc: SegmentDescriptor) -> [u8; 16] {
        unsafe { mem::transmute(desc) }
    }

    // limit() / set_limit() must touch exactly the low word plus the low
    // nibble of byte 6, leaving every other bit untouched.
    #[test]
    #[cfg(target_arch = "x86_64")]
    #[rustfmt::skip]
    fn test_descriptor_limit() {
        assert_eq!(0x0000_0000, seg_desc_from_bytes([
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]).limit());
        assert_eq!(0x0000_0000, seg_desc_from_bytes([
            0x00, 0x00, 0xff, 0xff,
            0xff, 0xff, 0xf0, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
        ]).limit());
        assert_eq!(0x000f_ffff, seg_desc_from_bytes([
            0xff, 0xff, 0x00, 0x00,
            0x00, 0x00, 0x0f, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]).limit());
        assert_eq!(0x000a_bcde, seg_desc_from_bytes([
            0xde, 0xbc, 0x74, 0x48,
            0x52, 0x93, 0x8a, 0x51,
            0x72, 0x89, 0x73, 0x21,
            0x28, 0x05, 0x86, 0x85,
        ]).limit());

        let mut desc = seg_desc_from_bytes([
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]);
        desc.set_limit(0x000f_ffff);
        assert_eq!(bytes_from_seg_desc(desc), [
            0xff, 0xff, 0x00, 0x00,
            0x00, 0x00, 0x0f, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]);

        let mut desc = seg_desc_from_bytes([
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]);
        desc.set_limit(0x000a_bcde);
        assert_eq!(bytes_from_seg_desc(desc), [
            0xde, 0xbc, 0x00, 0x00,
            0x00, 0x00, 0x0a, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]);

        // All-ones descriptor: only the limit bits may be cleared.
        let mut desc = seg_desc_from_bytes([
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
        ]);
        desc.set_limit(0x0000_0000);
        assert_eq!(bytes_from_seg_desc(desc), [
            0x00, 0x00, 0xff, 0xff,
            0xff, 0xff, 0xf0, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
        ]);

        let mut desc = seg_desc_from_bytes([
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
        ]);
        desc.set_limit(0x000a_bcde);
        assert_eq!(bytes_from_seg_desc(desc), [
            0xde, 0xbc, 0xff, 0xff,
            0xff, 0xff, 0xfa, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
        ]);
    }

    // Limits above LIMIT_MAX (20 bits) must be rejected.
    #[test]
    #[should_panic(expected = "Segment limit too large: 0x100000")]
    #[cfg(target_arch = "x86_64")]
    #[rustfmt::skip]
    fn test_descriptor_limit_out_of_range() {
        let mut desc = seg_desc_from_bytes([
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]);
        desc.set_limit(0x0010_0000);
    }

    // address() / set_address() must touch exactly bytes 2–4, 7, and 8–11,
    // which hold base bits 0–23, 24–31, and 32–63 respectively.
    #[test]
    #[cfg(target_arch = "x86_64")]
    #[rustfmt::skip]
    fn test_descriptor_address() {
        assert_eq!(0x0000_0000_0000_0000_usize, seg_desc_from_bytes([
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]).address());
        assert_eq!(0x0000_0000_0000_0000_usize, seg_desc_from_bytes([
            0xff, 0xff, 0x00, 0x00,
            0x00, 0xff, 0xff, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0xff, 0xff, 0xff, 0xff,
        ]).address());
        assert_eq!(0xffff_ffff_ffff_ffff_usize, seg_desc_from_bytes([
            0x00, 0x00, 0xff, 0xff,
            0xff, 0x00, 0x00, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0x00, 0x00, 0x00, 0x00,
        ]).address());
        assert_eq!(0xaabb_ccdd_1122_3344_usize, seg_desc_from_bytes([
            0x8f, 0x97, 0x44, 0x33,
            0x22, 0x68, 0x5e, 0x11,
            0xdd, 0xcc, 0xbb, 0xaa,
            0xf8, 0x76, 0x89, 0xe5,
        ]).address());

        let mut desc = seg_desc_from_bytes([
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]);
        desc.set_address(0xffff_ffff_ffff_ffff_usize);
        assert_eq!(bytes_from_seg_desc(desc), [
            0x00, 0x00, 0xff, 0xff,
            0xff, 0x00, 0x00, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0x00, 0x00, 0x00, 0x00,
        ]);

        let mut desc = seg_desc_from_bytes([
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]);
        desc.set_address(0xaabb_ccdd_1122_3344_usize);
        assert_eq!(bytes_from_seg_desc(desc), [
            0x00, 0x00, 0x44, 0x33,
            0x22, 0x00, 0x00, 0x11,
            0xdd, 0xcc, 0xbb, 0xaa,
            0x00, 0x00, 0x00, 0x00,
        ]);

        // All-ones descriptor: only the address bits may be cleared.
        let mut desc = seg_desc_from_bytes([
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
        ]);
        desc.set_address(0x0000_0000_0000_0000_usize);
        assert_eq!(bytes_from_seg_desc(desc), [
            0xff, 0xff, 0x00, 0x00,
            0x00, 0xff, 0xff, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0xff, 0xff, 0xff, 0xff,
        ]);

        let mut desc = seg_desc_from_bytes([
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
        ]);
        desc.set_address(0xaabb_ccdd_1122_3344_usize);
        assert_eq!(bytes_from_seg_desc(desc), [
            0xff, 0xff, 0x44, 0x33,
            0x22, 0xff, 0xff, 0x11,
            0xdd, 0xcc, 0xbb, 0xaa,
            0xff, 0xff, 0xff, 0xff,
        ]);
    }

    // Reinterpret raw descriptor-table bytes as a GateDescriptor.
    #[cfg(target_arch = "x86_64")]
    fn gate_desc_from_bytes(bytes: [u8; 16]) -> GateDescriptor {
        unsafe { mem::transmute(bytes) }
    }

    // Inverse of `gate_desc_from_bytes`, to check in-memory layout.
    #[cfg(target_arch = "x86_64")]
    fn bytes_from_gate_desc(desc: GateDescriptor) -> [u8; 16] {
        unsafe { mem::transmute(desc) }
    }

    // entry_point_offset() / set_entry_point_offset() must touch exactly
    // bytes 0–1, 6–7, and 8–11 (offset bits 0–15, 16–31, 32–63).
    #[test]
    #[cfg(target_arch = "x86_64")]
    #[rustfmt::skip]
    fn test_descriptor_offset() {
        assert_eq!(0x0000_0000_0000_0000_usize, gate_desc_from_bytes([
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]).entry_point_offset());
        assert_eq!(0x0000_0000_0000_0000_usize, gate_desc_from_bytes([
            0x00, 0x00, 0xff, 0xff,
            0xff, 0xff, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0xff, 0xff, 0xff, 0xff,
        ]).entry_point_offset());
        assert_eq!(0xffff_ffff_ffff_ffff_usize, gate_desc_from_bytes([
            0xff, 0xff, 0x00, 0x00,
            0x00, 0x00, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0x00, 0x00, 0x00, 0x00,
        ]).entry_point_offset());
        assert_eq!(0xaabb_ccdd_1122_3344_usize, gate_desc_from_bytes([
            0x44, 0x33, 0x8f, 0x97,
            0x68, 0x5e, 0x22, 0x11,
            0xdd, 0xcc, 0xbb, 0xaa,
            0xf8, 0x76, 0x89, 0xe5,
        ]).entry_point_offset());

        let mut desc = gate_desc_from_bytes([
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]);
        desc.set_entry_point_offset(0xffff_ffff_ffff_ffff_usize);
        assert_eq!(bytes_from_gate_desc(desc), [
            0xff, 0xff, 0x00, 0x00,
            0x00, 0x00, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0x00, 0x00, 0x00, 0x00,
        ]);

        let mut desc = gate_desc_from_bytes([
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]);
        desc.set_entry_point_offset(0xaabb_ccdd_1122_3344_usize);
        assert_eq!(bytes_from_gate_desc(desc), [
            0x44, 0x33, 0x00, 0x00,
            0x00, 0x00, 0x22, 0x11,
            0xdd, 0xcc, 0xbb, 0xaa,
            0x00, 0x00, 0x00, 0x00,
        ]);

        // All-ones descriptor: only the offset bits may be cleared.
        let mut desc = gate_desc_from_bytes([
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
        ]);
        desc.set_entry_point_offset(0x0000_0000_0000_0000_usize);
        assert_eq!(bytes_from_gate_desc(desc), [
            0x00, 0x00, 0xff, 0xff,
            0xff, 0xff, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0xff, 0xff, 0xff, 0xff,
        ]);

        let mut desc = gate_desc_from_bytes([
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
        ]);
        desc.set_entry_point_offset(0xaabb_ccdd_1122_3344_usize);
        assert_eq!(bytes_from_gate_desc(desc), [
            0x44, 0x33, 0xff, 0xff,
            0xff, 0xff, 0x22, 0x11,
            0xdd, 0xcc, 0xbb, 0xaa,
            0xff, 0xff, 0xff, 0xff,
        ]);
    }

    // selector() / set_selector() must touch exactly bytes 2–3.
    #[test]
    #[cfg(target_arch = "x86_64")]
    #[rustfmt::skip]
    fn test_descriptor_selector() {
        assert_eq!(Selector(0x0000), gate_desc_from_bytes([
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]).selector());
        assert_eq!(Selector(0x0000), gate_desc_from_bytes([
            0xff, 0xff, 0x00, 0x00,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
        ]).selector());
        assert_eq!(Selector(0xffff), gate_desc_from_bytes([
            0x00, 0x00, 0xff, 0xff,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]).selector());
        assert_eq!(Selector(0xabcd), gate_desc_from_bytes([
            0x94, 0x82, 0xcd, 0xab,
            0x52, 0x93, 0x83, 0x51,
            0x72, 0x89, 0x73, 0x21,
            0x28, 0x05, 0x86, 0x85,
        ]).selector());

        let mut desc = gate_desc_from_bytes([
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]);
        desc.set_selector(0xffff.into());
        assert_eq!(bytes_from_gate_desc(desc), [
            0x00, 0x00, 0xff, 0xff,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]);

        let mut desc = gate_desc_from_bytes([
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]);
        desc.set_selector(0xabcd.into());
        assert_eq!(bytes_from_gate_desc(desc), [
            0x00, 0x00, 0xcd, 0xab,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00,
        ]);

        // All-ones descriptor: only the selector bytes may be cleared.
        let mut desc = gate_desc_from_bytes([
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
        ]);
        desc.set_selector(0x0000.into());
        assert_eq!(bytes_from_gate_desc(desc), [
            0xff, 0xff, 0x00, 0x00,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
        ]);

        let mut desc = gate_desc_from_bytes([
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
        ]);
        desc.set_selector(0xabcd.into());
        assert_eq!(bytes_from_gate_desc(desc), [
            0xff, 0xff, 0xcd, 0xab,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff,
        ]);
    }

    // Reads inside the mapped range follow the stored bits; everything at or
    // past the final (terminator) byte reads as checked.
    #[test]
    #[allow(clippy::bool_assert_comparison)]
    fn test_is_port_checked() {
        let map = IOPermissionBitmap([]);
        assert_eq!(map.is_port_checked(0), true);
        assert_eq!(map.is_port_checked(1), true);
        assert_eq!(map.is_port_checked(u16::MAX), true);

        let map = IOPermissionBitmap([0xff]);
        assert_eq!(map.is_port_checked(0), true);
        assert_eq!(map.is_port_checked(1), true);
        assert_eq!(map.is_port_checked(8), true);
        assert_eq!(map.is_port_checked(u16::MAX), true);

        let map = IOPermissionBitmap([0x00, 0xff]);
        assert_eq!(map.is_port_checked(0), false);
        assert_eq!(map.is_port_checked(1), false);
        assert_eq!(map.is_port_checked(7), false);
        assert_eq!(map.is_port_checked(8), true);
        assert_eq!(map.is_port_checked(u16::MAX), true);

        // Alternating bit patterns: bit N of byte 0 is port N.
        let map = IOPermissionBitmap([0xaa, 0xff]);
        assert_eq!(map.is_port_checked(0), false);
        assert_eq!(map.is_port_checked(1), true);
        assert_eq!(map.is_port_checked(2), false);
        assert_eq!(map.is_port_checked(3), true);
        assert_eq!(map.is_port_checked(4), false);
        assert_eq!(map.is_port_checked(5), true);
        assert_eq!(map.is_port_checked(6), false);
        assert_eq!(map.is_port_checked(7), true);
        assert_eq!(map.is_port_checked(8), true);
        assert_eq!(map.is_port_checked(u16::MAX), true);

        let map = IOPermissionBitmap([0x55, 0xff]);
        assert_eq!(map.is_port_checked(0), true);
        assert_eq!(map.is_port_checked(1), false);
        assert_eq!(map.is_port_checked(2), true);
        assert_eq!(map.is_port_checked(3), false);
        assert_eq!(map.is_port_checked(4), true);
        assert_eq!(map.is_port_checked(5), false);
        assert_eq!(map.is_port_checked(6), true);
        assert_eq!(map.is_port_checked(7), false);
        assert_eq!(map.is_port_checked(8), true);
        assert_eq!(map.is_port_checked(u16::MAX), true);

        let map = IOPermissionBitmap([0xff, 0x00, 0xff]);
        assert_eq!(map.is_port_checked(0), true);
        assert_eq!(map.is_port_checked(7), true);
        assert_eq!(map.is_port_checked(8), false);
        assert_eq!(map.is_port_checked(15), false);
        assert_eq!(map.is_port_checked(16), true);
        assert_eq!(map.is_port_checked(u16::MAX), true);

        // A one-byte map holds only the terminator, so no port is mapped and
        // everything reads as checked regardless of the stored bits.
        let map = IOPermissionBitmap([0x00]);
        assert_eq!(map.is_port_checked(0), true);
        assert_eq!(map.is_port_checked(1), true);
        assert_eq!(map.is_port_checked(7), true);
        assert_eq!(map.is_port_checked(8), true);
        assert_eq!(map.is_port_checked(u16::MAX), true);

        let map = IOPermissionBitmap([0x00, 0x00]);
        assert_eq!(map.is_port_checked(0), false);
        assert_eq!(map.is_port_checked(1), false);
        assert_eq!(map.is_port_checked(7), false);
        assert_eq!(map.is_port_checked(8), true);
        assert_eq!(map.is_port_checked(15), true);
        assert_eq!(map.is_port_checked(16), true);
        assert_eq!(map.is_port_checked(u16::MAX), true);
    }

    // Writes must flip exactly one bit and leave everything else untouched.
    #[test]
    fn test_set_port_checked() {
        let mut map = IOPermissionBitmap([0x00, 0x00, 0xff]);
        map.set_port_checked(0, true);
        assert_eq!(map.0, [0x01, 0x00, 0xff]);

        let mut map = IOPermissionBitmap([0x00, 0x00, 0xff]);
        map.set_port_checked(15, true);
        assert_eq!(map.0, [0x00, 0x80, 0xff]);

        let mut map = IOPermissionBitmap([0xff, 0xff, 0xff]);
        map.set_port_checked(0, false);
        assert_eq!(map.0, [0xfe, 0xff, 0xff]);

        let mut map = IOPermissionBitmap([0xff, 0xff, 0xff]);
        map.set_port_checked(15, false);
        assert_eq!(map.0, [0xff, 0x7f, 0xff]);
    }

    // Writes beyond the mapped range must panic (the final byte is the
    // terminator and cannot hold port bits).
    #[test]
    #[should_panic(
        expected = "Port 10 is beyond the maximum value Some(f) supported by this map"
    )]
    fn test_set_port_checked_out_of_range_3byte() {
        let mut map = IOPermissionBitmap([0x00, 0x00, 0xff]);
        map.set_port_checked(16, true);
    }

    #[test]
    #[should_panic(
        expected = "Port 8 is beyond the maximum value Some(7) supported by this map"
    )]
    fn test_set_port_checked_out_of_range_2byte() {
        let mut map = IOPermissionBitmap([0x00, 0xff]);
        map.set_port_checked(8, true);
    }

    #[test]
    #[should_panic(
        expected = "Port 0 is beyond the maximum value None supported by this map"
    )]
    fn test_set_port_checked_out_of_range_1byte() {
        let mut map = IOPermissionBitmap([0xff]);
        map.set_port_checked(0, true);
    }

    #[test]
    #[should_panic(
        expected = "Port 0 is beyond the maximum value None supported by this map"
    )]
    fn test_set_port_checked_out_of_range_empty() {
        let mut map = IOPermissionBitmap([]);
        map.set_port_checked(0, true);
    }
}