This repository has been archived by the owner on Nov 1, 2024. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 0
/
longmode.c
102 lines (92 loc) · 3.37 KB
/
longmode.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
#include "bootloader.h"
/* defined in longmode.asm */
extern int cpuid_unavail();
extern int longmode_avail();
extern int gib_page_avail();
extern void pae_enable();
extern void lme_enable();
extern void paging_enable();
static uint64_t *pml4;
static uint64_t *init_pdpt;
static uint64_t *phys_mem_pdpt;
static bool use_gib_pages = false;
/* Allocate one 4 KiB paging structure (PML4/PDPT/PD/PT), aligned to a
 * 4 KiB boundary as the hardware requires.  Contents are NOT zeroed;
 * callers must initialize every entry themselves. */
static uint64_t *allocate_paging_struct()
{
	uint64_t *table = malloc_aligned(4096, 4096);
	return table;
}
/* Build a 64-bit paging entry by merging a (page-aligned) physical
 * address with its attribute flag bits. */
static uint64_t make_paging_entry(uint64_t address, uint64_t flags)
{
	return flags | address;
}
/* Pull the physical address out of a paging entry by masking off the
 * low flag bits.  The result is truncated to 32 bits: this bootloader
 * runs in 32-bit mode and only ever allocates tables below 4 GiB. */
static uint64_t *extract_address(uint64_t entry)
{
	uint32_t phys = (uint32_t)(entry & 0xFFFFF000ULL);
	return (uint64_t *)phys;
}
/* Return the lower-level table that *entry points at.  If the entry is
 * not present, allocate a fresh zeroed table, install it into *entry as
 * present + writable, and return it. */
static uint64_t *extract_address_or_new(uint64_t *entry)
{
	if (!((*entry) & PAGE_PRESENT)) {
		uint64_t *fresh = allocate_paging_struct();
		for (int j = 0; j < 512; j++)
			fresh[j] = 0;
		*entry = make_paging_entry((uint32_t)fresh, PAGE_PRESENT | PAGE_WRITABLE);
		return fresh;
	}
	return extract_address(*entry);
}
/* Build a PDPT entry mapping 1 GiB of physical memory starting at
 * `address`.  When the CPU supports 1 GiB pages this is a single large
 * PDPT entry; otherwise the same range is emulated with a page
 * directory of 512 contiguous 2 MiB large pages. */
static uint64_t make_gib_entry(uint64_t address, uint64_t flags)
{
	if (!use_gib_pages) {
		uint64_t *pd = allocate_paging_struct();
		for (int i = 0; i < 512; i++)
			pd[i] = make_paging_entry(address + (uint64_t)i * 0x200000, flags | PAGE_LARGEPAGE);
		return make_paging_entry((uint32_t)pd, flags);
	}
	return make_paging_entry(address, flags | PAGE_LARGEPAGE);
}
/* TODO: support controlling attributes of mapping (e.g. read/write) */
void *map_new_page(uint64_t vaddr)
{
if (vaddr & 0xFFF)
boot_error("Internal error: expected page-aligned address");
uint64_t vaddr_top = vaddr >> 47ULL;
if (vaddr_top != 0 && vaddr_top != 0x1FFFF)
boot_error("Internal error: invalid 64-bit virtual address");
uint64_t pml4_index = (vaddr >> 39ULL) & 0x1FF;
uint64_t pdpt_index = (vaddr >> 30ULL) & 0x1FF;
uint64_t pd_index = (vaddr >> 21ULL) & 0x1FF;
uint64_t pt_index = (vaddr >> 12ULL) & 0x1FF;
uint64_t *pdpt = extract_address_or_new(pml4 + pml4_index);
uint64_t *pd = extract_address_or_new(pdpt + pdpt_index);
uint64_t *pt = extract_address_or_new(pd + pd_index);
return extract_address_or_new(pt + pt_index);
}
/* Verify CPU support, build the initial long-mode page tables, and turn
 * on PAE, long mode, and paging.  The *_avail/*_unavail probes and the
 * *_enable routines are implemented in longmode.asm (see the extern
 * declarations above). */
void longmode_init()
{
	/* Without CPUID we cannot even detect long-mode support. */
	if (cpuid_unavail()) {
		boot_error("CPUID not available");
	}
	if (!longmode_avail()) {
		boot_error("64-bit mode is not supported");
	}
	/* Use real 1GB pages when available; make_gib_entry otherwise
	 * falls back to 512 x 2MB pages per gigabyte. */
	use_gib_pages = gib_page_avail();
	pml4 = allocate_paging_struct();
	init_pdpt = allocate_paging_struct();
	phys_mem_pdpt = allocate_paging_struct();
	/* Zero all three tables so every unused entry is non-present. */
	for (int i = 0; i < 512; i++) pml4[i] = init_pdpt[i] = phys_mem_pdpt[i] = 0;
	/* Identity map bottom 1GB with a single 1GB page for use by the
	 * bootloader only. The kernel needs to remove this mapping and
	 * manage mapping the physical memory by itself. */
	init_pdpt[0] = make_gib_entry(0, PAGE_WRITABLE | PAGE_PRESENT);
	pml4[0] = make_paging_entry((uint32_t)init_pdpt, PAGE_WRITABLE | PAGE_PRESENT);
	/* Map 512GB of physical memory at the bottom of the higher half.
	 * This mapping is the one actually used by the kernel. */
	for (uint64_t i = 0; i < 512; i++) {
		phys_mem_pdpt[i] = make_gib_entry(i << (uint64_t)30,
			PAGE_WRITABLE | PAGE_PRESENT);
	}
	/* PML4 slot 256 corresponds to virtual 0xFFFF800000000000, the
	 * first canonical address of the higher half. */
	pml4[256] = make_paging_entry((uint32_t)phys_mem_pdpt, PAGE_WRITABLE | PAGE_PRESENT);
	/* NOTE(review): the ordering below (PAE, then CR3, then LME, then
	 * paging) looks deliberate — presumably the required entry sequence
	 * for long mode; the details live in longmode.asm.  Do not reorder. */
	pae_enable();
	asm volatile("mov %0, %%cr3" :: "r"(pml4) : "memory");
	lme_enable();
	paging_enable();
}