// convert a page-table "base" field (physical frame number) to a physical address,
// a pointer into the direct mapping, or a table pointer
#define BASE_TO_PHYS(x) ((char*)((x) << 12))
#define BASE_TO_DIRECT_MAPPED(x) ((vm_direct_mapping_initialized ? ALLOCATOR_REGION_DIRECT_MAPPING.start : 0) + BASE_TO_PHYS(x))
#define BASE_TO_TABLE(x) ((struct vm_table*)BASE_TO_DIRECT_MAPPED(x))
#define PML4_INDEX(x) (((x) >> 39) & 0x1FF)
#define PDP_INDEX(x)  (((x) >> 30) & 0x1FF)
#define PD_INDEX(x)   (((x) >> 21) & 0x1FF)
#define PT_INDEX(x)   (((x) >> 12) & 0x1FF)
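// Illustration (not part of the original source): the four macros above split a
// canonical 64-bit virtual address into 9-bit table indices. For the hypothetical
// address 0xffffffff80201000:
//   PML4_INDEX -> 511, PDP_INDEX -> 510, PD_INDEX -> 1, PT_INDEX -> 1
// Each index selects one of the 512 entries of the table at that level.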
    if(physicalEndAddress > 1 * GiB) {
        numPages = (physicalEndAddress + (GiB - 1)) / (1 * GiB);       // round up to whole 1 GiB pages
        // ...
    }
    else {
        numPages = (physicalEndAddress + (2 * MiB - 1)) / (2 * MiB);   // round up to whole 2 MiB pages
        // ...
    }

    logd("vm", "%B -> %d pages @ %B", physicalEndAddress, numPages, pageSize);
    for(ptr_t i = 0; i <= physicalEndAddress;) {
        // ...
        if(pml4_idx != last_pml4_idx) {
            // ...
            memset((void*)pdp, 0, 0x1000);
            // ...
            last_pml4_idx = pml4_idx;
        }

        if(pageSize == 1*GiB) {
            // ... (1 GiB page: fill in a PDP entry directly)
            .next_base = i >> 12,
            // ...
        }
        else {
            if(pdp_idx != last_pdp_idx) {
                // ...
                memset((void*)pd, 0, 0x1000);
                // ...
                last_pdp_idx = pdp_idx;
            }
            // ... (2 MiB page: fill in a PD entry)
            .next_base = i >> 12,
            // ...
        }
    }
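// next_base stores a physical page frame number, i.e. the physical address shifted
// right by 12 bits; BASE_TO_PHYS above performs the inverse conversion when the
// entry is read back.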
    logd("vm", "direct mapping set up");
    // ...
    logd("vm", "page descriptor structure initialized");
    // ...
    logd("vm", "bootstrapped kernel context");
    for(int i = 256; i < 512; ++i) {
        // ...
    }

    // ...
    pat &= ~(0xFFULL << 56);
    pat |= (0x01ULL << 56);
    pat &= ~(0xFFULL << 48);
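// The three statements above adjust the IA32_PAT MSR: the entry in bits 63:56
// (PAT7) is set to 0x01, the write-combining memory type, while the entry in
// bits 55:48 (PAT6) is cleared to 0x00, strong uncacheable. Presumably this is
// what allows the pat argument of vm_context_map to select these memory types
// through a page's PAT/PCD/PWT bits.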
    logd("vm", "reserved kernel PML4 entries");
    logw("vm", "Skipping lots of code because not completed");
    pdp_idx = pd_idx = pt_idx = 0;
    // ...
    pdp_idx = pd_idx = pt_idx = 0;
    // ...
    logd("vm", "set up descriptors for early pages");
    for(uint16_t pml4_idx = 0; pml4_idx < 256; ++pml4_idx) {
        // ...
        for(uint16_t pdp_idx = 0; pdp_idx < 512; ++pdp_idx) {
            // ...
            for(uint16_t pd_idx = 0; pd_idx < 512; ++pd_idx) {
                // ...
                for(uint16_t pt_idx = 0; pt_idx < 512; ++pt_idx) {
                    // ...

    logi("vm", "Cleaned %B", ret);
    if(!entry->present) {
        // ...
        memset((void*)nt, 0, 4096);
        // ...
        entry->writeable = 1;
        entry->userspace = 1;
    }
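// When the entry is not present, a fresh page is allocated for the next-level
// table, zeroed, and the entry is marked writeable and userspace-accessible;
// presumably the stricter protection bits are applied at the lower paging
// levels instead.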
    for(int i = start; i < end; i++) {
    while(current <= region.end) {
        // ...
        bool abortThisCandidate = false;

        for(size_t i = 0; i < num; ++i) {
            // ...
            abortThisCandidate = true;
            // ...
        }

        if(abortThisCandidate) {
            // ...
        }
        // ...
    }

    // ...
    for(size_t i = 0; i < num; ++i) {
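// Shape of the search above: every candidate start address in the region is
// checked page by page (first for loop); if one of the num pages is already
// mapped, abortThisCandidate is set and the scan moves on to a later candidate,
// a simple first-fit search over the region.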
    if(src_phys != dst_phys) {
        // ...
    }
    else {
        panic_message("vm_copy_page with same src and dest physical mapping!");
    }
    uint16_t pml4_l = 0, pdp_l = 0, pd_l = 0;
    struct vm_table *src_pdp = 0, *src_pd = 0, *src_pt = 0;
    struct vm_table *dst_pdp = 0, *dst_pd = 0, *dst_pt = 0;
    // ...
    if(!src_pdp || pml4_i != pml4_l) {
        // ...
    }
    // ...
        panic_message("vm_copy_range/pdp: huge src and dst present is not yet implemented!");
        // ...
        if((i % (1*GiB)) != 0) {
            panic_message("vm_copy_range/pdp: unaligned huge page address!");
        }
    // ...
    if(!src_pd || pdp_i != pdp_l) {
        // ...
    }
    // ...
    if(!src_pd->entries[pd_i].present) {
        // ...
    }
    // ...
    if(src_pd->entries[pd_i].huge) {
        if(dst_pd->entries[pd_i].present) {
            // ...
            panic_message("vm_copy_range/pd: huge src and dst present is not yet implemented!");
        }
        // ...
        if((i % (2*MiB)) != 0) {
            panic_message("vm_copy_range/pd: unaligned huge page address!");
        }
        // ...
        dst_pd->entries[pd_i] = src_pd->entries[pd_i];
        // ...
    }
    // ...
    if(!src_pt || pd_i != pd_l) {
        // ...
    }
    // ...
    if(src_pt->entries[pt_i].present) {
        // ...
        if(dst_pt->entries[pt_i].present) {
            // ...
            logw("vm", "vm_copy_range: unmapping page without marking it as free: %x", dst_pt->entries[pt_i].next_base << 12);
        }
        // ...
        if((i % (4*KiB)) != 0) {
            // ...
        }
        // ...
        dst_pt->entries[pt_i] = src_pt->entries[pt_i];
    }
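// The copy walks the source and destination hierarchies in lockstep: pml4_l,
// pdp_l and pd_l cache the last-used indices so the table pointers are only
// re-resolved when the walk enters a new table, and huge (1 GiB / 2 MiB)
// mappings are copied by duplicating the table entry itself rather than the
// memory behind it.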
    size_t pages = (size + 4095 + 16) / 4096;
    // ...
    return (char*)ptr + 8;
    // ...
    if(size != ~validation) {
        // ...
    }
    // ...
    for(uint64_t i = 0; i < pages; ++i) {
        // ...
    }
    // ...
    size_t size = *((size_t*)ptr-1);
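// The kernel heap allocator above reserves 16 bytes of header in its page count
// and stores the requested size together with a validation word; on free, the
// stored size has to match the bitwise complement of that validation word
// (size != ~validation takes the error path), a cheap guard against heap
// corruption and stray frees.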
    asm("mov %%cr3, %0" : "=r"(current));
    size_t pages = (len + 4095) / 4096;
    // ...
    for(size_t page = 0; page < pages; ++page) {
        vm_context_map(context, dest + (page * 4096), hw + (page * 4096), 0x07);
    }
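// Usage sketch for vm_map_hardware (illustrative, hypothetical values): map
// 16 KiB of memory-mapped I/O registers at physical address 0xFEC00000 into the
// current context and access them as volatile memory.
//
//     ptr_t base = vm_map_hardware((ptr_t)0xFEC00000, 16 * KiB);
//     volatile uint32_t* regs = (volatile uint32_t*)base;
//     uint32_t id = regs[0];   // read the first 32-bit register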