vm.c
1#include <vm.h>
2#include <mm.h>
3#include <string.h>
4#include <log.h>
5#include <slab.h>
6#include <panic.h>
7#include <tpa.h>
8#include <msr.h>
12void load_cr3(ptr_t cr3);
13
14static bool vm_direct_mapping_initialized = false;
15static tpa_t* page_descriptors = 0;
16struct vm_table* VM_KERNEL_CONTEXT;
17
18struct page_descriptor {
19 uint32_t flags;
20 uint8_t size: 2; // 0 = 4KiB, 1 = 2MiB, 2 = 1GiB, 3 = panic
21 uint32_t refcount;
22};
23
25struct vm_table_entry {
26 unsigned int present : 1;
27 unsigned int writeable : 1;
28 unsigned int userspace : 1;
29 unsigned int pat0 : 1;
30 unsigned int pat1 : 1;
31 unsigned int accessed : 1;
32 unsigned int dirty : 1;
33 unsigned int huge : 1;
34 unsigned int global : 1;
35 unsigned int available : 3;
36 unsigned long next_base : 40;
37 unsigned int available2 : 11;
38 unsigned int nx : 1;
39}__attribute__((packed));
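// Note: the bitfield above mirrors the amd64 hardware layout of a paging
// entry: bit 0 present, bit 1 writeable, bit 2 user/supervisor, bits 3/4
// PWT/PCD (pat0/pat1 here), bit 7 page size resp. PAT, bits 12-51 the
// physical frame number (next_base), bit 63 NX.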
40
42struct vm_table {
43 struct vm_table_entry entries[512];
44}__attribute__((packed));
45
46#define BASE_TO_PHYS(x) ((char*)(x << 12))
47#define BASE_TO_DIRECT_MAPPED(x) ((vm_direct_mapping_initialized ? ALLOCATOR_REGION_DIRECT_MAPPING.start : 0) + BASE_TO_PHYS(x))
48#define BASE_TO_TABLE(x) ((struct vm_table*)BASE_TO_DIRECT_MAPPED(x))
49
50#define PML4_INDEX(x) (((x) >> 39) & 0x1FF)
51#define PDP_INDEX(x) (((x) >> 30) & 0x1FF)
52#define PD_INDEX(x) (((x) >> 21) & 0x1FF)
53#define PT_INDEX(x) (((x) >> 12) & 0x1FF)
54
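// Worked example for the index macros above, using an illustrative
// higher-half address (the concrete value is not taken from the original
// source):
//
//   ptr_t a = 0xFFFF800000200000;
//   PML4_INDEX(a) == 256  // bits 39-47
//   PDP_INDEX(a)  == 0    // bits 30-38
//   PD_INDEX(a)   == 1    // bits 21-29
//   PT_INDEX(a)   == 0    // bits 12-20
//
// BASE_TO_PHYS() turns the 40-bit frame number of a table entry back into a
// physical address; BASE_TO_TABLE() additionally shifts it into the direct
// mapping region once vm_direct_mapping_initialized is set, so paging
// structures can be dereferenced through their direct-mapped location.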
55void vm_setup_direct_mapping_init(struct vm_table* context) {
56 // we have to implement a lot of the code from below in a simpler way since we cannot assume a lot of things assumed below:
57 // - we do not have the direct mapping of all physical memory yet, so we have to carefully work with virtual and physical addresses
58 // - we do not have malloc() yet
59 // - we do not have any exception handlers in place. Creating a page fault _now_ will reboot the system
60
61 // to make things simple, we just don't call any other function here that relies on the virtual memory management
62 // things allowed here:
63 // - fbconsole (debugging output)
64 // - mm (physical memory management)
65 // - data structure definitions and macros
66
67 ptr_t physicalEndAddress = mm_highest_address();
68
69 if(ALLOCATOR_REGION_DIRECT_MAPPING.start + physicalEndAddress > ALLOCATOR_REGION_DIRECT_MAPPING.end) {
70 logw("vm", "More physical memory than direct mapping region. Only %B will be usable", ALLOCATOR_REGION_DIRECT_MAPPING.end - ALLOCATOR_REGION_DIRECT_MAPPING.start);
71 physicalEndAddress = ALLOCATOR_REGION_DIRECT_MAPPING.end - ALLOCATOR_REGION_DIRECT_MAPPING.start;
72 }
73
74 size_t numPages;
75 size_t pageSize;
76
77 if(physicalEndAddress > 1 * GiB) {
78 numPages = (physicalEndAddress + (GiB - 1)) / (1 * GiB);
79 pageSize = 1 * GiB;
80 }
81 else {
82 numPages = (physicalEndAddress + (2 * MiB - 1)) / (2 * MiB);
83 pageSize = 2 * MiB;
84 }
85
86 logd("vm", "%B -> %d pages @ %B", physicalEndAddress, numPages, pageSize);
87
88 SlabHeader* scratchpad_allocator = (SlabHeader*)ALLOCATOR_REGION_SCRATCHPAD.start;
89
90 struct vm_table* pdp;
91 int16_t last_pml4_idx = -1;
92
93 struct vm_table* pd; // only used for 2MiB pages
94 int16_t last_pdp_idx = -1;
95
96 for(ptr_t i = 0; i <= physicalEndAddress;) {
97 ptr_t vi = ALLOCATOR_REGION_DIRECT_MAPPING.start + i;
98
99 int16_t pml4_idx = PML4_INDEX(vi);
100 int16_t pdp_idx = PDP_INDEX(vi);
101
102 if(pml4_idx != last_pml4_idx) {
103 pdp = (struct vm_table*)slab_alloc(scratchpad_allocator);
104 memset((void*)pdp, 0, 0x1000);
105
106 context->entries[pml4_idx] = (struct vm_table_entry){
107 .present = 1,
108 .writeable = 1,
109 .userspace = 0,
110 .next_base = vm_context_get_physical_for_virtual(context, (ptr_t)pdp) >> 12,
111 };
112
113 last_pml4_idx = pml4_idx;
114 }
115
116 if(pageSize == 1*GiB) {
117 pdp->entries[pdp_idx] = (struct vm_table_entry){
118 .huge = 1,
119 .present = 1,
120 .writeable = 1,
121 .userspace = 0,
122 .next_base = i >> 12,
123 };
124
125 i += pageSize;
126 } else {
127 if(pdp_idx != last_pdp_idx) {
128 pd = (struct vm_table*)slab_alloc(scratchpad_allocator);
129 memset((void*)pd, 0, 0x1000);
130
131 pdp->entries[pdp_idx] = (struct vm_table_entry){
132 .present = 1,
133 .writeable = 1,
134 .userspace = 0,
135 .next_base = vm_context_get_physical_for_virtual(context, (ptr_t)pd) >> 12,
136 };
137
138 last_pdp_idx = pdp_idx;
139 }
140
141 int16_t pd_idx = PD_INDEX(vi);
142 pd->entries[pd_idx] = (struct vm_table_entry){
143 .huge = 1,
144 .present = 1,
145 .writeable = 1,
146 .userspace = 0,
147 .next_base = i >> 12,
148 };
149
150 i += pageSize;
151 }
152 }
153
155 page_descriptors = (tpa_t*)slab_alloc(scratchpad_allocator);
157}
158
159struct page_descriptor* vm_page_descriptor(ptr_t physical) {
160 struct page_descriptor* page = tpa_get(page_descriptors, physical >> 12);
161
162 if(!page) {
163 struct page_descriptor new_page = {
165 .flags = PageUsageKernel,
166 .refcount = 0,
167 };
168
169 tpa_set(page_descriptors, physical >> 12, &new_page);
170 page = tpa_get(page_descriptors, physical >> 12);
171
172 if(page == 0) {
173 panic_message("page error");
174 }
175 }
176
177 return page;
178}
179
180void vm_set_page_descriptor(ptr_t physical, uint32_t flags, uint8_t size) {
181 struct page_descriptor* page = vm_page_descriptor(physical);
182
183 page->flags = flags;
184 page->size = size;
185
186 if(size > 3) {
187 panic_message("Invalid page size!");
188 }
189}
190
191void vm_ref_inc(ptr_t physical) {
192 struct page_descriptor* page = vm_page_descriptor(physical);
193 ++page->refcount;
194}
195
196void vm_ref_dec(ptr_t physical) {
197 struct page_descriptor* page = vm_page_descriptor(physical);
198 --page->refcount;
199
200 if(!page->refcount) {
201 tpa_set(page_descriptors, physical >> 12, 0);
202 }
203}
204
205void* vm_page_alloc(uint32_t flags, uint8_t size) {
206 UNUSED_PARAM(flags);
207
208 void* ret;
209 switch(size) {
210 case PageSize4KiB:
211 ret = mm_alloc_pages(1);
212 break;
213 case PageSize2MiB:
214 ret = mm_alloc_pages(2*MiB / (4*KiB));
215 break;
216 case PageSize1GiB:
217 ret = mm_alloc_pages(1*GiB / (4*KiB));
218 break;
219 }
220
221 // XXX this can recursively call vm_page_alloc
222 // \todo describe kernel pages
223 // vm_set_page_descriptor((ptr_t)ret, flags, size);
224
225 return ret;
226}
227
228void init_vm(void) {
229 vm_setup_direct_mapping_init(VM_KERNEL_CONTEXT);
230 logd("vm", "direct mapping set up");
231
232 // initializing page descriptors
233 page_descriptors = tpa_new(&kernel_alloc, sizeof(struct page_descriptor), 4080, page_descriptors); // vm_alloc needs 16 bytes
234 logd("vm", "page descriptor structure initialized");
235
237 memcpy(new_kernel_context, VM_KERNEL_CONTEXT, 4*KiB);
238
239 VM_KERNEL_CONTEXT = new_kernel_context;
240 logd("vm", "bootstrapped kernel context");
241
242 // allocate all PDPs for the kernel
243 // since we copy the kernel context for new processes, every process inherits those PDPs and
244 // we don't have to sync every kernel mapping to every process manually
245 for(int i = 256; i < 512; ++i) {
246 if(!VM_KERNEL_CONTEXT->entries[i].present) {
247 ptr_t page = (ptr_t)mm_alloc_pages(1);
248 memset((char*)page + ALLOCATOR_REGION_DIRECT_MAPPING.start, 0, 4*KiB);
249 VM_KERNEL_CONTEXT->entries[i].next_base = (ptr_t)page >> 12;
250 VM_KERNEL_CONTEXT->entries[i].present = 1;
251 VM_KERNEL_CONTEXT->entries[i].writeable = 1;
252
253 }
254 }
255
256 // set up PAT table, especially setting PAT 7 to write combine and PAT 6 to uncachable
257 uint64_t pat = read_msr(0x0277);
258 pat &= ~(0xFFULL << 56);
259 pat |= (0x01ULL << 56);
260 pat &= ~(0xFFULL << 48);
261 write_msr(0x0277, pat);
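// For reference: IA32_PAT (MSR 0x0277) holds eight 8-bit entries PA0..PA7.
// The code above rewrites PA7 to 0x01 (write-combining) and PA6 to 0x00
// (uncacheable). A 4 KiB mapping selects its PAT entry via
// (PAT << 2) | (PCD << 1) | PWT, which correspond to the pat2/pat1/pat0 bits
// set in vm_context_map() below; a pat argument of 0x07, as passed by
// vm_map_hardware(), therefore ends up write-combining.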
262
263 logd("vm", "reserved kernel PML4 entries");
264 logw("vm", "Skipping lots of code because not completed");
265 return;
266
267 uint16_t pml4_idx = 0;
268 uint16_t pdp_idx = 0;
269 uint16_t pd_idx = 0;
270 uint16_t pt_idx = 0;
271
272 while(pml4_idx < PML4_INDEX(ALLOCATOR_REGION_DIRECT_MAPPING.start)) {
273 if(pt_idx == 512) {
274 pt_idx = 0;
275 ++pd_idx;
276 }
277
278 if(pd_idx == 512) {
279 pd_idx = pt_idx = 0;
280 ++pdp_idx;
281 }
282
283 if(pdp_idx == 512) {
284 pdp_idx = pd_idx = pt_idx = 0;
285 ++pml4_idx;
286 continue;
287 }
288
289 if(!VM_KERNEL_CONTEXT->entries[pml4_idx].present) {
290 ++pml4_idx;
291 pdp_idx = pd_idx = pt_idx = 0;
292 continue;
293 }
294
295 struct vm_table* pdp = BASE_TO_TABLE(VM_KERNEL_CONTEXT->entries[pml4_idx].next_base);
296
297 if(!pdp->entries[pdp_idx].present || pdp->entries[pdp_idx].huge) {
298 if(pdp->entries[pdp_idx].huge) {
300 vm_ref_inc(pdp->entries[pdp_idx].next_base << 12);
301 }
302
303 ++pdp_idx;
304 pd_idx = pt_idx = 0;
305 continue;
306 }
307
308 struct vm_table* pd = BASE_TO_TABLE(pdp->entries[pdp_idx].next_base);
309
310 if(!pd->entries[pd_idx].present || pd->entries[pd_idx].huge) {
311 if(pd->entries[pd_idx].huge) {
313 vm_ref_inc(pd->entries[pd_idx].next_base << 12);
314 }
315
316 ++pd_idx;
317 pt_idx = 0;
318 continue;
319 }
320
321 struct vm_table* pt = BASE_TO_TABLE(pd->entries[pd_idx].next_base);
322
323 if(pt->entries[pt_idx].present) {
325 vm_ref_inc(pt->entries[pt_idx].next_base << 12);
326 }
327
328 ++pt_idx;
329 }
330
331 logd("vm", "set up descriptors for early pages");
332}
333
334void cleanup_boot_vm(void) {
335 size_t ret = 0;
336
337 // XXX: check higher half mappings if this page is mapped elsewhere
338 // mark physical page as free if not
339 // -> search for ret += foo lines
340
341 for(uint16_t pml4_idx = 0; pml4_idx < 256; ++pml4_idx) {
342 if(VM_KERNEL_CONTEXT->entries[pml4_idx].present) {
343 struct vm_table* pdp = BASE_TO_TABLE(VM_KERNEL_CONTEXT->entries[pml4_idx].next_base);
344
345 for(uint16_t pdp_idx = 0; pdp_idx < 512; ++pdp_idx) {
346 if(pdp->entries[pdp_idx].huge) {
347 ret += 1*GiB;
348 }
349 else if(pdp->entries[pdp_idx].present) {
350 struct vm_table* pd = BASE_TO_TABLE(pdp->entries[pdp_idx].next_base);
351
352 for(uint16_t pd_idx = 0; pd_idx < 512; ++pd_idx) {
353 if(pd->entries[pd_idx].huge) {
354 ret += 2*MiB;
355 }
356 else if(pd->entries[pd_idx].present) {
357 struct vm_table* pt = BASE_TO_TABLE(pd->entries[pd_idx].next_base);
358
359 for(uint16_t pt_idx = 0; pt_idx < 512; ++pt_idx) {
360 if(pt->entries[pt_idx].present) {
361 pt->entries[pt_idx].present = 0;
362 ret += 4*KiB;
363 }
364 }
365 }
366
367 pd->entries[pd_idx].present = 0;
368 }
369 }
370
371 pdp->entries[pdp_idx].present = 0;
372 }
373 }
374
375 VM_KERNEL_CONTEXT->entries[pml4_idx].present = 0;
376 }
377
378 logi("vm", "Cleaned %B", ret);
379}
380
381struct vm_table* vm_context_new(void) {
383 memcpy((void*)context, VM_KERNEL_CONTEXT, 4096);
384
385 return context;
386}
387
388void vm_context_activate(struct vm_table* context) {
389 load_cr3(vm_context_get_physical_for_virtual(VM_KERNEL_CONTEXT, (ptr_t)context));
390}
391
392static void vm_ensure_table(struct vm_table* table, uint16_t index) {
393 struct vm_table_entry* entry = &table->entries[index];
394
395 if(!entry->present) {
396 ptr_t nt = (ptr_t)mm_alloc_pages(1) + ALLOCATOR_REGION_DIRECT_MAPPING.start;
397 memset((void*)nt, 0, 4096);
398
399 entry->next_base = (nt - ALLOCATOR_REGION_DIRECT_MAPPING.start) >> 12;
400 entry->present = 1;
401 entry->writeable = 1;
402 entry->userspace = 1;
403 }
404}
405
406void vm_context_map(struct vm_table* pml4, ptr_t virtual, ptr_t physical, uint8_t pat) {
407 vm_ensure_table(pml4, PML4_INDEX(virtual));
408
409 struct vm_table* pdp = BASE_TO_TABLE(pml4->entries[PML4_INDEX(virtual)].next_base);
410 vm_ensure_table(pdp, PDP_INDEX(virtual));
411
412 struct vm_table* pd = BASE_TO_TABLE(pdp->entries[PDP_INDEX(virtual)].next_base);
413 vm_ensure_table(pd, PD_INDEX(virtual));
414
415 struct vm_table* pt = BASE_TO_TABLE(pd->entries[PD_INDEX(virtual)].next_base);
416
417 pt->entries[PT_INDEX(virtual)].next_base = physical >> 12;
418 pt->entries[PT_INDEX(virtual)].present = 1;
419 pt->entries[PT_INDEX(virtual)].writeable = 1;
420 pt->entries[PT_INDEX(virtual)].userspace = 1;
421
422 pt->entries[PT_INDEX(virtual)].pat0 = !!(pat & 1);
423 pt->entries[PT_INDEX(virtual)].pat1 = !!(pat & 2);
424 pt->entries[PT_INDEX(virtual)].huge = !!(pat & 4); // huge bit is pat2 bit in PT
425}
426
427void vm_context_unmap(struct vm_table* context, ptr_t virtual) {
428 struct vm_table_entry* pml4_entry = &context->entries[PML4_INDEX(virtual)];
429
430 if(!pml4_entry->present) {
431 return;
432 }
433
434 struct vm_table* pdp = BASE_TO_TABLE(pml4_entry->next_base);
435 struct vm_table_entry* pdp_entry = &pdp->entries[PDP_INDEX(virtual)];
436
437 if(!pdp_entry->present) {
438 return;
439 }
440
441 struct vm_table* pd = BASE_TO_TABLE(pdp_entry->next_base);
442 struct vm_table_entry* pd_entry = &pd->entries[PD_INDEX(virtual)];
443
444 if(!pd_entry->present) {
445 return;
446 }
447
448 struct vm_table* pt = BASE_TO_TABLE(pd_entry->next_base);
449 struct vm_table_entry* pt_entry = &pt->entries[PT_INDEX(virtual)];
450
451 if(!pt_entry->present) {
452 return;
453 }
454
455 pt_entry->next_base = 0;
456 pt_entry->present = 0;
457 pt_entry->writeable = 0;
458 pt_entry->userspace = 0;
459}
460
461int vm_table_get_free_index1(struct vm_table *table) {
462 return vm_table_get_free_index3(table, 0, 512);
463}
464
465int vm_table_get_free_index3(struct vm_table *table, int start, int end) {
466 for(int i = start; i < end; i++) {
467 if(!table->entries[i].present) {
468 return i;
469 }
470 }
471
472 return -1;
473}
474
475ptr_t vm_context_get_physical_for_virtual(struct vm_table* context, ptr_t virtual) {
476 if(virtual >= ALLOCATOR_REGION_DIRECT_MAPPING.start && virtual <= ALLOCATOR_REGION_DIRECT_MAPPING.end) {
477 return virtual - ALLOCATOR_REGION_DIRECT_MAPPING.start;
478 }
479
480 if(!context->entries[PML4_INDEX(virtual)].present)
481 return 0;
482
483 struct vm_table* pdp = BASE_TO_TABLE(context->entries[PML4_INDEX(virtual)].next_base);
484
485 if(!pdp->entries[PDP_INDEX(virtual)].present)
486 return 0;
487 else if(pdp->entries[PDP_INDEX(virtual)].huge)
488 return (pdp->entries[PDP_INDEX(virtual)].next_base << 30) | (virtual & 0x3FFFFFFF);
489
490 struct vm_table* pd = BASE_TO_TABLE(pdp->entries[PDP_INDEX(virtual)].next_base);
491
492 if(!pd->entries[PD_INDEX(virtual)].present)
493 return 0;
494 else if(pd->entries[PD_INDEX(virtual)].huge)
495 return (pd->entries[PD_INDEX(virtual)].next_base << 21) | (virtual & 0x1FFFFF);
496
497 struct vm_table* pt = BASE_TO_TABLE(pd->entries[PD_INDEX(virtual)].next_base);
498
499 if(!pt->entries[PT_INDEX(virtual)].present)
500 return 0;
501 else
502 return (pt->entries[PT_INDEX(virtual)].next_base << 12) | (virtual & 0xFFF);
503}
504
505bool vm_context_page_present(struct vm_table* context, ptr_t virtual) {
506 if(!context->entries[PML4_INDEX(virtual)].present)
507 return false;
508
509 struct vm_table* pdp = BASE_TO_TABLE(context->entries[PML4_INDEX(virtual)].next_base);
510
511 if(!pdp->entries[PDP_INDEX(virtual)].present)
512 return false;
513 else if(pdp->entries[PDP_INDEX(virtual)].huge)
514 return true;
515
516 struct vm_table* pd = BASE_TO_TABLE(pdp->entries[PDP_INDEX(virtual)].next_base);
517
518 if(!pd->entries[PD_INDEX(virtual)].present)
519 return false;
520 else if(pd->entries[PD_INDEX(virtual)].huge)
521 return true;
522
523 struct vm_table* pt = BASE_TO_TABLE(pd->entries[PD_INDEX(virtual)].next_base);
524
525 if(!pt->entries[PT_INDEX(virtual)].present)
526 return false;
527 else
528 return true;
529}
530
531ptr_t vm_context_find_free(struct vm_table* context, region_t region, size_t num) {
532 ptr_t current = region.start;
533
534 while(current <= region.end) {
535 if(vm_context_page_present(context, current)) {
536 current += 4096;
537 continue;
538 }
539
540 bool abortThisCandidate = false;
541 for(size_t i = 0; i < num; ++i) {
542 if(vm_context_page_present(context, current + (i * 4096))) {
543 current += 4096;
544 abortThisCandidate = true;
545 break;
546 }
547 }
548
549 if(abortThisCandidate) {
550 continue;
551 }
552
553 // if we came to this point, enough space is available starting at $current
554 return current;
555 }
556
557 return 0;
558}
559
560ptr_t vm_context_alloc_pages(struct vm_table* context, region_t region, size_t num) {
561 ptr_t vdest = vm_context_find_free(context, region, num);
562
563 if(!vdest) return 0;
564
565 // we do not need contiguous physical memory, so allocate each page on its own
566 for(size_t i = 0; i < num; ++i) {
567 ptr_t physical = (ptr_t)mm_alloc_pages(1);
568 vm_context_map(context, vdest + (i * 4096), physical, 0);
569 }
570
571 return vdest;
572}
573
574void vm_copy_page(struct vm_table* dst_ctx, ptr_t dst, struct vm_table* src_ctx, ptr_t src) {
575 // XXX: make some copy-on-write here
576 // XXX: incompatible with non-4k pages!
577
578 if(vm_context_page_present(src_ctx, src)) {
579 ptr_t src_phys = vm_context_get_physical_for_virtual(src_ctx, src);
580 ptr_t dst_phys;
581
582 if(!vm_context_page_present(dst_ctx, dst)) {
583 dst_phys = (ptr_t)mm_alloc_pages(1);
584 vm_context_map(dst_ctx, dst, dst_phys, 0); // TODO: get PAT from source page
585 } else {
586 dst_phys = vm_context_get_physical_for_virtual(dst_ctx, dst);
587 }
588
589 if(src_phys != dst_phys) {
590 memcpy((void*)(dst_phys + ALLOCATOR_REGION_DIRECT_MAPPING.start), (void*)(src_phys + ALLOCATOR_REGION_DIRECT_MAPPING.start), 0x1000);
591 } else {
592 panic_message("vm_copy_page with same src and dest physical mapping!");
593 }
594 }
595}
596
597void vm_copy_range(struct vm_table* dst, struct vm_table* src, ptr_t addr, size_t size) {
598 uint16_t pml4_l = 0, pdp_l = 0, pd_l = 0;
599 struct vm_table *src_pdp = 0, *src_pd = 0, *src_pt = 0;
600 struct vm_table *dst_pdp = 0, *dst_pd = 0, *dst_pt = 0;
601
602 for(ptr_t i = addr; i < addr + size; ) {
603 uint16_t pml4_i = PML4_INDEX(i);
604 uint16_t pdp_i = PDP_INDEX(i);
605 uint16_t pd_i = PD_INDEX(i);
606 uint16_t pt_i = PT_INDEX(i);
607
608 if(!src_pdp || pml4_i != pml4_l) {
609 vm_ensure_table(dst, pml4_i);
610 src_pdp = BASE_TO_TABLE(src->entries[pml4_i].next_base);
611 dst_pdp = BASE_TO_TABLE(dst->entries[pml4_i].next_base);
612 pml4_l = pml4_i;
613 }
614
615 if(!src_pdp->entries[pdp_i].present) {
616 i += 1*GiB;
617 continue;
618 }
619
620 // 1 GiB pages
621 if(src_pdp->entries[pdp_i].huge) {
622 if(dst_pdp->entries[pdp_i].present) {
623 // we would have to free downwards
624 panic_message("vm_copy_range/pdp: huge src and dst present is not yet implemented!");
625 }
626
627 if((i % (1*GiB)) != 0) {
628 panic_message("vm_copy_range/pdp: unaligned huge page address!");
629 }
630
631 dst_pdp->entries[pdp_i] = src_pdp->entries[pdp_i];
632 dst_pdp->entries[pdp_i].next_base = (uint64_t)(mm_alloc_pages(1*GiB / (4*KiB))) >> 12;
633 memcpy(BASE_TO_DIRECT_MAPPED(dst_pdp->entries[pdp_i].next_base), BASE_TO_DIRECT_MAPPED(src_pdp->entries[pdp_i].next_base), 1*GiB);
634
635 i += 1*GiB;
636 continue;
637 }
638
639 if(!src_pd || pdp_i != pdp_l) {
640 vm_ensure_table(dst_pdp, pdp_i);
641 src_pd = BASE_TO_TABLE(src_pdp->entries[pdp_i].next_base);
642 dst_pd = BASE_TO_TABLE(dst_pdp->entries[pdp_i].next_base);
643 pdp_l = pdp_i;
644 }
645
646 if(!src_pd->entries[pd_i].present) {
647 i += 2*MiB;
648 continue;
649 }
650
651 // 2 MiB pages
652 if(src_pd->entries[pd_i].huge) {
653 if(dst_pd->entries[pd_i].present) {
654 // we would have to free downwards
655 panic_message("vm_copy_range/pd: huge src and dst present is not yet implemented!");
656 }
657
658 if((i % (2*MiB)) != 0) {
659 panic_message("vm_copy_range/pd: unaligned huge page address!");
660 }
661
662 dst_pd->entries[pd_i] = src_pd->entries[pd_i];
663 dst_pd->entries[pd_i].next_base = (uint64_t)(mm_alloc_pages(2*MiB / (4*KiB))) >> 12;
664 memcpy(BASE_TO_DIRECT_MAPPED(dst_pd->entries[pd_i].next_base), BASE_TO_DIRECT_MAPPED(src_pd->entries[pd_i].next_base), 2*MiB);
665
666 i += 2*MiB;
667 continue;
668 }
669
670 if(!src_pt || pd_i != pd_l) {
671 vm_ensure_table(dst_pd, pd_i);
672 src_pt = BASE_TO_TABLE(src_pd->entries[pd_i].next_base);
673 dst_pt = BASE_TO_TABLE(dst_pd->entries[pd_i].next_base);
674 pd_l = pd_i;
675 }
676
677 if(src_pt->entries[pt_i].present) {
678 // 4 KiB pages
679 if(dst_pt->entries[pt_i].present) {
680 // TODO: ref counter for physical pages, mark as free
681 logw("vm", "vm_copy_range: unmapping page without marking it as free: %x", dst_pt->entries[pt_i].next_base << 12);
682 }
683
684 if((i % (4*KiB)) != 0) {
685 panic_message("vm_copy_range/pt: unaligned page address!");
686 }
687
688 dst_pt->entries[pt_i] = src_pt->entries[pt_i];
689 dst_pt->entries[pt_i].next_base = (uint64_t)mm_alloc_pages(1) >> 12;
690 memcpy(BASE_TO_DIRECT_MAPPED(dst_pt->entries[pt_i].next_base), BASE_TO_DIRECT_MAPPED(src_pt->entries[pt_i].next_base), 4*KiB);
691 }
692
693 i += 4*KiB;
694 }
695}
696
697void* vm_alloc(size_t size) {
698 size_t pages = (size + 4095 /* for rounding */ + 16 /* overhead */) / 4096;
699
701 *(uint64_t*)ptr = size;
702 *(uint64_t*)((char*)ptr + size + 8) = ~size;
703
704 return (char*)ptr + 8;
705}
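// Allocation layout produced by vm_alloc(), as a sketch of the code above:
//
//   [ size : 8 bytes ][ user data : size bytes ][ ~size : 8 bytes ]
//   ^ page aligned     ^ pointer returned to the caller
//
// vm_free() below reads the leading size, checks that the trailing value is
// still the bitwise complement of it (otherwise it panics with
// "VM corruption detected!") and then walks the pages to release them.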
706
707void vm_free(void* ptr) {
709 uint64_t size = *(uint64_t*)((char*)ptr - 8);
710 uint64_t validation = *(uint64_t*)((char*)ptr + size);
711
712 if(size != ~validation) {
713 panic_message("VM corruption detected!");
714 }
715
716 uint64_t pages = (size + 4095) / 4096;
717
718 for(uint64_t i = 0; i < pages; ++i) {
719 ptr_t vir = ((ptr_t)ptr - 8) + (i * 0x1000);
720 ptr_t phys = vm_context_get_physical_for_virtual(VM_KERNEL_CONTEXT, vir);
721 vm_context_unmap(VM_KERNEL_CONTEXT, vir);
722 mm_mark_physical_pages(phys, 1, MM_FREE);
723 }
724}
725
726static void* kernel_alloc_fn(allocator_t* alloc, size_t size) {
727 alloc->tag += size;
728 return vm_alloc(size);
729}
730
731static void kernel_dealloc_fn(allocator_t* alloc, void* ptr) {
732 size_t size = *((size_t*)ptr-1);
733 alloc->tag -= size;
734 vm_free(ptr);
735}
736
737allocator_t kernel_alloc = {
738 .alloc = kernel_alloc_fn,
739 .dealloc = kernel_dealloc_fn,
740 .tag = 0,
741};
742
743struct vm_table* vm_current_context(void) {
744 struct vm_table* current;
745 asm("mov %%cr3, %0":"=r"(current));
746
747 if(vm_direct_mapping_initialized) {
748 return (void*)((char*)current + ALLOCATOR_REGION_DIRECT_MAPPING.start);
749 }
750 else {
751 return current;
752 }
753}
754
755ptr_t vm_map_hardware(ptr_t hw, size_t len) {
756 struct vm_table* context = vm_current_context();
757
758 size_t pages = (len + 4095) / 4096;
759 ptr_t dest = vm_context_find_free(context, ALLOCATOR_REGION_USER_HARDWARE, pages);
760
761 for(size_t page = 0; page < pages; ++page) {
762 vm_context_map(context, dest + (page * 4096), hw + (page * 4096), 0x07);
763 }
764
765 return dest;
766}
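// Usage sketch for vm_map_hardware(), with illustrative values that are not
// taken from the original source: mapping a 4 MiB framebuffer reported by the
// bootloader at physical address 0xFD000000 into the current context:
//
//   ptr_t fb = vm_map_hardware((ptr_t)0xFD000000, 4 * MiB);
//   // fb is a page-aligned virtual address; the pat value 0x07 passed to
//   // vm_context_map() selects PAT entry 7, i.e. write-combining after the
//   // setup in init_vm().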