X-Git-Url: http://git.mutantstargoat.com/user/nuclear/?a=blobdiff_plain;f=src%2Fboot%2Fboot2.s;h=15b1cb1bd6e64e55fb78c71c3400cdc48066f739;hb=79be435fcf61613617025a2c4ef5fddf00a09178;hp=59918b8358341f30d99edc122c1514e789651329;hpb=d1e8a437c1fab4535f82c4c214ec3330ac32e48d;p=bootcensus

diff --git a/src/boot/boot2.s b/src/boot/boot2.s
index 59918b8..15b1cb1 100644
--- a/src/boot/boot2.s
+++ b/src/boot/boot2.s
@@ -34,6 +34,9 @@
	# enable A20 address line
	call enable_a20

	# detect available memory
	call detect_memory

	# load the whole program into memory starting at 1MB
	call load_main

@@ -553,5 +556,170 @@ kbc_wait_write:

numbuf: .space 16


	# --------------------------------------------------------------
	# detect_memory: build boot_mem_map from the BIOS E820 memory map
	# (INT 15h, AX=E820h).  Each call fills the 24-byte record at
	# `buffer`: qword base, qword length, dword type.  Usable RAM
	# records (type 1, non-zero length) are packed into boot_mem_map
	# as { base_low32, size_low32 } pairs; a 64-bit length above 4GB
	# is clamped to 0xffffffff.
	# NOTE(review): only the low 32 bits of the base are stored, so a
	# range starting at or above 4GB would be recorded with a
	# truncated base -- acceptable for a 32-bit kernel, but verify.
	# Returns with the carry flag set on failure, clear on success.
detect_memory:
	mov	$buffer, %edi		# es:di -> scratch record buffer
	xor	%ebx, %ebx		# continuation value, must be 0 first time
	mov	$0x534d4150, %edx	# 'SMAP' signature

memdet_looptop:
	mov	$0xe820, %eax
	mov	$24, %ecx		# size of the record buffer in bytes
	int	$0x15
	jc	memdet_fail		# CF: unsupported, or end of map
	cmp	$0x534d4150, %eax	# BIOS echoes 'SMAP' back on success
	jnz	memdet_fail

	mov	buffer, %eax		# low dword of the 64-bit range base
	mov	$boot_mem_map, %esi
	mov	boot_mem_map_size, %ebp
	# stop scanning once the map is full: boot_mem_map is 128 bytes,
	# i.e. room for 16 8-byte entries (prevents overrunning the map)
	cmp	$16, %ebp
	jae	memdet_done
	# again, that's [ebp * 8 + esi]
	mov	%eax, (%esi,%ebp,8)

	# only care for type 1 (usable ram), otherwise ignore
	cmpl	$1, 16(%edi)
	jnz	memdet_skip

	# skip areas with 0 size (also clamp size to 4gb)
	# test high 32bits of the 64-bit length
	cmpl	$0, 12(%edi)
	jz	memdet_highzero
	# high part is non-zero, make low part ffffffff
	xor	%eax, %eax
	not	%eax
	jmp	0f

memdet_highzero:
	# if both high and low parts are zero, ignore the range
	mov	8(%edi), %eax
	cmpl	$0, %eax
	jz	memdet_skip

0:	mov	%eax, 4(%esi,%ebp,8)	# store (clamped) size next to the base
	incl	boot_mem_map_size

memdet_skip:
	# the BIOS resets the continuation value to 0 on the last record
	test	%ebx, %ebx
	jnz	memdet_looptop

memdet_done:
	# explicitly clear CF: we can arrive here from memdet_fail with
	# CF still set (BIOSes that flag the end of the map with carry),
	# which would otherwise report success as failure
	clc
	ret

memdet_fail:
	# if size > 0, then it's not a failure, just the end of the map
	cmpl	$0, boot_mem_map_size
	jnz	memdet_done

	# just panic...
	mov	$memdet_fail_msg, %esi
	call	putstr
0:	hlt
	jmp	0b

memdet_fail_msg: .asciz "Failed to detect available memory!\n"

	.global boot_mem_map_size
boot_mem_map_size: .long 0
	.global boot_mem_map
boot_mem_map: .space 128


# this is not boot loader code. It's called later on by the main kernel
# code in 32bit protected mode. It's placed here because it needs to be
# located in base memory as it returns and runs in real mode.
	.code32
	.align 4
	# place to save the protected mode IDTR pseudo-descriptor
	# with sidt, so that it can be restored before returning
	.short 0
saved_idtr:
idtlim:	.short 0
idtaddr:.long 0
	# real mode IDTR pseudo-descriptor pointing to the IVT at addr 0
	.short 0
rmidt:	.short 0x3ff
	.long 0

saved_esp: .long 0
saved_ebp: .long 0

	# drop back to unreal mode to call 16bit interrupt
	.global int86
int86:
	push %ebp
	mov %esp, %ebp
	pushal
	cli
	# save protected mode IDTR and replace it with the real mode vectors
	sidt (saved_idtr)
	lidt (rmidt)

	# modify the int instruction (self-modifying: patch the vector
	# byte at int_op+1). do this here before the cs-load jumps, to
	# let them flush the instruction cache
	mov $int_op, %ebx
	movb 8(%ebp), %al
	movb %al, 1(%ebx)

	# long jump to load code selector for 16bit code (6)
	ljmp $0x30,$0f
0:
	.code16
	# disable protection (clear CR0.PE)
	mov %cr0, %eax
	and $0xfffe, %ax
	mov %eax, %cr0
	# load cs <- 0
	ljmp $0,$0f
0:	# zero data segments
	xor %ax, %ax
	mov %ax, %ds
	mov %ax, %es
	mov %ax, %ss
	nop

	# load registers from the int86regs struct
	# (12(%ebp) = second argument: pointer to the register struct)
	mov %esp, saved_esp
	mov %ebp, saved_ebp
	mov 12(%ebp), %esp
	popal
	mov saved_esp, %esp

	# call 16bit interrupt
int_op:	int $0

	# write the registers back into the struct: 32 bytes of pushal
	# frame plus a 2-byte flags word = 34 bytes total
	mov saved_ebp, %ebp
	mov 12(%ebp), %esp
	add $34, %esp
	pushfw
	pushal
	mov saved_esp, %esp

	# re-enable protection (set CR0.PE)
	mov %cr0, %eax
	or $1, %ax
	mov %eax, %cr0
	# long jump to load code selector for 32bit code (1)
	ljmp $0x8,$0f
0:
	.code32
	# set data selector (2) to all segment regs
	mov $0x10, %ax
	mov %ax, %ds
	mov %ax, %es
	mov %ax, %ss
	nop

	# restore 32bit interrupt descriptor table
	lidt (saved_idtr)
	sti
	popal
	pop %ebp
	ret


	# buffer used by the track loader ... to load tracks.
	.align 16
buffer: