X-Git-Url: http://git.mutantstargoat.com/user/nuclear/?a=blobdiff_plain;f=src%2Fboot%2Fboot2.s;h=15b1cb1bd6e64e55fb78c71c3400cdc48066f739;hb=79be435fcf61613617025a2c4ef5fddf00a09178;hp=5422c0b5d529c2db14f89a3e4aa1e970126f5c93;hpb=34ae2440d7cd133051523bce2aac0760cf8296eb;p=bootcensus

diff --git a/src/boot/boot2.s b/src/boot/boot2.s
index 5422c0b..15b1cb1 100644
--- a/src/boot/boot2.s
+++ b/src/boot/boot2.s
@@ -34,11 +34,14 @@
 	# enable A20 address line
 	call enable_a20
 
+	# detect available memory
+	call detect_memory
+
 	# load the whole program into memory starting at 1MB
 	call load_main
 
-	mov $0x13, %ax
-	int $0x10
+	#mov $0x13, %ax
+	#int $0x10
 
 	# load initial GDT
 	lgdt (gdt_lim)
@@ -349,14 +352,15 @@ abort_read:
 	jmp 0b
 
 
-	# better print routines, since we're not constrainted by the 512b of
 # the boot sector.
+	.global cursor_x
+	.global cursor_y
 cursor_x: .long 0
 cursor_y: .long 0
 
 
 putchar:
-	pusha
+	pushal
 	call ser_putchar
 
 	cmp $10, %al
@@ -382,7 +386,7 @@ putchar:
 	jnz 1f
 	call video_newline
 
-1:	popa
+1:	popal
 	ret
 
 	# expects string pointer in esi
@@ -398,7 +402,7 @@ putstr:
 	# expects number in eax
 print_num:
 	# save registers
-	pusha
+	pushal
 
 	mov $numbuf + 16, %esi
 	movb $0, (%esi)
@@ -415,7 +419,7 @@ convloop:
 	call putstr
 
 	# restore regs
-	popa
+	popal
 
 	ret
 
@@ -429,7 +433,7 @@ video_newline:
 0:	ret
 
 scrollup:
-	pusha
+	pushal
 	# move 80 * 24 lines from b80a0 -> b8000
 	mov $0xb8000, %edi
 	mov $0xb80a0, %esi
@@ -440,7 +444,7 @@ scrollup:
 	xor %eax, %eax
 	mov $40, %ecx
 	addr32 rep stosl
-	popa
+	popal
 	ret
 
 clearscr:
@@ -552,5 +556,170 @@ kbc_wait_write:
 numbuf: .space 16
 
+
+	# sets the carry flag on failure
+detect_memory:
+	mov $buffer, %edi
+	xor %ebx, %ebx
+	mov $0x534d4150, %edx
+
+memdet_looptop:
+	mov $0xe820, %eax
+	mov $24, %ecx
+	int $0x15
+	jc memdet_fail
+	cmp $0x534d4150, %eax
+	jnz memdet_fail
+
+	mov buffer, %eax
+	mov $boot_mem_map, %esi
+	mov boot_mem_map_size, %ebp
+	# again, that's [ebp * 8 + esi]
+	mov %eax, (%esi,%ebp,8)
+
+	# only care for type 1 (usable ram), otherwise ignore
+	cmpl $1, 16(%edi)
+	jnz memdet_skip
+
+	# skip areas with 0 size (also clamp size to 4gb)
+	# test high 32bits
+	cmpl $0, 12(%edi)
+	jz memdet_highzero
+	# high part is non-zero, make low part ffffffff
+	xor %eax, %eax
+	not %eax
+	jmp 0f
+
+memdet_highzero:
+	# if both high and low parts are zero, ignore
+	mov 8(%edi), %eax
+	cmpl $0, %eax
+	jz memdet_skip
+
+0:	mov %eax, 4(%esi,%ebp,8)
+	incl boot_mem_map_size
+
+memdet_skip:
+	# terminate the loop if ebx was reset to 0
+	cmp $0, %ebx
+	jz memdet_done
+	jmp memdet_looptop
+
+memdet_done:
+	ret
+
+memdet_fail:
+	# if size > 0, then it's not a failure, just the end
+	cmpl $0, boot_mem_map_size
+	jnz memdet_done
+
+	# just panic...
+	mov $memdet_fail_msg, %esi
+	call putstr
+0:	hlt
+	jmp 0b
+
+memdet_fail_msg: .asciz "Failed to detect available memory!\n"
+
+	.global boot_mem_map_size
+boot_mem_map_size: .long 0
+	.global boot_mem_map
+boot_mem_map: .space 128
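
detect_memory packs each usable E820 range into an 8-byte boot_mem_map entry: the low dword of the base goes at offset 0, the size (clamped to 0xffffffff for ranges above 4GB) at offset 4, boot_mem_map_size counts the entries, and the .space 128 leaves room for 16 of them. A minimal C-side sketch of that layout, assuming the kernel consumes the map as a plain array; the struct and field names are illustrative, only boot_mem_map and boot_mem_map_size are real symbols from the code above:

    #include <stdint.h>

    /* one 8-byte record per usable RAM range, matching the
     * (%esi,%ebp,8) addressing used by detect_memory */
    struct boot_mem_range {
        uint32_t base;  /* low 32 bits of the E820 region base */
        uint32_t size;  /* size in bytes, clamped to 0xffffffff above 4GB */
    };

    extern struct boot_mem_range boot_mem_map[16];  /* .space 128 = 16 entries */
    extern uint32_t boot_mem_map_size;              /* number of valid entries */
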
+
+
+# this is not boot loader code. It's called later on by the main kernel
+# code in 32bit protected mode. It's placed here because it needs to be
+# located in base memory as it returns and runs in real mode.
+
+	.code32
+	.align 4
+	# place to save the protected mode IDTR pseudo-descriptor
+	# with sidt, so that it can be restored before returning
+	.short 0
+saved_idtr:
+idtlim:	.short 0
+idtaddr:.long 0
+	# real mode IDTR pseudo-descriptor pointing to the IVT at addr 0
+	.short 0
+rmidt:	.short 0x3ff
+	.long 0
+
+saved_esp: .long 0
+saved_ebp: .long 0
+
+	# drop back to unreal mode to call 16bit interrupt
+	.global int86
+int86:
+	push %ebp
+	mov %esp, %ebp
+	pushal
+	cli
+	# save protected mode IDTR and replace it with the real mode vectors
+	sidt (saved_idtr)
+	lidt (rmidt)
+
+	# modify the int instruction do this here before the
+	# cs-load jumps, to let them flush the instruction cache
+	mov $int_op, %ebx
+	movb 8(%ebp), %al
+	movb %al, 1(%ebx)
+
+	# long jump to load code selector for 16bit code (6)
+	ljmp $0x30,$0f
+0:
+	.code16
+	# disable protection
+	mov %cr0, %eax
+	and $0xfffe, %ax
+	mov %eax, %cr0
+	# load cs <- 0
+	ljmp $0,$0f
+0:	# zero data segments
+	xor %ax, %ax
+	mov %ax, %ds
+	mov %ax, %es
+	mov %ax, %ss
+	nop
+
+	# load registers from the int86regs struct
+	mov %esp, saved_esp
+	mov %ebp, saved_ebp
+	mov 12(%ebp), %esp
+	popal
+	mov saved_esp, %esp
+
+	# call 16bit interrupt
+int_op:	int $0
+
+	mov saved_ebp, %ebp
+	mov 12(%ebp), %esp
+	add $34, %esp
+	pushfw
+	pushal
+	mov saved_esp, %esp
+
+	# re-enable protection
+	mov %cr0, %eax
+	or $1, %ax
+	mov %eax, %cr0
+	# long jump to load code selector for 32bit code (1)
+	ljmp $0x8,$0f
+0:
+	.code32
+	# set data selector (2) to all segment regs
+	mov $0x10, %ax
+	mov %ax, %ds
+	mov %ax, %es
+	mov %ax, %ss
+	nop
+
+	# restore 32bit interrupt descriptor table
+	lidt (saved_idtr)
+	sti
+	popal
+	pop %ebp
+	ret
+
+
 	# buffer used by the track loader ... to load tracks.
 	.align 16
 buffer:
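
The register block that int86 takes as its second argument can be read off the popal before the int and the pushal/pushfw after it: eight 32-bit registers in pushal order followed by a 16-bit flags word, 34 bytes in total (hence the add $34, %esp). A hedged C sketch of that layout and of the call signature implied by the 8(%ebp)/12(%ebp) accesses; the kernel's actual declaration lives in its own headers and may differ in naming:

    #include <stdint.h>

    /* pushal order in memory, lowest address first, then the flags word
     * written by the pushfw after the BIOS call returns */
    struct int86regs {
        uint32_t edi, esi, ebp, esp, ebx, edx, ecx, eax;
        uint16_t flags;
    } __attribute__((packed));  /* 32 + 2 = 34 bytes, matching "add $34" */

    /* 8(%ebp) is the interrupt number (only its low byte is patched into
     * the int opcode); 12(%ebp) points to the register block, which is
     * loaded before the call and overwritten with the returned state */
    void int86(int intr, struct int86regs *regs);

Under these assumed names, a caller would fill in the input registers, invoke something like int86(0x10, &regs) for a video BIOS service, and read the results and carry flag back out of the same structure.
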