--- /dev/null
+#ifndef MALLOC_H_
+#define MALLOC_H_
+
+#include "malloc.h"
+#include "mem.h"
+#include "asmutil.h"
+#include "dbg.h"
+
+/* magic numbers stamped on every range descriptor; used to detect
+ * heap corruption and double-free (see kfree/assert_magic)
+ */
+#define MAGIC_FREE 0xbaadf00d
+#define MAGIC_ALLOC 0xdeadbeef
+
+/* descriptor placed at the start of every heap range, free or allocated.
+ * len is the full size of the range in bytes, INCLUDING this descriptor.
+ * free ranges are kept in an address-sorted doubly-linked list.
+ */
+struct memrange {
+ unsigned long magic, len;
+ struct memrange __far *next;
+ struct memrange __far *prev;
+};
+/* size of the descriptor itself */
+#define DESCSZ sizeof(struct memrange)
+/* descriptor sitting immediately before a pointer returned by kmalloc */
+#define DESC(p) ((struct memrange __far*)(p) - 1)
+/* one past the end of a range (equals the start of an adjacent next range) */
+#define RNGEND(rng) ((struct memrange __far*)((char __far*)(rng) + (rng)->len))
+
+/* head of the address-sorted free list */
+static struct memrange __far *pool;
+/* total free bytes in the heap, and number of outstanding allocations */
+static unsigned int freemem, num_alloc;
+
+/* initialize the kernel heap: grab numblk blocks from the physical block
+ * allocator and set them up as a single free range.
+ * returns 0 on success, -1 if the blocks can't be allocated.
+ */
+int kmalloc_init(int numblk)
+{
+	int first;
+
+	if((first = alloc_blocks(numblk)) == -1) {
+		return -1;
+	}
+	/* block number -> real-mode segment: blk * MEM_BLKSZ / 16 == blk << 6
+	 * (was `first >> 6`, which maps every low block to segment 0)
+	 */
+	pool = MK_FP(first << 6, 0);
+	/* only touch the descriptor AFTER pool points at the new memory */
+	pool->magic = MAGIC_FREE;
+	pool->len = (unsigned long)numblk * MEM_BLKSZ;
+	pool->next = pool->prev = 0;
+
+	freemem = pool->len;
+	num_alloc = 0;
+	return 0;
+}
+
+/* allocate sz bytes from the kernel heap (first fit).
+ * returns a far pointer to the memory, or 0 if the request can't be
+ * satisfied.
+ */
+void __far *kmalloc(unsigned long sz)
+{
+	__far struct memrange *pptr, *newfree;
+	unsigned long fullsz;
+
+	/* force paragraph-aligned sizes, including memrange desc */
+	fullsz = (sz + DESCSZ + 15) & 0xfffffff0ul;
+
+	pptr = pool;
+	while(pptr) {
+		if(fullsz <= pptr->len) {
+			if(fullsz < pptr->len) {
+				/* carve fullsz BYTES off the start of the range. cast to a
+				 * byte pointer first: memrange pointer arithmetic would
+				 * scale the offset by sizeof(struct memrange)
+				 */
+				newfree = (struct memrange __far*)((char __far*)pptr + fullsz);
+				newfree->magic = pptr->magic;
+				newfree->len = pptr->len - fullsz;
+				newfree->next = pptr->next;
+				newfree->prev = pptr->prev;
+				/* keep both neighbours' links consistent */
+				if(newfree->next) {
+					newfree->next->prev = newfree;
+				}
+				if(newfree->prev) {
+					newfree->prev->next = newfree;
+				} else {
+					pool = newfree;
+				}
+			} else {
+				/* exact fit: unlink the range instead of writing a
+				 * zero-length descriptor past the end of the pool
+				 */
+				if(pptr->next) {
+					pptr->next->prev = pptr->prev;
+				}
+				if(pptr->prev) {
+					pptr->prev->next = pptr->next;
+				} else {
+					pool = pptr->next;
+				}
+			}
+			pptr->magic = MAGIC_ALLOC;
+			pptr->len = fullsz;
+			pptr->prev = pptr->next = 0;
+
+			freemem -= fullsz;
+			num_alloc++;
+			return pptr + 1;	/* usable memory starts after the descriptor */
+		}
+
+		pptr = pptr->next;
+	}
+	return 0;
+}
+
+/* panic unless the given range descriptor carries the expected magic */
+static void assert_magic(struct memrange __far *p, unsigned long expect)
+{
+	void __far *usrptr;
+
+	if(p->magic == expect) {
+		return;
+	}
+	usrptr = p + 1;
+	panic("kfree: bad magic %lx @ %x:%x (len: %lx)", p->magic, FP_SEG(usrptr),
+			FP_OFFS(usrptr), p->len);
+}
+
+/* return a kmalloc-ed range to the heap, coalescing with adjacent free
+ * ranges. the free list stays sorted by address. panics on double free or
+ * a corrupted descriptor.
+ */
+void kfree(void __far *p)
+{
+	__far struct memrange *mem, *memend, *cur, *curend, *next, *last;
+
+	mem = DESC(p);
+	memend = RNGEND(mem);
+
+	if(mem->magic == MAGIC_FREE) {
+		panic("double kfree %x:%x (len: %lx)", FP_SEG(p), FP_OFFS(p), mem->len);
+	} else {
+		assert_magic(mem, MAGIC_ALLOC);
+	}
+	mem->magic = MAGIC_FREE;
+
+	freemem += mem->len;	/* freeing GROWS the free total (was -=) */
+	num_alloc--;
+
+	if(!pool) {
+		/* free list empty: this range becomes the whole pool */
+		pool = mem;
+		mem->next = mem->prev = 0;
+		return;
+	}
+
+	if(pool > memend) {
+		/* memory before pool, not adjacent, just prepend */
+		assert_magic(pool, MAGIC_FREE);
+		mem->next = pool;
+		mem->prev = 0;
+		pool->prev = mem;
+		pool = mem;
+		return;
+	}
+
+	if(pool == memend) {
+		/* memory adjacent before pool: absorb pool into mem */
+		assert_magic(pool, MAGIC_FREE);
+		mem->len += pool->len;
+		mem->next = pool->next;
+		mem->prev = 0;
+		if(mem->next) {
+			mem->next->prev = mem;	/* old second node must point back at mem */
+		}
+		pool->magic = 0;
+		pool = mem;
+		return;
+	}
+
+	/* find the first free range that doesn't end before mem */
+	cur = pool;
+	last = 0;
+	while(cur && mem > (curend = RNGEND(cur))) {
+		assert_magic(cur, MAGIC_FREE);
+		last = cur;
+		cur = cur->next;
+	}
+
+	if(!cur) {
+		/* memory after the end, non-adjacent */
+		if(!last) {
+			panic("unexpected lack of memory ranges in pool");
+		}
+		last->next = mem;
+		mem->prev = last;
+		mem->next = 0;
+		return;
+	}
+
+	if(curend == mem) {
+		/* memory adjacent at the end of cur: absorb mem into cur. don't
+		 * link mem's now-dead descriptor into the list
+		 */
+		cur->len += mem->len;
+		mem->magic = 0;
+		mem = cur;	/* fall through to check adjacency at the other end */
+	} else {
+		/* memory strictly between last and cur: link it in before cur
+		 * (this case was previously dropped, leaking the range)
+		 */
+		mem->prev = cur->prev;
+		mem->next = cur;
+		cur->prev = mem;
+		if(mem->prev) {
+			mem->prev->next = mem;
+		} else {
+			pool = mem;
+		}
+	}
+	if(memend == (next = mem->next)) {
+		/* memory adjacent at the start of the next range: absorb it */
+		next->magic = 0;
+		mem->len += next->len;
+		mem->next = next->next;
+		if(next->next) {
+			next->next->prev = mem;
+		}
+	}
+}
+
+#endif /* MALLOC_H_ */
--- /dev/null
+#include <inttypes.h>
+#include "mem.h"
+#include "malloc.h"
+#include "dbg.h"
+
+/* bytes -> 1k blocks (MEM_BLKSZ is 1k, per BLKBYTES below) */
+#define MEMBLOCKS(x) ((x) >> 10)
+/* 64k real-mode segments -> 1k blocks (64 blocks per segment) */
+#define SEGBLOCKS(x) ((x) << 6)
+/* blocks -> bytes */
+#define BLKBYTES(x) ((unsigned long)(x) << 10)
+
+/* number of blocks covering the 1mb real-mode address space */
+#define MAX_BLOCKS (1048576 / MEM_BLKSZ)
+/* bitmap size in 16-bit words: one bit per block, 1 = allocated */
+#define BLKMAP_SZ (MAX_BLOCKS / 16)
+/* word index and bit position of a block within the bitmap */
+#define BLKMAP_WIDX(x) ((x) >> 4)
+#define BLKMAP_WBIT(x) ((x) & 0xf)
+
+/* allocation bitmap for all physical blocks */
+static uint16_t blkmap[BLKMAP_SZ];
+
+static void mark(int blk, int n, int val);
+#define mark_alloc(blk, n) mark(blk, n, 1)
+#define mark_free(blk, n) mark(blk, n, 0)
+
+
+/* set up the physical block allocator and the kernel heap.
+ * returns 0 on success, -1 on failure to reserve the heap.
+ */
+int mem_init(void)
+{
+	/* TODO detect amount of memory instead of assuming 640k */
+
+	/* the first 64k segment is reserved for kernel code/data/stack */
+	mark_alloc(0, SEGBLOCKS(1));
+	/* the 384k between 640k and 1mb (video/ROM area) is never usable */
+	mark_alloc(MEMBLOCKS(0xa0000ul), MEMBLOCKS(0x60000ul));
+
+	/* let the kernel malloc grab 32k */
+	return kmalloc_init(32);
+}
+
+/* find and reserve n consecutive free blocks.
+ * returns the first block number of the run, or -1 if no run of n free
+ * blocks exists. panics on a non-positive count.
+ */
+int alloc_blocks(int n)
+{
+	int widx, bit, start = 0, run = 0;
+
+	if(n <= 0) {
+		panic("BUG: alloc_blocks(%x)", n);
+	}
+
+	for(widx=0; widx<BLKMAP_SZ; widx++) {
+		if(blkmap[widx] == 0xffff) {
+			/* fully allocated word: any current run ends here */
+			run = 0;
+			continue;
+		}
+
+		for(bit=0; bit<16; bit++) {
+			if((blkmap[widx] >> bit) & 1) {
+				/* allocated block breaks the run */
+				run = 0;
+				continue;
+			}
+			if(run == 0) {
+				/* free block starts a new run; remember where */
+				start = (widx << 4) + bit;
+			}
+			if(++run >= n) {
+				/* done! mark the run allocated, hand back its first block */
+				mark_alloc(start, run);
+				return start;
+			}
+		}
+	}
+
+	/* failed to find n consecutive free blocks */
+	return -1;
+}
+
+/* release n consecutive blocks starting at blk back to the pool */
+void free_blocks(int blk, int n)
+{
+	mark(blk, n, 0);
+}
+
+/* set (val non-zero) or clear the bitmap bits for n blocks starting at blk,
+ * working through the bitmap at most one 16-bit word per iteration
+ */
+static void mark(int blk, int n, int val)
+{
+	/* lowbits[k] has the low k bits set */
+	static const uint16_t lowbits[] = {0, 1, 3, 7, 0xf, 0x1f, 0x3f, 0x7f, 0xff,
+		0x1ff, 0x3ff, 0x7ff, 0xfff, 0x1fff, 0x3fff, 0x7fff, 0xffff};
+	uint16_t mask;
+	int word, startbit, count;
+
+	while(n > 0) {
+		word = BLKMAP_WIDX(blk);
+		startbit = BLKMAP_WBIT(blk);
+
+		/* handle whatever fits in the current word */
+		count = 16 - startbit;
+		if(count > n) {
+			count = n;
+		}
+		mask = lowbits[count] << startbit;
+
+		if(val) {
+			blkmap[word] |= mask;
+		} else {
+			blkmap[word] &= ~mask;
+		}
+
+		blk += count;
+		n -= count;
+	}
+}