--- /dev/null
+/*
+pcboot - bootable PC demo/game kernel
+Copyright (C) 2018 John Tsiombikas <nuclear@member.fsf.org>
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
+*/
+#include <stdlib.h>
+#include <errno.h>
+#include "mem.h"
+
+/* Header placed immediately before every heap block; the pointer handed to
+ * the caller points just past it (see DESC_PTR/PTR_DESC below).
+ */
+struct mem_desc {
+	size_t size;		/* total block size, header included */
+	uint32_t magic;		/* set to MAGIC at allocation time */
+	struct mem_desc *next;	/* next free block in the same size pool */
+};
+
+#define MAGIC 0xdeadf00d
+
+/* convert between a block header and the user-visible pointer */
+#define DESC_PTR(b) ((void*)((struct mem_desc*)(b) + 1))
+#define PTR_DESC(p) ((struct mem_desc*)(p) - 1)
+
+#define NUM_POOLS 16
+#define FIRST_POOL_POW2 5
+/* 2**(x+5) size pools: 0->32, 1->64, 2->128 .. 15->1048576 */
+#define POOL_SIZE(x) (1 << ((x) + FIRST_POOL_POW2))
+#define MAX_POOL_SIZE (1 << ((NUM_POOLS - 1) + FIRST_POOL_POW2))
+#define MAX_POOL_PAGES BYTES_TO_PAGES(MAX_POOL_SIZE)
+/* free lists, one per power-of-two size class */
+static struct mem_desc *pools[NUM_POOLS];
+
+static int add_to_pool(struct mem_desc *b);
+
+/* Map an allocation size to the index of the smallest pool that fits it:
+ * ceil(log2(sz)) - FIRST_POOL_POW2, clamped to 0.  Without the clamp, any
+ * size smaller than the smallest pool (32 bytes) would yield a negative
+ * index and make malloc read pools[] out of bounds.  The sz <= 1 guard also
+ * avoids right-shifting a negative value (after --sz) which would loop
+ * forever on arithmetic-shift targets.
+ */
+static int pool_index(int sz)
+{
+	int x = 0;
+
+	if(sz <= 1) {
+		return 0;
+	}
+
+	--sz;
+	while(sz) {
+		sz >>= 1;
+		++x;
+	}
+	return x < FIRST_POOL_POW2 ? 0 : x - FIRST_POOL_POW2;
+}
+
+/* Allocate sz bytes.  Returns a pointer just past the hidden mem_desc
+ * header, or NULL with errno = ENOMEM when no physical pages are available.
+ * Requests larger than the biggest pool bypass the pools and take whole
+ * pages directly from the physical page allocator.
+ */
+void *malloc(size_t sz)
+{
+	int pidx, pg0;
+	size_t total_sz, halfsz;
+	struct mem_desc *mem, *other;
+
+	/* account for the header stored in front of the user memory */
+	total_sz = sz + sizeof(struct mem_desc);
+
+	if(total_sz > MAX_POOL_SIZE) {
+		/*printf(" allocation too big, hitting sys_alloc directly\n");*/
+		if((pg0 = alloc_ppages(BYTES_TO_PAGES(total_sz))) == -1) {
+			errno = ENOMEM;
+			return 0;
+		}
+		mem = PAGE_TO_PTR(pg0);
+		mem->magic = MAGIC;
+		mem->size = total_sz;
+		mem->next = 0;
+		return DESC_PTR(mem);
+	}
+
+	/* NOTE(review): if total_sz < 32, pool_index can return a negative
+	 * value and pools[pidx] below indexes out of bounds -- verify.
+	 */
+	pidx = pool_index(total_sz);
+	while(pidx < NUM_POOLS) {
+		if(pools[pidx]) {
+			/* found a pool with a free block that fits */
+			mem = pools[pidx];
+			pools[pidx] = mem->next;
+			mem->next = 0;
+
+			/*printf(" using pool %d (size %ld)\n", pidx, (unsigned long)mem->size);*/
+
+			/* while the mem size is larger than twice the allocation, split it */
+			while(pidx-- > 0 && total_sz <= (halfsz = mem->size / 2)) {
+				/*printf(" splitting %ld block in half and ", (unsigned long)mem->size);*/
+				other = (struct mem_desc*)((char*)mem + halfsz);
+				/* NOTE(review): other->magic is never initialized for
+				 * split-off blocks; magic is only checked nowhere in this
+				 * chunk, but confirm against the rest of mem.c.
+				 */
+				mem->size = other->size = halfsz;
+
+				add_to_pool(other);
+			}
+
+			return DESC_PTR(mem);
+		}
+
+		/* this pool is empty, try the next (larger) size class */
+		++pidx;
+	}
+
+	/* did not find a free block, add a new one of the maximum pool size */
+	pidx = NUM_POOLS - 1;
+	if((pg0 = alloc_ppages(MAX_POOL_PAGES)) == -1) {
+		errno = ENOMEM;
+		return 0;
+	}
+	mem = PAGE_TO_PTR(pg0);
+	mem->size = MAX_POOL_SIZE;
+	mem->next = pools[pidx];
+	mem->magic = MAGIC;
+	pools[pidx] = mem;
+
+	/* try again now that there is a free block; recursion terminates
+	 * because the retry is guaranteed to find the block just added */
+	return malloc(sz);
+}
+
+
+/* Return a block obtained from malloc.  free(NULL) is a no-op, as the C
+ * library contract requires; the original code dereferenced the header of
+ * a null pointer in that case.
+ */
+void free(void *p)
+{
+	int pg0;
+	struct mem_desc *mem;
+
+	if(!p) return;
+
+	mem = PTR_DESC(p);
+
+	if(mem->size > MAX_POOL_SIZE) {
+		/* block was allocated as whole pages; return them to the system */
+		pg0 = ADDR_TO_PAGE(mem);
+		free_ppages(pg0, BYTES_TO_PAGES(mem->size));
+		return;
+	}
+
+	add_to_pool(mem);
+}
+
+/* Insert a free block into its size-class pool, first trying to coalesce it
+ * with an adjacent free block of the same size.  Always returns 0.
+ *
+ * Fixes over the previous version:
+ *  - `iter = pnode->next` never actually unlinked pnode from the free list
+ *    (the absorbed block stayed reachable, corrupting the pool); the correct
+ *    unlink is `iter->next = pnode->next`.
+ *  - coalescing two MAX_POOL_SIZE blocks produced a block whose pool_index
+ *    is NUM_POOLS, writing pools[] out of bounds; blocks already at the
+ *    maximum pool size are no longer coalesced.
+ */
+static int add_to_pool(struct mem_desc *mem)
+{
+	int pidx;
+	struct mem_desc head;
+	struct mem_desc *iter, *pnode;
+
+	pidx = pool_index(mem->size);
+
+	iter = &head;
+	head.next = pools[pidx];
+
+	while(mem->size < MAX_POOL_SIZE && iter->next) {
+		pnode = iter->next;
+		if(mem->size == pnode->size) {	/* only coalesce same-sized blocks */
+			if((char*)mem == (char*)pnode - pnode->size) {
+				/* mem directly precedes pnode: unlink pnode, grow mem */
+				iter->next = pnode->next;
+				pools[pidx] = head.next;
+				mem->next = 0;
+				mem->size += pnode->size;
+
+				return add_to_pool(mem);
+			}
+			if((char*)mem == (char*)pnode + pnode->size) {
+				/* mem directly follows pnode: unlink pnode, grow pnode */
+				iter->next = pnode->next;
+				pools[pidx] = head.next;
+				pnode->next = 0;
+				pnode->size += mem->size;
+
+				return add_to_pool(pnode);
+			}
+		}
+		iter = iter->next;
+	}
+
+	/* no adjacent same-sized block found; just push onto the free list */
+	mem->next = pools[pidx];
+	pools[pidx] = mem;
+	return 0;
+}
+
/* A bitmap is used to track which physical memory pages are used, and which
- * are available for allocation by alloc_phys_page.
+ * are available for allocation by alloc_ppage.
*
* last_alloc_idx keeps track of the last 32bit element in the bitmap array
* where a free page was found. It's guaranteed that all the elements before
}
}
-/* alloc_phys_page finds the first available page of physical memory,
- * marks it as used in the bitmap, and returns its number. If there's
- * no unused physical page, -1 is returned.
- */
int alloc_ppage(void)
{
- int i, idx, max, intr_state;
+ return alloc_ppages(1);
+}
+
+/* free_ppage marks the physical page, free in the allocation bitmap.
+ *
+ * CAUTION: no checks are done that this page should actually be freed or not.
+ * If you call free_ppage with the address of some part of memory that was
+ * originally reserved due to it being in a memory hole or part of the kernel
+ * image or whatever, it will be subsequently allocatable by alloc_ppage.
+ */
+void free_ppage(int pg)
+{
+	int bmidx = BM_IDX(pg);
+
+	/* bitmap updates must not be interleaved with interrupt handlers */
+	int intr_state = get_intr_flag();
+	disable_intr();
+
+	/* freeing an already-free page indicates allocator state corruption */
+	if(IS_FREE(pg)) {
+		panic("free_ppage(%d): I thought that was already free!\n", pg);
+	}
+
+	mark_page(pg, FREE);
+	/* track the lowest bitmap word that may contain a free page, so the
+	 * allocation scan can resume from there */
+	if(bmidx < last_alloc_idx) {
+		last_alloc_idx = bmidx;
+	}
+
+	set_intr_flag(intr_state);
+}
+
+
+int alloc_ppages(int count)
+{
+ int i, pg, idx, max, intr_state, found_free = 0;
intr_state = get_intr_flag();
disable_intr();
while(idx <= max) {
/* if at least one bit is 0 then we have at least
- * one free page. find it and allocate it.
+ * one free page. find it and try to allocate a range starting from there
*/
if(bitmap[idx] != 0xffffffff) {
for(i=0; i<32; i++) {
- int pg = idx * 32 + i;
+ pg = idx * 32 + i;
if(IS_FREE(pg)) {
- mark_page(pg, USED);
-
- last_alloc_idx = idx;
-
- /*printf("alloc_phys_page() -> %x (page: %d)\n", PAGE_TO_ADDR(pg), pg);*/
-
- set_intr_flag(intr_state);
- return pg;
+ if(!found_free) {
+ last_alloc_idx = idx;
+ found_free = 1;
+ }
+
+ if(alloc_ppage_range(pg, count) != -1) {
+ set_intr_flag(intr_state);
+ return pg;
+ }
}
}
- panic("can't happen: alloc_ppage (mem.c)\n");
}
idx++;
}
return -1;
}
-/* free_ppage marks the physical page, free in the allocation bitmap.
- *
- * CAUTION: no checks are done that this page should actually be freed or not.
- * If you call free_phys_page with the address of some part of memory that was
- * originally reserved due to it being in a memory hole or part of the kernel
- * image or whatever, it will be subsequently allocatable by alloc_phys_page.
- */
-void free_ppage(int pg)
+void free_ppages(int pg0, int count)
{
- int bmidx = BM_IDX(pg);
-
- int intr_state = get_intr_flag();
- disable_intr();
-
- if(IS_FREE(pg)) {
- panic("free_ppage(%d): I thought that was already free!\n", pg);
- }
+ int i;
- mark_page(pg, FREE);
- if(bmidx < last_alloc_idx) {
- last_alloc_idx = bmidx;
+ for(i=0; i<count; i++) {
+ free_ppage(pg0++);
}
-
- set_intr_flag(intr_state);
}
-
int alloc_ppage_range(int start, int size)
{
int i, pg = start;
+ int intr_state;
+
+ intr_state = get_intr_flag();
+ disable_intr();
/* first validate that no page in the requested range is allocated */
for(i=0; i<size; i++) {
for(i=0; i<size; i++) {
mark_page(pg++, USED);
}
+
+ set_intr_flag(intr_state);
return 0;
}