/* Pointer to the start of legacy VGA memory at physical 0xa0000.  Two
 * variants are defined; presumably an #ifdef (elided from this excerpt)
 * selects the DJGPP near-pointer form vs. the flat-address form -- TODO
 * confirm against the full file. */
15 #define VMEM_PTR ((void*)(0xa0000 + __djgpp_conventional_base))
17 #define VMEM_PTR ((void*)0xa0000)
/* True when two color depths are interchangeable for mode matching:
 * exact match, or the commonly equivalent 15/16 and 24/32 bpp pairs. */
20 #define SAME_BPP(a, b) \
21 ((a) == (b) || ((a) == 16 && (b) == 15) || ((a) == 15 && (b) == 16) || \
22 ((a) == 32 && (b) == 24) || ((a) == 24 && (b) == 32))
/* Frame blit entry point; set_video_mode points this at either the linear
 * framebuffer or the banked-window implementation below. */
24 void (*blit_frame)(void*, int);
26 static void blit_frame_lfb(void *pixels, int vsync);
27 static void blit_frame_banked(void *pixels, int vsync);
28 static uint32_t calc_mask(int sz, int pos);
/* MTRR write-combining helpers (privileged MSR access, ring-0 only). */
30 static void enable_wrcomb(uint32_t addr, int len);
31 static const char *mtrr_type_name(int type);
32 static void print_mtrr(void);
/* Dynamically grown list of modes enumerated from the VBE BIOS. */
34 static struct video_mode *vmodes;
35 static int num_vmodes;
36 /* Major VBE version detected at init (see VBE_VER_MAJOR use below). */
37 static int vbe_init_ver;
38 static struct vbe_info vbe;
/* State of the currently active mode. */
41 static struct video_mode *curmode;
42 static void *vpgaddr[2];	/* mapped address of each video page */
43 static int frontidx, backidx;	/* visible page / draw page indices */
44 static int pgcount, pgsize, fbsize;	/* pages, bytes per page, total bytes */
/* Body excerpt of the video init routine (signature elided from this
 * excerpt): queries VBE info, enumerates all supported modes into the
 * vmodes list, and records the VBE major version. */
49 int i, num, max_modes;
50 struct video_mode *vmptr;
/* Query the VBE controller; all later mode enumeration depends on this. */
52 if(vbe_info(&vbe) == -1) {
53 errormsg("failed to retrieve VBE information\n");
56 vbe_print_info(stdout, &vbe);
/* Initial allocation for the mode list; grown below as needed. */
60 if(!(vmodes = malloc(max_modes * sizeof *vmodes))) {
61 errormsg("failed to allocate video modes list\n");
65 num = vbe_num_modes(&vbe);
66 for(i=0; i<num; i++) {
67 struct vbe_mode_info minf;
/* Skip modes whose info call fails (presumably continue -- elided). */
69 if(vbe_mode_info(vbe.modes[i], &minf) == -1) {
/* Grow the list geometrically: double, seeded at 16 entries.  Note the
 * correct realloc idiom: result goes to vmptr so vmodes stays valid on
 * failure. */
73 if(num_vmodes >= max_modes) {
74 int newmax = max_modes ? (max_modes << 1) : 16;
75 if(!(vmptr = realloc(vmodes, newmax * sizeof *vmodes))) {
76 errormsg("failed to grow video mode list (%d)\n", newmax);
/* Append and populate the new mode record from the VBE mode info. */
84 vmptr = vmodes + num_vmodes++;
85 memset(vmptr, 0, sizeof *vmptr);
86 vmptr->mode = vbe.modes[i];
87 vmptr->xsz = minf.xres;
88 vmptr->ysz = minf.yres;
89 vmptr->bpp = minf.bpp;
90 vmptr->pitch = minf.scanline_bytes;
/* Direct-color modes additionally carry the RGB channel layout. */
91 if(minf.mem_model == VBE_TYPE_DIRECT) {
92 vmptr->rbits = minf.rsize;
93 vmptr->gbits = minf.gsize;
94 vmptr->bbits = minf.bsize;
95 vmptr->rshift = minf.rpos;
96 vmptr->gshift = minf.gpos;
97 vmptr->bshift = minf.bpos;
98 vmptr->rmask = calc_mask(minf.rsize, minf.rpos);
99 vmptr->gmask = calc_mask(minf.gsize, minf.gpos);
100 vmptr->bmask = calc_mask(minf.bsize, minf.bpos);
101 /*vmptr->bpp = vmptr->rbits + vmptr->gbits + vmptr->bbits;*/
/* Record the physical framebuffer address only for LFB-capable modes. */
103 if(minf.attr & VBE_ATTR_LFB) {
104 vmptr->fb_addr = minf.fb_addr;
106 vmptr->max_pages = minf.num_img_pages;
107 vmptr->win_gran = minf.win_gran;
109 infomsg("%04x: ", vbe.modes[i]);
110 vbe_print_mode_info(stdout, &minf);
/* Remember the BIOS VBE major version for later capability checks. */
114 vbe_init_ver = VBE_VER_MAJOR(vbe.ver);
/* Tear down video state (body elided from this excerpt). */
118 void cleanup_video(void)
/* Accessors for the enumerated mode list. */
123 struct video_mode *video_modes(void)
128 int num_video_modes(void)
/* Return mode by index; VMODE_CURRENT presumably maps to curmode. */
133 struct video_mode *get_video_mode(int idx)
135 if(idx == VMODE_CURRENT) {
/* Find the index of a mode matching xsz x ysz at depth bpp.  Modes with
 * interchangeable depths (15/16, 24/32 via SAME_BPP) are acceptable; the
 * scan keeps going in hope of an exact bpp match and stops early when one
 * is found.  Failure path prints an error (return value elided from this
 * excerpt; presumably -1). */
141 int match_video_mode(int xsz, int ysz, int bpp)
144 struct video_mode *vm;
146 for(i=0; i<num_vmodes; i++) {
148 if(vm->xsz != xsz || vm->ysz != ysz) continue;
/* remember a bpp-compatible candidate, but prefer an exact match */
149 if(SAME_BPP(vm->bpp, bpp)) {
152 if(vm->bpp == bpp) break;
/* NOTE(review): stray ')' in the message below has no matching '(' */
156 errormsg("failed to find video mode %dx%d %d bpp)\n", xsz, ysz, bpp);
/* Look up a mode by its raw VBE mode number; returns its index in vmodes
 * (not-found return elided from this excerpt; presumably -1). */
162 int find_video_mode(int mode)
165 struct video_mode *vm;
168 for(i=0; i<num_vmodes; i++) {
169 if(vm->mode == mode) return i;
/* Switch to vmodes[idx] with nbuf pages (clamped to 1..2 and to what the
 * hardware reports).  Tries the linear-framebuffer variant of the mode
 * first and falls back to banked window access.  Returns the address of
 * the first video page (NULL/error handling partly elided from this
 * excerpt). */
174 void *set_video_mode(int idx, int nbuf)
177 struct video_mode *vm = vmodes + idx;
/* already in this mode: nothing to do */
179 if(curmode == vm) return vpgaddr[0];
181 infomsg("setting video mode %x (%dx%d %d bpp)\n", (unsigned int)vm->mode,
182 vm->xsz, vm->ysz, vm->bpp);
/* First attempt: the mode with the LFB bit set. */
185 mode = vm->mode | VBE_MODE_LFB;
186 if(vbe_setmode(mode) == -1) {
187 infomsg("Warning: failed to get a linear framebuffer. falling back to banked mode\n");
/* Second attempt (presumably without VBE_MODE_LFB -- elided). */
189 if(vbe_setmode(mode) == -1) {
190 errormsg("failed to set video mode %x\n", (unsigned int)vm->mode);
195 /* unmap previous video memory mapping, if there was one (switching modes) */
196 if(vpgaddr[0] && vpgaddr[0] != VMEM_PTR) {
197 dpmi_munmap(vpgaddr[0]);
198 vpgaddr[0] = vpgaddr[1] = 0;
/* Clamp requested buffering to [1,2] and to the hardware page count
 * (num_img_pages counts pages beyond the first, hence max_pages + 1). */
202 if(nbuf < 1) nbuf = 1;
203 if(nbuf > 2) nbuf = 2;
204 pgcount = nbuf > vm->max_pages + 1 ? vm->max_pages + 1 : nbuf;
205 pgsize = vm->ysz * vm->pitch;
206 fbsize = pgcount * pgsize;
209 infomsg("rgb mask: %x %x %x\n", (unsigned int)vm->rmask,
210 (unsigned int)vm->gmask, (unsigned int)vm->bmask);
211 infomsg("rgb shift: %d %d %d\n", vm->rshift, vm->gshift, vm->bshift);
213 infomsg("pgcount: %d, pgsize: %d, fbsize: %d\n", pgcount, pgsize, fbsize);
215 infomsg("phys addr: %p\n", (void*)vm->fb_addr);
/* LFB path: map the physical framebuffer into our address space. */
220 vpgaddr[0] = (void*)dpmi_mmap(vm->fb_addr, fbsize);
222 errormsg("failed to map framebuffer (phys: %lx, size: %d)\n",
223 (unsigned long)vm->fb_addr, fbsize);
/* fill with a recognizable debug pattern */
227 memset(vpgaddr[0], 0xaa, pgsize);
/* Second page sits immediately after the first in the mapping. */
230 vpgaddr[1] = (char*)vpgaddr[0] + pgsize;
232 page_flip(FLIP_NOW); /* start with the second page visible */
234 frontidx = backidx = 0;
238 blit_frame = blit_frame_lfb;
240 /* only attempt to set up write combining if the CPU we're running on
241 * supports memory type range registers, and we're running on ring 0
246 errormsg("Can't set framebuffer range to write-combining, running in ring %d\n", cpl);
/* vmem_blk is reported in 64k blocks, hence the << 16. */
248 uint32_t len = (uint32_t)vbe.vmem_blk << 16;
250 /* if vmem_blk is 0 or if the reported size is absurd (more than
251 * 256mb), just use the framebuffer size for this mode to setup the
254 if(!len || len > 0x10000000) {
255 infomsg("reported vmem too large or overflowed, using fbsize for wrcomb setup\n");
259 enable_wrcomb(vm->fb_addr, len);
/* Banked fallback: draw through the 64k window at 0xa0000. */
264 vpgaddr[0] = VMEM_PTR;
267 blit_frame = blit_frame_banked;
269 /* calculate window granularity shift */
270 vm->win_gran_shift = 0;
271 vm->win_64k_step = 1;
272 if(vm->win_gran > 0 && vm->win_gran < 64) {
273 int gran = vm->win_gran;
275 vm->win_gran_shift++;
/* number of granularity units to advance the window by 64k */
278 vm->win_64k_step = 1 << vm->win_gran_shift;
281 infomsg("granularity: %dk (step: %d)\n", vm->win_gran, vm->win_64k_step);
/* Return to text mode, releasing any LFB mapping first (the mapping is
 * only freed if it isn't the fixed banked-window pointer). */
288 int set_text_mode(void)
290 /* unmap previous video memory mapping, if there was one (switching modes) */
291 if(vpgaddr[0] && vpgaddr[0] != VMEM_PTR) {
292 dpmi_munmap(vpgaddr[0]);
293 vpgaddr[0] = vpgaddr[1] = 0;
/* Display the current back page (waiting for vblank when vsync is set)
 * and return the address of the new back page to draw into.  With a
 * single page, flipping is a no-op. */
301 void *page_flip(int vsync)
304 /* page flipping not supported */
308 vbe_swap(backidx ? pgsize : 0, vsync ? VBE_SWAP_VBLANK : VBE_SWAP_NOW);
/* toggle which page is the back buffer */
310 backidx = (backidx + 1) & 1;
312 return vpgaddr[backidx];
/* Blit one full frame straight into the mapped linear framebuffer page. */
316 static void blit_frame_lfb(void *pixels, int vsync)
318 if(vsync) wait_vsync();
319 memcpy(vpgaddr[frontidx], pixels, pgsize);
/* Blit one frame through the 64k banked window at VMEM_PTR, advancing the
 * VBE window by win_64k_step granularity units per 64k chunk copied. */
322 static void blit_frame_banked(void *pixels, int vsync)
324 int sz, offs, pending;
325 unsigned char *pptr = pixels;
327 if(vsync) wait_vsync();
329 /* assume initial window offset at 0 */
/* copy at most one 64k window's worth per iteration */
333 sz = pending > 65536 ? 65536 : pending;
334 memcpy(VMEM_PTR, pptr, sz);
337 offs += curmode->win_64k_step;
/* Build a channel bit mask of sz consecutive 1 bits; presumably shifted
 * left by pos on a line elided from this excerpt -- TODO confirm. */
343 static uint32_t calc_mask(int sz, int pos)
347 mask = (mask << 1) | 1;
/* x86 MTRR machine-specific register numbers and flag bits.  Variable
 * range i uses a base/mask MSR pair at 0x200 + 2*i / 0x201 + 2*i. */
352 #define MSR_MTRRCAP 0xfe
353 #define MSR_MTRRDEFTYPE 0x2ff
354 #define MSR_MTRRBASE(x) (0x200 | ((x) << 1))
355 #define MSR_MTRRMASK(x) (0x201 | ((x) << 1))
356 #define MTRRDEF_EN 0x800	/* MTRRdefType: MTRRs globally enabled */
357 #define MTRRCAP_HAVE_WC 0x400	/* MTRRcap: write-combining supported */
358 #define MTRRMASK_VALID 0x800	/* PhysMask: this variable range is in use */
/* Determine the MTRR memory type in effect for physical address addr by
 * scanning the num_ranges variable-range MTRRs; when no valid range
 * matches, falls through to the default type from MSR_MTRRDEFTYPE
 * (return statements elided from this excerpt). */
362 static int get_page_memtype(uint32_t addr, int num_ranges)
365 uint32_t rlow, rhigh;
368 for(i=0; i<num_ranges; i++) {
369 get_msr(MSR_MTRRMASK(i), &rlow, &rhigh);
/* skip ranges whose valid bit is clear */
370 if(!(rlow & MTRRMASK_VALID)) {
/* only the low 32 bits of the physical mask/base are used here */
373 mask = rlow & 0xfffff000;
375 get_msr(MSR_MTRRBASE(i), &rlow, &rhigh);
376 base = rlow & 0xfffff000;
/* range matches when the masked address equals the masked base */
378 if((addr & mask) == (base & mask)) {
383 get_msr(MSR_MTRRDEFTYPE, &rlow, &rhigh);
/* Return whether the range starting at addr is already set to
 * write-combining (len handling elided from this excerpt; only the type
 * of the first address is visibly checked here). */
387 static int check_wrcomb_enabled(uint32_t addr, int len, int num_ranges)
390 if(get_page_memtype(addr, num_ranges) != MTRR_WC) {
/* Find a free variable-range MTRR: the first one whose PhysMask valid bit
 * is clear.  Returns its index (failure return elided from this excerpt;
 * the caller treats -1 as "no free MTRRs"). */
399 static int alloc_mtrr(int num_ranges)
402 uint32_t rlow, rhigh;
404 for(i=0; i<num_ranges; i++) {
405 get_msr(MSR_MTRRMASK(i), &rlow, &rhigh);
406 if(!(rlow & MTRRMASK_VALID)) {
/* Program a free variable-range MTRR to mark [addr, addr+len) as
 * write-combining.  Both addr and len must be 4k-aligned; requires the
 * privileged get_msr/set_msr primitives (ring 0).  MTRRs are globally
 * disabled around the update, as the visible MTRRDEF_EN toggling shows. */
413 static void enable_wrcomb(uint32_t addr, int len)
415 int num_ranges, mtrr;
416 uint32_t rlow, rhigh;
/* both base and length must be page (4k) aligned for an MTRR */
419 if(len <= 0 || (addr | (uint32_t)len) & 0xfff) {
420 errormsg("failed to enable write combining, unaligned range: %p/%x\n",
421 (void*)addr, (unsigned int)len);
/* MTRRcap: low byte = number of variable ranges, bit 10 = WC support */
425 get_msr(MSR_MTRRCAP, &rlow, &rhigh);
426 num_ranges = rlow & 0xff;
428 infomsg("enable_wrcomb: addr=%p len=%x\n", (void*)addr, (unsigned int)len);
430 if(!(rlow & MTRRCAP_HAVE_WC)) {
431 errormsg("failed to enable write combining, processor doesn't support it\n");
/* nothing to do if the range is already write-combining */
435 if(check_wrcomb_enabled(addr, len, num_ranges)) {
439 if((mtrr = alloc_mtrr(num_ranges)) == -1) {
440 errormsg("failed to enable write combining, no free MTRRs\n");
/* build the PhysMask value covering len bytes (low 32 bits only) */
450 mask = ~mask & 0xfffff000;
452 infomsg(" ... mask: %08x\n", (unsigned int)mask);
/* disable MTRRs while reprogramming, then restore the enable bit */
455 get_msr(MSR_MTRRDEFTYPE, &def, &rhigh);
456 set_msr(MSR_MTRRDEFTYPE, def & ~MTRRDEF_EN, rhigh);
458 set_msr(MSR_MTRRBASE(mtrr), addr | MTRR_WC, 0);
459 set_msr(MSR_MTRRMASK(mtrr), mask | MTRRMASK_VALID, 0);
/* NOTE(review): high word written as 0 here but rhigh was preserved at
 * the disable step above -- harmless if the high bits are 0, but
 * inconsistent; verify against the full file */
461 set_msr(MSR_MTRRDEFTYPE, def | MTRRDEF_EN, 0);
/* Printable names for MTRR memory type codes, indexed by type value
 * (1 = write-combining, 4 = write-through, 5 = write-protect,
 * 6 = write-back; others unused here). */
465 static const char *mtrr_names[] = { "N/A", "W C", "N/A", "N/A", "W T", "W P", "W B" };
/* Map an MTRR type code to a printable name; out-of-range codes yield
 * the "N/A" placeholder at index 0. */
467 static const char *mtrr_type_name(int type)
469 if(type < 0 || type >= sizeof mtrr_names / sizeof *mtrr_names) {
470 return mtrr_names[0];
472 return mtrr_names[type];
/* Debug dump of all variable-range MTRRs: base, mask and memory type for
 * valid ranges, raw register contents for unused ones. */
475 static void print_mtrr(void)
478 uint32_t rlow, rhigh, base, mask;
/* number of variable ranges is the low byte of MTRRcap */
480 get_msr(MSR_MTRRCAP, &rlow, &rhigh);
481 num_ranges = rlow & 0xff;
483 for(i=0; i<num_ranges; i++) {
484 get_msr(MSR_MTRRBASE(i), &base, &rhigh);
485 get_msr(MSR_MTRRMASK(i), &mask, &rhigh);
487 if(mask & MTRRMASK_VALID) {
/* the low byte of PhysBase holds the memory type code */
488 infomsg("mtrr%d: base %p, mask %08x type %s\n", i, (void*)(base & 0xfffff000),
489 (unsigned int)(mask & 0xfffff000), mtrr_type_name(base & 0xff));
491 infomsg("mtrr%d unused (%08x/%08x)\n", i, (unsigned int)base,