/* NOTE(review): pasted-listing fragment — each line below carries a stray
 * decimal line-number prefix, and several struct lines are missing (the
 * openers/closers, and members referenced later in this file: vattr .vbo,
 * cm->name, cm->ibo, wire_ibo_valid, aabb_valid, bsph_center/bsph_radius/
 * bsph_valid). The first three members appear to belong to
 * struct cmesh_vattrib, the rest to struct cmesh — confirm against the
 * original cmesh header before restoring. */
8 int nelem; /* num elements per attribute [1, 4] */
9 float *data; /* dynarr */
11 int vbo_valid, data_valid;
17 unsigned int nverts, nfaces;
19 /* current value for each attribute for the immediate mode interface */
20 cgm_vec4 cur_val[CMESH_NUM_ATTR];
22 unsigned int buffer_objects[CMESH_NUM_ATTR + 1];
23 struct cmesh_vattrib vattr[CMESH_NUM_ATTR];
25 unsigned int *idata; /* dynarr */
27 int ibo_valid, idata_valid;
29 /* index buffer for wireframe rendering (constructed on demand) */
30 unsigned int wire_ibo;
33 /* axis-aligned bounding box */
34 cgm_vec3 aabb_min, aabb_max;
42 static int sdr_loc[CMESH_NUM_ATTR] = {0, 1, 2, 3, 4, 5, 6, 7};
46 void cmesh_set_attrib_sdrloc(int attr, int loc)
51 int cmesh_get_attrib_sdrloc(int attr)
56 void cmesh_clear_attrib_sdrloc(void)
59 for(i=0; i<CMESH_NUM_ATTR; i++) {
65 struct cmesh *cmesh_alloc(void)
69 if(!(cm = malloc(sizeof *cm))) {
72 if(cmesh_init(cm) == -1) {
79 void cmesh_free(struct cmesh *cm)
85 int cmesh_init(struct cmesh *cm)
89 memset(cm, 0, sizeof *cm);
90 cgm_wcons(cm->cur_val + CMESH_ATTR_COLOR, 1, 1, 1, 1);
92 glGenBuffers(CMESH_NUM_ATTR + 1, cm->buffer_objects);
94 for(i=0; i<CMESH_NUM_ATTR; i++) {
95 if(!(cm->vattr[i].data = dynarr_alloc(0, sizeof(float)))) {
99 cm->vattr[i].vbo = buffer_objects[i];
102 cm->ibo = buffer_objects[CMESH_NUM_ATTR];
103 if(!(cm->idata = dynarr_alloc(0, sizeof *cm->idata))) {
110 void cmesh_destroy(struct cmesh *cm)
116 for(i=0; i<CMESH_NUM_ATTR; i++) {
117 dynarr_free(cm->vattr[i].data);
119 dynarr_free(cm->idata);
121 glDeleteBuffers(CMESH_NUM_ATTR + 1, cm->buffer_objects);
123 glDeleteBuffers(1, &cm->wire_ibo);
127 void cmesh_clear(struct cmesh *cm)
131 for(i=0; i<CMESH_NUM_ATTR; i++) {
132 cm->vattr[i].nelem = 0;
133 cm->vattr[i].vbo_valid = 0;
134 cm->vattr[i].data_valid = 0;
135 cm->vattr[i].data = dynarr_clear(cm->vattr[i].data);
137 cm->ibo_valid = cm->idata_valid = 0;
138 cm->idata = dynarr_clear(cm->idata);
140 cm->wire_ibo_valid = 0;
141 cm->nverts = cm->nfaces = 0;
143 cm->bsph_valid = cm->aabb_valid = 0;
146 int cmesh_clone(struct cmesh *cmdest, struct cmesh *cmsrc)
150 float *varr[CMESH_NUM_ATTR] = {0};
151 unsigned int *iarr = 0;
153 /* do anything that can fail first, before making any changes to cmdest
154 * so we have the option of recovering gracefuly
157 if(!(name = malloc(strlen(cmsrc->name)))) {
160 strcpy(name, cmsrc->name);
162 if(cmesh_indexed(cmsrc)) {
163 num = dynarr_size(cmsrc->idata);
164 if(!(iarr = dynarr_alloc(num, sizeof *iarr))) {
169 for(i=0; i<CMESH_NUM_ATTR; i++) {
170 if(cmesh_has_attrib(cmsrc, i)) {
171 nelem = cmsrc->vattr[i].nelem;
172 num = dynarr_size(cmsrc->vattr[i].data);
173 if(!(varr[i] = dynarr_alloc(num * nelem, sizeof(float)))) {
175 dynarr_free(varr[i]);
186 for(i=0; i<CMESH_NUM_ATTR; i++) {
187 dynarr_free(cmdest->vattr[i].data);
189 if(cmesh_has_attrib(cmsrc, i)) {
190 cmesh_attrib(cmsrc, i); /* force validation of the actual data on the source mesh */
192 nelem = cmsrc->vattr[i].nelem;
193 cmdest->vattr[i].nelem = nelem
194 num = dynarr_size(cmsrc->vattr[i].data);
195 cmdest->vattr[i].data = varr[i];
196 memcpy(cmdest->vattr[i].data, cmsrc->vattr[i].data, num * nelem * sizeof(float));
197 cmdest->vattr[i].data_valid = 1;
198 cmdest->vattr[i].vbo_valid = 0;
200 memset(cmdest->vattr + i, 0, sizeof cmdest->vattr[i]);
204 dynarr_free(cmdest->idata);
205 if(cmesh_indexed(cmsrc)) {
206 cmesh_index(cmsrc); /* force validation .... */
208 num = dynarr_size(cmsrc->idata);
209 cmdest->idata = iarr;
210 memcpy(cmdest->idata, cmsrc->idata, num * sizeof *cmdest->idata);
211 cmdest->idata_valid = 1;
214 cmdest->idata_valid = cmdest->ibo_valid = 0;
220 cmdest->nverts = cmsrc->nverts;
221 cmdest->nfaces = cmsrc->nfaces;
223 memcpy(cmdest->cur_val, cmsrc->cur_val, sizeof cmdest->cur_val);
225 cmdest->aabb_min = cmsrc->aabb_min;
226 cmdest->aabb_max = cmsrc->aabb_max;
227 cmdest->aabb_valid = cmsrc->aabb_valid;
228 cmdest->bsph_center = cmsrc->bsph_center;
229 cmdest->bsph_radius = cmsrc->bsph_radius;
230 cmdest->bsph_valid = cmsrc->bsph_valid;
235 int cmesh_set_name(struct cmesh *cm, const char *name)
237 int len = strlen(name);
238 char *tmp = malloc(len + 1);
242 memcpy(cm->name, name, len + 1);
246 const char *cmesh_name(struct cmesh *cm)
251 int cmesh_has_attrib(struct cmesh *cm, int attr)
253 if(attr < 0 || attr >= CMESH_NUM_ATTR) {
256 return cm->vattr[attr].vbo_valid | cm->vattr[attr].data_valid;
259 int cmesh_indexed(struct cmesh *cm)
261 return cm->ibo_valid | cm->idata_valid;
264 /* vdata can be 0, in which case only memory is allocated
265 * returns pointer to the attribute array
267 float *cmesh_set_attrib(struct cmesh *cm, int attr, int nelem, unsigned int num,
272 if(attr < 0 || attr >= CMESH_NUM_ATTR) {
275 if(cm->nverts && num != cm->nverts) {
279 if(!(newarr = dynarr_alloc(num * nelem, sizeof *newarr))) {
283 memcpy(newarr, vdata, num * nelem * sizeof *newarr);
288 dynarr_free(cm->vattr[attr].data);
289 cm->vattr[attr].data = newarr;
290 cm->vattr[attr].nelem = nelem;
291 cm->vattr[attr].data_valid = 1;
292 cm->vattr[attr].vbo_valid = 0;
296 float *cmesh_attrib(struct cmesh *cm, int attr)
298 if(attr < 0 || attr >= CMESH_NUM_ATTR) {
301 cm->vattr[attr].vbo_valid = 0;
302 return (float*)cmesh_attrib_ro(cm, attr);
305 const float *cmesh_attrib_ro(struct cmesh *cm, int attr)
310 if(attr < 0 || attr >= CMESH_NUM_ATTR) {
314 if(!cm->vattr[attr].data_valid) {
315 #if GL_ES_VERSION_2_0
318 if(!cm->vattr[attr].vbo_valid) {
322 /* local data copy unavailable, grab the data from the vbo */
323 nelem = cm->vattr[attr].nelem;
324 if(!(tmp = dynarr_resize(cm->vattr[attr].data, cm->nverts * nelem))) {
327 cm->vattr[attr].data = tmp;
329 glBindBuffer(GL_ARRAY_BUFFER, vattr[attr].vbo);
330 tmp = glMapBuffer(GL_ARRAY_BUFFER, GL_READ_ONLY);
331 memcpy(cm->vattr[attr].data, tmp, cm->nverts * nelem * sizeof(float));
332 glUnmapBuffer(GL_ARRAY_BUFFER);
334 cm->vattr[attr].data_valid = 1;
337 return cm->vattr[attr].data;
340 int cmesh_attrib_count(struct cmesh *cm, int attr)
342 return cmesh_has_attrib(cm, attr) ? cm->nverts : 0;
345 /* indices can be 0, in which case only memory is allocated
346 * returns pointer to the index array
348 unsigned int *cmesh_set_index(struct cmesh *cm, int num, const unsigned int *indices)
351 int nidx = cm->nfaces * 3;
353 if(nidx && num != nidx) {
357 if(!(tmp = dynarr_alloc(num, sizeof *tmp))) {
361 memcpy(tmp, indices, num * sizeof *tmp);
364 dynarr_free(cm->idata);
371 unsigned int *cmesh_index(struct cmesh *cm)
374 return (unsigned int*)cmesh_index_ro(cm);
377 const unsigned int *cmesh_index_ro(struct cmesh *cm)
382 if(!cm->idata_valid) {
383 #if GL_ES_VERSION_2_0
390 /* local copy is unavailable, grab the data from the ibo */
391 nidx = cm->nfaces * 3;
392 if(!(tmp = dynarr_alloc(nidx, sizeof *cm->idata))) {
395 dynarr_free(cm->idata);
398 glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, cm->ibo);
399 tmp = glMapBuffer(GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY);
400 memcpy(cm->idata, tmp, nidx * sizeof *cm->idata);
401 glUnmapBuffer(GL_ELEMENT_ARRAY_BUFFER);
408 int cmesh_index_count(struct cmesh *cm)
410 return cm->nfaces * 3;
413 int get_poly_count(struct cmesh *cm)
419 return cm->nverts / 3;
424 /* attr can be -1 to invalidate all attributes */
425 void cmesh_invalidate_vbo(struct cmesh *cm, int attr)
429 if(attr >= CMESH_NUM_ATTR) {
434 for(i=0; i<CMESH_NUM_ATTR; i++) {
435 cm->vattr[i].vbo_valid = 0;
438 cm->vattr[attr].vbo_valid = 0;
442 void cmesh_invalidate_index(struct cmesh *cm)
447 int cmesh_append(struct cmesh *cmdest, struct cmesh *cmsrc)
449 int i, nelem, nidx, newsz, origsz;
452 unsigned int idxoffs;
454 if(!cmdest->nverts) {
455 return cmesh_clone(cmdest, cmsrc);
458 for(i=0; i<CMESH_NUM_ATTR; i++) {
459 if(cmesh_has_attrib(cmdest) && cmesh_has_attrib(cmsrc)) {
460 /* force validation of the data arrays */
461 cmesh_attrib(cmdest, i);
462 cmesh_attrib_ro(cmsrc, id);
464 assert(cmdest->vattr[i].nelem == cmsrc->vattr[i].nelem);
465 nelem = cmdest->vattr[i].nelem;
466 origsz = cmdest->nverts * nelem;
467 newsz = cmdest->nverts + cmsrc->nverts * nelem;
469 if(!(vptr = dynarr_resize(cmdest->vattr[i].data, newsz))) {
472 memcpy(vptr + origsz, cmsrc->vattr[i].data, cmsrc->nverts * nelem * sizeof(float));
473 cmdest->vattr[i].data = vptr;
477 if(cmesh_indexed(cmdest)) {
478 assert(cmesh_indexed(cmsrc));
479 /* force validation ... */
481 cmesh_index_ro(cmsrc);
483 idxoff = cmdest->nverts;
484 origsz = dynarr_size(cmdest->idata);
485 srcsz = dynarr_size(cmsrc->idata);
486 newsz = origsz + srcsz;
488 if(!(iptr = dynarr_resize(cmdest->idata, newsz))) {
491 cmdest->idata = iptr;
493 /* copy and fixup all the new indices */
495 for(i=0; i<srcsz; i++) {
496 *iptr++ = cmsrc->idata[i] + idxoffs;
500 cmdest->wire_ibo_valid = 0;
501 cmdest->aabb_valid = 0;
502 cmdest->bsph_valid = 0;
505 /* assemble a complete vertex by adding all the useful attributes */
506 int cmesh_vertex(struct cmesh *cm, float x, float y, float z)
510 cgm_wcons(cm->cur_val + CMESH_ATTR_VERTEX, x, y, z, 1.0f);
511 cm->vattr[CMESH_ATTR_VERTEX].data_valid = 1;
512 cm->vattr[CMESH_ATTR_VERTEX].nelem = 3;
514 for(i=0; i<CMESH_ATTR_VERTEX; i++) {
515 if(cm->vattr[i].data_valid) {
516 for(j=0; j<cm->vattr[CMESH_ATTR_VERTEX].nelem; j++) {
517 float *tmp = dynarr_push(cm->vattr[i].data, cur_val[i] + j);
519 cm->vattr[i].data = tmp;
522 cm->vattr[i].vbo_valid = 0;
523 cm->vattr[i].data_valid = 1;
526 if(cm->idata_valid) {
527 cm->idata = dynarr_clear(cm->idata);
529 cm->ibo_valid = cm->idata_valid = 0;
533 void cmesh_normal(struct cmesh *cm, float nx, float ny, float nz)
535 cgm_wcons(cm->cur_val + CMESH_ATTR_NORMAL, nx, ny, nz, 1.0f);
536 cm->vattr[CMESH_ATTR_NORMAL].nelem = 3;
539 void cmesh_tangent(struct cmesh *cm, float tx, float ty, float tz)
541 cgm_wcons(cm->cur_val + CMESH_ATTR_TANGENT, tx, ty, tz, 1.0f);
542 cm->vattr[CMESH_ATTR_TANGENT].nelem = 3;
545 void cmesh_texcoord(struct cmesh *cm, float u, float v, float w)
547 cgm_wcons(cm->cur_val + CMESH_ATTR_TEXCOORD, u, v, w, 1.0f);
548 cm->vattr[CMESH_ATTR_TEXCOORD].nelem = 3;
551 void cmesh_boneweights(struct cmesh *cm, float w1, float w2, float w3, float w4)
553 cgm_wcons(cm->cur_val + CMESH_ATTR_BONEWEIGHTS, w1, w2, w3, w4);
554 cm->vattr[CMESH_ATTR_BONEWEIGHTS].nelem = 4;
557 void cmesh_boneidx(struct cmesh *cm, int idx1, int idx2, int idx3, int idx4)
559 cgm_wcons(cm->cur_val + CMESH_ATTR_BONEIDX, idx1, idx2, idx3, idx4);
560 cm->vattr[CMESH_ATTR_BONEIDX].nelem = 4;
563 /* dir_xform can be null, in which case it's calculated from xform */
564 void cmesh_apply_xform(struct cmesh *cm, float *xform, float *dir_xform);
566 void cmesh_flip(struct cmesh *cm); /* flip faces (winding) and normals */
567 void cmesh_flip_faces(struct cmesh *cm);
568 void cmesh_flip_normals(struct cmesh *cm);
570 void cmesh_explode(struct cmesh *cm); /* undo all vertex sharing */
572 /* this is only guaranteed to work on an exploded mesh */
573 void cmesh_calc_face_normals(struct cmesh *cm);
575 void cmesh_draw(struct cmesh *cm);
576 void cmesh_draw_wire(struct cmesh *cm, float linesz);
577 void cmesh_draw_vertices(struct cmesh *cm, float ptsz);
578 void cmesh_draw_normals(struct cmesh *cm, float len);
579 void cmesh_draw_tangents(struct cmesh *cm, float len);
581 /* get the bounding box in local space. The result will be cached and subsequent
582 * calls will return the same box. The cache gets invalidated by any functions that
583 * can affect the vertex data
585 void cmesh_aabbox(struct cmesh *cm, cgm_vec3 *vmin, cgm_vec3 *vmax);
587 /* get the bounding sphere in local space. The result will be cached ... see above */
588 float cmesh_bsphere(struct cmesh *cm, cgm_vec3 *center, float *rad);
590 /* texture coordinate manipulation */
591 void cmesh_texcoord_apply_xform(struct cmesh *cm, float *xform);
592 void cmesh_texcoord_gen_plane(struct cmesh *cm, cgm_vec3 *norm, cgm_vec3 *tang);
593 void cmesh_texcoord_gen_box(struct cmesh *cm);
594 void cmesh_texcoord_gen_cylinder(struct cmesh *cm);
596 int cmesh_dump(struct cmesh *cm, const char *fname);
597 int cmesh_dump_file(struct cmesh *cm, FILE *fp);
598 int cmesh_dump_obj(struct cmesh *cm, const char *fname);
599 int cmesh_dump_obj_file(struct cmesh *cm, FILE *fp, int voffs);