10 struct render_job *next;
14 int fb_width, fb_height, fb_size;
21 static void proc_render_job(void *cls);
22 static void done_render_job(void *cls);
23 static struct render_job *alloc_job(void);
24 static void free_job(struct render_job *job);
26 static struct thread_pool *tpool;
/* Create a renderer instance.
 * Lazily creates the process-wide worker thread pool on first use, then
 * allocates a zero-initialized erb_rend.
 * NOTE(review): lines elided in this view — presumably returns NULL on
 * either failure and the new instance on success; confirm against full file.
 */
29 struct erb_rend *erb_create(void)
34 if(!(tpool = tpool_create(0))) {	/* 0 threads: presumably lets tpool pick a default worker count — confirm */
35 fprintf(stderr, "erb_create: fatal error, failed to create thread pool!\n");
40 if(!(erb = calloc(1, sizeof *erb))) {	/* calloc zeroes all fields (fb_pixels, dims, callbacks) */
/* Destroy a renderer created by erb_create.
 * Body elided in this view — presumably frees the framebuffer and the
 * erb_rend itself; whether the shared thread pool is torn down here is
 * not visible. TODO(review): confirm against full file. */
46 void erb_destroy(struct erb_rend *erb)
/* (Re)allocate the framebuffer to width x height pixels, 4 floats per
 * pixel (RGBA). No-op when the dimensions are unchanged.
 * NOTE(review): lines elided — the return convention (presumably 0 on
 * success, -1 on failure) and the free of the previous buffer before the
 * pointer is replaced are not visible here; confirm no leak in full file.
 * NOTE(review): width * height * 4 is computed in int before widening to
 * the size type — overflow risk for very large frames; worth a check. */
53 int erb_allocframe(struct erb_rend *erb, int width, int height)
58 if(width == erb->fb_width && height == erb->fb_height) {	/* same size: keep existing buffer */
62 sz = width * height * 4 * sizeof *erb->fb_pixels;	/* bytes: 4 float channels per pixel */
63 if(!(newfb = malloc(sz))) {
64 fprintf(stderr, "erb_allocframe: failed to allocate %dx%d framebuffer\n", width, height);
70 erb->fb_pixels = newfb;
71 erb->fb_width = width;
72 erb->fb_height = height;
/* Return the renderer's framebuffer pixels (owned by erb — caller must
 * not free; interleaved RGBA floats, fb_width * fb_height pixels). */
77 float *erb_getframe(struct erb_rend *erb)
79 return erb->fb_pixels;
/* Start a new frame: clear the framebuffer to zero.
 * NOTE(review): assumes fb_size holds the buffer size in BYTES (set
 * during erb_allocframe, assignment elided here) — confirm; if it held a
 * pixel count this memset would only clear part of the buffer. */
82 void erb_begin(struct erb_rend *erb)
84 memset(erb->fb_pixels, 0, erb->fb_size);
/* Finish the frame: post-process every pixel in place and return the
 * framebuffer. The per-pixel loop body is elided in this view —
 * presumably resolves accumulated samples (e.g. divides RGB by the sample
 * count kept in the alpha channel; see proc_render_job) — confirm. */
87 float *erb_end(struct erb_rend *erb)
89 int i, npix = erb->fb_width * erb->fb_height;
91 float *pptr = erb->fb_pixels;
93 for(i=0; i<npix; i++) {
105 return erb->fb_pixels;
/* Register a callback invoked (from a worker thread context — see
 * done_render_job) whenever a queued block finishes rendering; cls is
 * passed back to the callback opaquely.
 * NOTE(review): the store of cls into erb->donecls is elided here but is
 * read by done_render_job — confirm it is assigned in the full file. */
108 void erb_set_done_callback(struct erb_rend *erb, erb_done_func donecb, void *cls)
110 erb->donecb = donecb;
/* Queue the entire framebuffer as a single render job (convenience
 * wrapper around erb_queue_block covering rect (0,0,fb_width,fb_height)). */
114 void erb_queue_frame(struct erb_rend *erb, unsigned int job_id)
116 erb_queue_block(erb, job_id, 0, 0, erb->fb_width, erb->fb_height);
/* Queue an x,y,width,height rectangle of the framebuffer for asynchronous
 * rendering on the shared thread pool. proc_render_job runs on a worker
 * thread; done_render_job runs when it completes.
 * NOTE(review): lines elided — the assignments of job->erb, job->id,
 * job->rect.x/y/w, and any return on alloc failure are not visible here;
 * rect clipping against the framebuffer bounds is also not visible —
 * confirm callers pass in-bounds rects. */
119 void erb_queue_block(struct erb_rend *erb, unsigned int job_id, int x, int y,
120 int width, int height)
122 struct render_job *job;
124 if(!(job = alloc_job())) {
125 fprintf(stderr, "erb_queue_block: failed to allocate rendering job\n");
134 job->rect.h = height;
136 tpool_enqueue(tpool, job, proc_render_job, done_render_job);
/* Block until all queued render jobs have completed. Waits on the shared
 * pool, so this also waits for jobs queued by OTHER erb_rend instances. */
139 void erb_wait(struct erb_rend *erb)
141 /* XXX should we have a per-renderer instance thread pool, to wait only for our own jobs? */
/* Generate the primary camera ray for the current pixel and the given
 * sample index. Body elided in this view — how the target pixel is
 * communicated (presumably state stored on erb by the render loop) is not
 * visible; confirm against full file. */
145 void erb_primary_ray(struct erb_rend *erb, struct erb_ray *ray, int sample)
/* Shade a ray, writing the result into col[0..2]. The visible body looks
 * like a placeholder/test shader: it cycles each channel through [0,1)
 * rather than tracing the scene — presumably to be replaced with real
 * shading. NOTE(review): fmod (double) is used on floats; fmodf would
 * avoid the double round-trip, though results here are equivalent. */
150 void erb_sample_ray(struct erb_rend *erb, struct erb_ray *ray, float *col)
153 col[0] = fmod(col[0] + 1.0f, 1.0f);
154 col[1] = col[2] = fmod(col[1] + 0.33f, 1.0f);
/* Worker-thread entry point: render one job's rectangle into the
 * framebuffer. cls is the render_job queued by erb_queue_block.
 * NOTE(review): lines elided — the job->erb load into erb, the per-pixel
 * advance of fbptr (presumably += 4 per pixel so the row-skip below is
 * correct), and any sample-count update in fbptr[3] are not visible;
 * confirm. fbptr[3] appears to carry the per-pixel sample index in the
 * alpha channel — it is passed to erb_primary_ray as the sample number. */
157 static void proc_render_job(void *cls)
160 struct erb_rend *erb;
161 struct render_job *job = cls;
166 fboffs = job->rect.y * erb->fb_width + job->rect.x;	/* pixel offset of the rect's top-left */
167 fbptr = erb->fb_pixels + fboffs * 4;	/* 4 floats (RGBA) per pixel */
169 for(i=0; i<job->rect.h; i++) {
170 for(j=0; j<job->rect.w; j++) {
171 erb_primary_ray(erb, &ray, (int)fbptr[3]);
172 erb_sample_ray(erb, &ray, fbptr);
175 fbptr += (erb->fb_width - job->rect.w) * 4;	/* skip to start of next row within the rect */
/* Completion callback run by the thread pool when a job finishes:
 * forwards the job id and rect to the user's done callback, then
 * presumably recycles the job via free_job (elided in this view).
 * NOTE(review): the invocation below would crash if donecb is NULL —
 * presumably an elided guard checks it before this line; confirm. */
179 static void done_render_job(void *cls)
181 struct erb_rend *erb;
182 struct render_job *job = cls;
186 erb->donecb(job->id, &job->rect, erb->donecls);
/* Free-list of recycled render_job structs, shared by all renderer
 * instances and protected by job_pool_lock (jobs are allocated/freed from
 * both API callers and pool worker threads). At most MAX_JOB_POOL jobs
 * are retained; beyond that free_job presumably releases to the heap. */
193 #define MAX_JOB_POOL 128
194 static struct render_job *job_pool;	/* singly-linked via job->next */
195 static int num_free_jobs;	/* current length of the free list */
196 static pthread_mutex_t job_pool_lock = PTHREAD_MUTEX_INITIALIZER;
/* Get a render_job: pop one from the free list if available, otherwise
 * fall back to malloc. Returns NULL only if malloc fails. Thread-safe via
 * job_pool_lock. NOTE(review): the empty-pool check (and the pop of job
 * from job_pool, plus num_free_jobs decrement) is partly elided here —
 * confirm. Jobs from either path are NOT zero-initialized; callers must
 * set every field they use. */
198 static struct render_job *alloc_job(void)
200 struct render_job *job;
202 pthread_mutex_lock(&job_pool_lock);
204 pthread_mutex_unlock(&job_pool_lock);	/* pool empty: unlock before heap fallback */
205 return malloc(sizeof *job);
209 job_pool = job->next;	/* unlink head of free list */
211 pthread_mutex_unlock(&job_pool_lock);
/* Return a render_job to the free list, capping the list at MAX_JOB_POOL
 * entries. NOTE(review): the pool-full branch is partly elided —
 * presumably it free()s the job after unlocking; likewise the
 * num_free_jobs increment on the keep path is not visible — confirm. */
215 static void free_job(struct render_job *job)
217 pthread_mutex_lock(&job_pool_lock);
218 if(num_free_jobs >= MAX_JOB_POOL) {
219 pthread_mutex_unlock(&job_pool_lock);
224 job->next = job_pool;	/* push onto head of free list */
227 pthread_mutex_unlock(&job_pool_lock);