added thread pool, started fleshing out the render job system
[erebus2020] / liberebus / src / tpool.c
1 /* worker thread pool based on POSIX threads
2  * author: John Tsiombikas <nuclear@member.fsf.org>
3  * This code is public domain.
4  */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>
#include <pthread.h>
#include "tpool.h"
10
11 #if defined(__APPLE__) && defined(__MACH__)
12 # ifndef __unix__
13 #  define __unix__      1
14 # endif /* unix */
15 # ifndef __bsd__
16 #  define __bsd__       1
17 # endif /* bsd */
18 #endif  /* apple */
19
20 #if defined(unix) || defined(__unix__)
21 #include <unistd.h>
22 #include <sys/time.h>
23
24 # ifdef __bsd__
25 #  include <sys/sysctl.h>
26 # endif
27 #endif
28
29 #if defined(WIN32) || defined(__WIN32__)
30 #include <windows.h>
31 #endif
32
33
/* a single queued job; also reused as a freelist node (see wpool below) */
struct work_item {
	void *data;			/* opaque pointer handed to both callbacks */
	tpool_callback work, done;	/* job function, and optional completion callback */
	struct work_item *next;		/* singly-linked list link (queue or freelist) */
};
39
/* per-worker startup data passed to thread_func */
struct thread_data {
	int id;				/* worker index within the pool (0-based) */
	struct thread_pool *pool;	/* back-pointer to the owning pool */
};
44
struct thread_pool {
	pthread_t *threads;		/* worker thread handles (num_threads of them) */
	struct thread_data *tdata;	/* per-worker data, parallel to threads[] */
	int num_threads;
	pthread_key_t idkey;		/* TLS key holding each worker's id */

	int qsize;			/* number of queued (not yet started) jobs */
	struct work_item *workq, *workq_tail;	/* FIFO job queue: head/tail */
	pthread_mutex_t workq_mutex;	/* protects workq/workq_tail/qsize/nactive */
	pthread_cond_t workq_condvar;	/* signaled when jobs are added */

	int nactive;	/* number of active workers (not sleeping) */

	pthread_cond_t done_condvar;	/* broadcast each time a job completes */

	int should_quit;	/* set by tpool_destroy to stop the workers */
	int in_batch;		/* while set, enqueue defers waking the workers */

	int nref;	/* reference count */

#if defined(WIN32) || defined(__WIN32__)
	HANDLE wait_event;	/* lazily-created event for tpool_get_wait_handle */
#else
	int wait_pipe[2];	/* lazily-created self-pipe for tpool_get_wait_fd */
#endif
};
71
72 static void *thread_func(void *args);
73 static void send_done_event(struct thread_pool *tpool);
74
75 static struct work_item *alloc_work_item(void);
76 static void free_work_item(struct work_item *w);
77
78
79 struct thread_pool *tpool_create(int num_threads)
80 {
81         int i;
82         struct thread_pool *tpool;
83
84         if(!(tpool = calloc(1, sizeof *tpool))) {
85                 return 0;
86         }
87         pthread_mutex_init(&tpool->workq_mutex, 0);
88         pthread_cond_init(&tpool->workq_condvar, 0);
89         pthread_cond_init(&tpool->done_condvar, 0);
90         pthread_key_create(&tpool->idkey, 0);
91
92         pthread_setspecific(tpool->idkey, (void*)0xffffffff);
93
94 #if !defined(WIN32) && !defined(__WIN32__)
95         tpool->wait_pipe[0] = tpool->wait_pipe[1] = -1;
96 #endif
97
98         if(num_threads <= 0) {
99                 num_threads = tpool_num_processors();
100         }
101         tpool->num_threads = num_threads;
102
103         if(!(tpool->threads = calloc(num_threads, sizeof *tpool->threads))) {
104                 free(tpool);
105                 return 0;
106         }
107         if(!(tpool->tdata = malloc(num_threads * sizeof *tpool->tdata))) {
108                 free(tpool->threads);
109                 free(tpool);
110                 return 0;
111         }
112
113         for(i=0; i<num_threads; i++) {
114                 tpool->tdata[i].id = i;
115                 tpool->tdata[i].pool = tpool;
116
117                 if(pthread_create(tpool->threads + i, 0, thread_func, tpool->tdata + i) == -1) {
118                         /*tpool->threads[i] = 0;*/
119                         tpool_destroy(tpool);
120                         return 0;
121                 }
122         }
123         return tpool;
124 }
125
126 void tpool_destroy(struct thread_pool *tpool)
127 {
128         int i;
129         if(!tpool) return;
130
131         tpool_clear(tpool);
132         tpool->should_quit = 1;
133
134         pthread_cond_broadcast(&tpool->workq_condvar);
135
136         if(tpool->threads) {
137                 for(i=0; i<tpool->num_threads; i++) {
138                         pthread_join(tpool->threads[i], 0);
139                 }
140                 putchar('\n');
141                 free(tpool->threads);
142         }
143         free(tpool->tdata);
144
145         /* also wake up anyone waiting on the wait* calls */
146         tpool->nactive = 0;
147         pthread_cond_broadcast(&tpool->done_condvar);
148         send_done_event(tpool);
149
150         pthread_mutex_destroy(&tpool->workq_mutex);
151         pthread_cond_destroy(&tpool->workq_condvar);
152         pthread_cond_destroy(&tpool->done_condvar);
153         pthread_key_delete(tpool->idkey);
154
155 #if defined(WIN32) || defined(__WIN32__)
156         if(tpool->wait_event) {
157                 CloseHandle(tpool->wait_event);
158         }
159 #else
160         if(tpool->wait_pipe[0] >= 0) {
161                 close(tpool->wait_pipe[0]);
162                 close(tpool->wait_pipe[1]);
163         }
164 #endif
165 }
166
167 int tpool_addref(struct thread_pool *tpool)
168 {
169         return ++tpool->nref;
170 }
171
172 int tpool_release(struct thread_pool *tpool)
173 {
174         if(--tpool->nref <= 0) {
175                 tpool_destroy(tpool);
176                 return 0;
177         }
178         return tpool->nref;
179 }
180
181 void tpool_begin_batch(struct thread_pool *tpool)
182 {
183         tpool->in_batch = 1;
184 }
185
186 void tpool_end_batch(struct thread_pool *tpool)
187 {
188         tpool->in_batch = 0;
189         pthread_cond_broadcast(&tpool->workq_condvar);
190 }
191
192 int tpool_enqueue(struct thread_pool *tpool, void *data,
193                 tpool_callback work_func, tpool_callback done_func)
194 {
195         struct work_item *job;
196
197         if(!(job = alloc_work_item())) {
198                 return -1;
199         }
200         job->work = work_func;
201         job->done = done_func;
202         job->data = data;
203         job->next = 0;
204
205         pthread_mutex_lock(&tpool->workq_mutex);
206         if(tpool->workq) {
207                 tpool->workq_tail->next = job;
208                 tpool->workq_tail = job;
209         } else {
210                 tpool->workq = tpool->workq_tail = job;
211         }
212         ++tpool->qsize;
213         pthread_mutex_unlock(&tpool->workq_mutex);
214
215         if(!tpool->in_batch) {
216                 pthread_cond_broadcast(&tpool->workq_condvar);
217         }
218         return 0;
219 }
220
221 void tpool_clear(struct thread_pool *tpool)
222 {
223         pthread_mutex_lock(&tpool->workq_mutex);
224         while(tpool->workq) {
225                 void *tmp = tpool->workq;
226                 tpool->workq = tpool->workq->next;
227                 free(tmp);
228         }
229         tpool->workq = tpool->workq_tail = 0;
230         tpool->qsize = 0;
231         pthread_mutex_unlock(&tpool->workq_mutex);
232 }
233
234 int tpool_queued_jobs(struct thread_pool *tpool)
235 {
236         int res;
237         pthread_mutex_lock(&tpool->workq_mutex);
238         res = tpool->qsize;
239         pthread_mutex_unlock(&tpool->workq_mutex);
240         return res;
241 }
242
243 int tpool_active_jobs(struct thread_pool *tpool)
244 {
245         int res;
246         pthread_mutex_lock(&tpool->workq_mutex);
247         res = tpool->nactive;
248         pthread_mutex_unlock(&tpool->workq_mutex);
249         return res;
250 }
251
252 int tpool_pending_jobs(struct thread_pool *tpool)
253 {
254         int res;
255         pthread_mutex_lock(&tpool->workq_mutex);
256         res = tpool->qsize + tpool->nactive;
257         pthread_mutex_unlock(&tpool->workq_mutex);
258         return res;
259 }
260
261 void tpool_wait(struct thread_pool *tpool)
262 {
263         pthread_mutex_lock(&tpool->workq_mutex);
264         while(tpool->nactive || tpool->qsize) {
265                 pthread_cond_wait(&tpool->done_condvar, &tpool->workq_mutex);
266         }
267         pthread_mutex_unlock(&tpool->workq_mutex);
268 }
269
270 void tpool_wait_pending(struct thread_pool *tpool, int pending_target)
271 {
272         pthread_mutex_lock(&tpool->workq_mutex);
273         while(tpool->qsize + tpool->nactive > pending_target) {
274                 pthread_cond_wait(&tpool->done_condvar, &tpool->workq_mutex);
275         }
276         pthread_mutex_unlock(&tpool->workq_mutex);
277 }
278
279 #if defined(WIN32) || defined(__WIN32__)
/* Windows stub: timed waiting is not implemented yet; aborts if called. */
long tpool_timedwait(struct thread_pool *tpool, long timeout)
{
	fprintf(stderr, "tpool_timedwait currently unimplemented on windows\n");
	abort();
	return 0;	/* not reached; silences missing-return warnings */
}
286
287 /* TODO: actually does this work with MinGW ? */
288 int tpool_get_wait_fd(struct thread_pool *tpool)
289 {
290         static int once;
291         if(!once) {
292                 once = 1;
293                 fprintf(stderr, "warning: tpool_get_wait_fd call on Windows does nothing\n");
294         }
295         return 0;
296 }
297
298 void *tpool_get_wait_handle(struct thread_pool *tpool)
299 {
300         if(!tpool->wait_event) {
301                 if(!(tpool->wait_event = CreateEvent(0, 0, 0, 0))) {
302                         return 0;
303                 }
304         }
305         return tpool->wait_event;
306 }
307
308 static void send_done_event(struct thread_pool *tpool)
309 {
310         if(tpool->wait_event) {
311                 SetEvent(tpool->wait_event);
312         }
313 }
314
315 #else           /* UNIX */
316
317 long tpool_timedwait(struct thread_pool *tpool, long timeout)
318 {
319         struct timespec tout_ts;
320         struct timeval tv0, tv;
321         gettimeofday(&tv0, 0);
322
323         long sec = timeout / 1000;
324         tout_ts.tv_nsec = tv0.tv_usec * 1000 + (timeout % 1000) * 1000000;
325         tout_ts.tv_sec = tv0.tv_sec + sec;
326
327         pthread_mutex_lock(&tpool->workq_mutex);
328         while(tpool->nactive || tpool->qsize) {
329                 if(pthread_cond_timedwait(&tpool->done_condvar,
330                                         &tpool->workq_mutex, &tout_ts) == ETIMEDOUT) {
331                         break;
332                 }
333         }
334         pthread_mutex_unlock(&tpool->workq_mutex);
335
336         gettimeofday(&tv, 0);
337         return (tv.tv_sec - tv0.tv_sec) * 1000 + (tv.tv_usec - tv0.tv_usec) / 1000;
338 }
339
340 int tpool_get_wait_fd(struct thread_pool *tpool)
341 {
342         if(tpool->wait_pipe[0] < 0) {
343                 if(pipe(tpool->wait_pipe) == -1) {
344                         return -1;
345                 }
346         }
347         return tpool->wait_pipe[0];
348 }
349
/* UNIX stub: event handles are a Windows facility; warns once and returns
 * NULL. Use tpool_get_wait_fd on UNIX instead.
 */
void *tpool_get_wait_handle(struct thread_pool *tpool)
{
	static int warned;

	if(warned) {
		return 0;
	}
	warned = 1;
	fprintf(stderr, "warning: tpool_get_wait_handle call on UNIX does nothing\n");
	return 0;
}
359
360 static void send_done_event(struct thread_pool *tpool)
361 {
362         if(tpool->wait_pipe[1] >= 0) {
363                 write(tpool->wait_pipe[1], tpool, 1);
364         }
365 }
366 #endif  /* WIN32/UNIX */
367
/* Worker thread main loop: sleep until jobs arrive, pop and run them one
 * at a time, and broadcast on done_condvar after each completion. The
 * queue mutex is held at all times except while a job callback runs.
 * Exits when should_quit is set (by tpool_destroy).
 */
static void *thread_func(void *args)
{
	struct thread_data *tdata = args;
	struct thread_pool *tpool = tdata->pool;

	/* record this worker's id for tpool_thread_id */
	pthread_setspecific(tpool->idkey, (void*)(intptr_t)tdata->id);

	pthread_mutex_lock(&tpool->workq_mutex);
	while(!tpool->should_quit) {
		if(!tpool->workq) {
			pthread_cond_wait(&tpool->workq_condvar, &tpool->workq_mutex);
			if(tpool->should_quit) break;
		}

		while(!tpool->should_quit && tpool->workq) {
			/* grab the first job */
			struct work_item *job = tpool->workq;
			tpool->workq = tpool->workq->next;
			if(!tpool->workq)
				tpool->workq_tail = 0;
			++tpool->nactive;
			--tpool->qsize;
			/* drop the lock while the callbacks run, so other
			 * workers and enqueuers can proceed */
			pthread_mutex_unlock(&tpool->workq_mutex);

			/* do the job */
			job->work(job->data);
			if(job->done) {
				job->done(job->data);
			}
			free_work_item(job);

			pthread_mutex_lock(&tpool->workq_mutex);
			/* notify everyone interested that we're done with this job */
			pthread_cond_broadcast(&tpool->done_condvar);
			send_done_event(tpool);
			/* nactive drops inside the same critical section as the
			 * broadcast, so waiters re-checking their predicate
			 * after we unlock see the updated count */
			--tpool->nactive;
		}
	}
	pthread_mutex_unlock(&tpool->workq_mutex);

	return 0;
}
410
411
/* Return the calling worker's id (0 .. num_threads-1), or -1 when called
 * from a non-worker thread whose TLS slot holds the 0xffffffff sentinel
 * (set on the creating thread by tpool_create).
 * NOTE(review): a thread that never had the key set gets NULL from
 * pthread_getspecific, which casts to id 0 here and is indistinguishable
 * from worker 0 — fixing this needs a different sentinel scheme (e.g.
 * storing id+1); flagged rather than changed since it spans thread_func
 * and tpool_create too.
 */
int tpool_thread_id(struct thread_pool *tpool)
{
	int id = (intptr_t)pthread_getspecific(tpool->idkey);
	if(id >= tpool->num_threads) {
		return -1;
	}
	return id;
}
420
421
422 /* The following highly platform-specific code detects the number
423  * of processors available in the system. It's used by the thread pool
424  * to autodetect how many threads to spawn.
425  * Currently works on: Linux, BSD, Darwin, and Windows.
426  */
/* Detect the number of processors available in the system; used by the
 * thread pool to decide how many workers to spawn by default.
 * Currently works on: Linux, BSD, Darwin, IRIX, and Windows; always
 * returns at least 1.
 */
int tpool_num_processors(void)
{
#if defined(unix) || defined(__unix__)
# if defined(__bsd__)
	/* BSD systems provide the num.processors through sysctl */
	int num, mib[] = {CTL_HW, HW_NCPU};
	size_t len = sizeof num;

	if(sysctl(mib, 2, &num, &len, 0, 0) == -1 || num < 1) {
		num = 1;	/* sysctl failed; fall back to one processor */
	}
	return num;

# elif defined(__sgi)
	/* SGI IRIX flavour of the _SC_NPROC_ONLN sysconf */
	long num = sysconf(_SC_NPROC_ONLN);
	return num < 1 ? 1 : (int)num;
# else
	/* Linux (and others?) have the _SC_NPROCESSORS_ONLN sysconf;
	 * it returns -1 on error, so clamp to at least 1 */
	long num = sysconf(_SC_NPROCESSORS_ONLN);
	return num < 1 ? 1 : (int)num;
# endif	/* bsd/sgi/other */

#elif defined(WIN32) || defined(__WIN32__)
	/* under windows we need to call GetSystemInfo */
	SYSTEM_INFO info;
	GetSystemInfo(&info);
	return info.dwNumberOfProcessors;
#else
	/* unknown platform: falling off the end of a non-void function is
	 * undefined behavior, so assume a single processor */
	return 1;
#endif
}
453
/* process-wide freelist of recycled work_item nodes, shared by all pools;
 * capped so an idle process doesn't hoard more than MAX_WPOOL_SIZE nodes */
#define MAX_WPOOL_SIZE  64
static pthread_mutex_t wpool_lock = PTHREAD_MUTEX_INITIALIZER;
static struct work_item *wpool;		/* freelist head (linked via ->next) */
static int wpool_size;			/* current freelist length */
458
459 /* work item allocator */
460 static struct work_item *alloc_work_item(void)
461 {
462         struct work_item *w;
463
464         pthread_mutex_lock(&wpool_lock);
465         if(!wpool) {
466                 pthread_mutex_unlock(&wpool_lock);
467                 return malloc(sizeof(struct work_item));
468         }
469
470         w = wpool;
471         wpool = wpool->next;
472         --wpool_size;
473         pthread_mutex_unlock(&wpool_lock);
474         return w;
475 }
476
477 static void free_work_item(struct work_item *w)
478 {
479         pthread_mutex_lock(&wpool_lock);
480         if(wpool_size >= MAX_WPOOL_SIZE) {
481                 free(w);
482         } else {
483                 w->next = wpool;
484                 wpool = w;
485                 ++wpool_size;
486         }
487         pthread_mutex_unlock(&wpool_lock);
488 }