1 /* worker thread pool based on POSIX threads
2 * author: John Tsiombikas <nuclear@member.fsf.org>
3 * This code is public domain.
11 #if defined(__APPLE__) && defined(__MACH__)
20 #if defined(unix) || defined(__unix__)
25 # include <sys/sysctl.h>
29 #if defined(WIN32) || defined(__WIN32__)
/* A queued unit of work: user callbacks plus link into the pool's
 * singly-linked job queue.
 * NOTE(review): the struct headers/footers around these fields are
 * elided in this excerpt; grouping below follows the visible lines. */
36 tpool_callback work, done;
37 struct work_item *next;
/* per-worker bookkeeping: back-pointer from a worker to its pool */
42 struct thread_pool *pool;
/* pool state: one thread_data per worker thread */
47 struct thread_data *tdata;
/* job queue (head/tail) plus the mutex/condvar protecting it */
52 struct work_item *workq, *workq_tail;
53 pthread_mutex_t workq_mutex;
54 pthread_cond_t workq_condvar;
56 int nactive; /* number of active workers (not sleeping) */
/* signalled on every job completion; used by the wait*/timedwait calls */
58 pthread_cond_t done_condvar;
63 int nref; /* reference count */
/* platform-specific wakeup primitive for get_wait_fd/get_wait_handle */
65 #if defined(WIN32) || defined(__WIN32__)
/* internal helpers (definitions below) */
72 static void *thread_func(void *args);
73 static void send_done_event(struct thread_pool *tpool);
75 static struct work_item *alloc_work_item(void);
76 static void free_work_item(struct work_item *w);
79 struct thread_pool *ass_tpool_create(int num_threads)
82 struct thread_pool *tpool;
84 if(!(tpool = calloc(1, sizeof *tpool))) {
87 pthread_mutex_init(&tpool->workq_mutex, 0);
88 pthread_cond_init(&tpool->workq_condvar, 0);
89 pthread_cond_init(&tpool->done_condvar, 0);
90 pthread_key_create(&tpool->idkey, 0);
92 pthread_setspecific(tpool->idkey, (void*)0xffffffff);
94 #if !defined(WIN32) && !defined(__WIN32__)
95 tpool->wait_pipe[0] = tpool->wait_pipe[1] = -1;
98 if(num_threads <= 0) {
99 num_threads = ass_tpool_num_processors();
101 tpool->num_threads = num_threads;
103 if(!(tpool->threads = calloc(num_threads, sizeof *tpool->threads))) {
107 if(!(tpool->tdata = malloc(num_threads * sizeof *tpool->tdata))) {
108 free(tpool->threads);
113 for(i=0; i<num_threads; i++) {
114 tpool->tdata[i].id = i;
115 tpool->tdata[i].pool = tpool;
117 if(pthread_create(tpool->threads + i, 0, thread_func, tpool->tdata + i) == -1) {
118 /*tpool->threads[i] = 0;*/
119 ass_tpool_destroy(tpool);
126 void ass_tpool_destroy(struct thread_pool *tpool)
131 ass_tpool_clear(tpool);
132 tpool->should_quit = 1;
134 pthread_cond_broadcast(&tpool->workq_condvar);
137 printf("thread_pool: waiting for %d worker threads to stop ", tpool->num_threads);
140 for(i=0; i<tpool->num_threads; i++) {
141 pthread_join(tpool->threads[i], 0);
146 free(tpool->threads);
150 /* also wake up anyone waiting on the wait* calls */
152 pthread_cond_broadcast(&tpool->done_condvar);
153 send_done_event(tpool);
155 pthread_mutex_destroy(&tpool->workq_mutex);
156 pthread_cond_destroy(&tpool->workq_condvar);
157 pthread_cond_destroy(&tpool->done_condvar);
158 pthread_key_delete(tpool->idkey);
160 #if defined(WIN32) || defined(__WIN32__)
161 if(tpool->wait_event) {
162 CloseHandle(tpool->wait_event);
165 if(tpool->wait_pipe[0] >= 0) {
166 close(tpool->wait_pipe[0]);
167 close(tpool->wait_pipe[1]);
172 int ass_tpool_addref(struct thread_pool *tpool)
174 return ++tpool->nref;
/* Drop a reference; when the count reaches zero the pool is destroyed.
 * Returns the remaining reference count.
 * NOTE(review): the cleanup path after ass_tpool_destroy() and the
 * non-zero return are elided in this view — verify against the full
 * source. Like addref, the counter is not atomic. */
177 int ass_tpool_release(struct thread_pool *tpool)
179 if(--tpool->nref <= 0) {
180 ass_tpool_destroy(tpool);
186 void ass_tpool_begin_batch(struct thread_pool *tpool)
191 void ass_tpool_end_batch(struct thread_pool *tpool)
194 pthread_cond_broadcast(&tpool->workq_condvar);
197 int ass_tpool_enqueue(struct thread_pool *tpool, void *data,
198 tpool_callback work_func, tpool_callback done_func)
200 struct work_item *job;
202 if(!(job = alloc_work_item())) {
205 job->work = work_func;
206 job->done = done_func;
210 pthread_mutex_lock(&tpool->workq_mutex);
212 tpool->workq_tail->next = job;
213 tpool->workq_tail = job;
215 tpool->workq = tpool->workq_tail = job;
218 pthread_mutex_unlock(&tpool->workq_mutex);
220 if(!tpool->in_batch) {
221 pthread_cond_broadcast(&tpool->workq_condvar);
226 void ass_tpool_clear(struct thread_pool *tpool)
228 pthread_mutex_lock(&tpool->workq_mutex);
229 while(tpool->workq) {
230 void *tmp = tpool->workq;
231 tpool->workq = tpool->workq->next;
234 tpool->workq = tpool->workq_tail = 0;
236 pthread_mutex_unlock(&tpool->workq_mutex);
239 int ass_tpool_queued_jobs(struct thread_pool *tpool)
242 pthread_mutex_lock(&tpool->workq_mutex);
244 pthread_mutex_unlock(&tpool->workq_mutex);
248 int ass_tpool_active_jobs(struct thread_pool *tpool)
251 pthread_mutex_lock(&tpool->workq_mutex);
252 res = tpool->nactive;
253 pthread_mutex_unlock(&tpool->workq_mutex);
257 int ass_tpool_pending_jobs(struct thread_pool *tpool)
260 pthread_mutex_lock(&tpool->workq_mutex);
261 res = tpool->qsize + tpool->nactive;
262 pthread_mutex_unlock(&tpool->workq_mutex);
266 void ass_tpool_wait(struct thread_pool *tpool)
268 pthread_mutex_lock(&tpool->workq_mutex);
269 while(tpool->nactive || tpool->qsize) {
270 pthread_cond_wait(&tpool->done_condvar, &tpool->workq_mutex);
272 pthread_mutex_unlock(&tpool->workq_mutex);
275 void ass_tpool_wait_pending(struct thread_pool *tpool, int pending_target)
277 pthread_mutex_lock(&tpool->workq_mutex);
278 while(tpool->qsize + tpool->nactive > pending_target) {
279 pthread_cond_wait(&tpool->done_condvar, &tpool->workq_mutex);
281 pthread_mutex_unlock(&tpool->workq_mutex);
284 #if defined(WIN32) || defined(__WIN32__)
/* ---- win32 flavour of the wait plumbing ---- */
/* Timed wait is not implemented on windows; only prints a diagnostic. */
285 long ass_tpool_timedwait(struct thread_pool *tpool, long timeout)
287 fprintf(stderr, "tpool_timedwait currently unimplemented on windows\n");
292 /* TODO: actually does this work with MinGW ? */
/* No pollable file descriptor on windows: warns and returns nothing
 * useful. Use ass_tpool_get_wait_handle on this platform instead. */
293 int ass_tpool_get_wait_fd(struct thread_pool *tpool)
298 fprintf(stderr, "warning: ass_tpool_get_wait_fd call on Windows does nothing\n");
/* Lazily create and return an event object that send_done_event (below)
 * signals on every job completion (CreateEvent with bManualReset=0,
 * i.e. auto-reset). */
303 void *ass_tpool_get_wait_handle(struct thread_pool *tpool)
305 if(!tpool->wait_event) {
306 if(!(tpool->wait_event = CreateEvent(0, 0, 0, 0))) {
310 return tpool->wait_event;
/* Signal the wait handle — but only if anyone ever asked for one. */
313 static void send_done_event(struct thread_pool *tpool)
315 if(tpool->wait_event) {
316 SetEvent(tpool->wait_event);
322 long ass_tpool_timedwait(struct thread_pool *tpool, long timeout)
324 struct timespec tout_ts;
325 struct timeval tv0, tv;
326 gettimeofday(&tv0, 0);
328 long sec = timeout / 1000;
329 tout_ts.tv_nsec = tv0.tv_usec * 1000 + (timeout % 1000) * 1000000;
330 tout_ts.tv_sec = tv0.tv_sec + sec;
332 pthread_mutex_lock(&tpool->workq_mutex);
333 while(tpool->nactive || tpool->qsize) {
334 if(pthread_cond_timedwait(&tpool->done_condvar,
335 &tpool->workq_mutex, &tout_ts) == ETIMEDOUT) {
339 pthread_mutex_unlock(&tpool->workq_mutex);
341 gettimeofday(&tv, 0);
342 return (tv.tv_sec - tv0.tv_sec) * 1000 + (tv.tv_usec - tv0.tv_usec) / 1000;
345 int ass_tpool_get_wait_fd(struct thread_pool *tpool)
347 if(tpool->wait_pipe[0] < 0) {
348 if(pipe(tpool->wait_pipe) == -1) {
352 return tpool->wait_pipe[0];
/* POSIX builds have no native wait handle; warn (once) and return NULL.
 * Use ass_tpool_get_wait_fd on UNIX instead. */
void *ass_tpool_get_wait_handle(struct thread_pool *tpool)
{
	static int warned;

	if(!warned) {
		warned = 1;
		fprintf(stderr, "warning: ass_tpool_get_wait_handle call on UNIX does nothing\n");
	}
	return 0;
}
365 static void send_done_event(struct thread_pool *tpool)
367 if(tpool->wait_pipe[1] >= 0) {
368 write(tpool->wait_pipe[1], tpool, 1);
371 #endif /* WIN32/UNIX */
/* Worker thread entry point: loop picking jobs off the shared queue and
 * running their callbacks until the pool's should_quit flag is raised.
 * The workq mutex is held whenever shared pool state is examined and is
 * dropped only while a job's callbacks run.
 * NOTE(review): the nactive/qsize bookkeeping lines are elided in this
 * excerpt — confirm their placement against the full source. */
373 static void *thread_func(void *args)
375 struct thread_data *tdata = args;
376 struct thread_pool *tpool = tdata->pool;
/* stash this worker's index in TLS so ass_tpool_thread_id can read it */
378 pthread_setspecific(tpool->idkey, (void*)(intptr_t)tdata->id);
380 pthread_mutex_lock(&tpool->workq_mutex);
381 while(!tpool->should_quit) {
/* sleep until jobs arrive or shutdown is requested */
383 pthread_cond_wait(&tpool->workq_condvar, &tpool->workq_mutex);
384 if(tpool->should_quit) break;
/* drain the queue, re-checking should_quit between jobs */
387 while(!tpool->should_quit && tpool->workq) {
388 /* grab the first job */
389 struct work_item *job = tpool->workq;
390 tpool->workq = tpool->workq->next;
/* queue became empty: reset the tail pointer too */
392 tpool->workq_tail = 0;
/* run the callbacks without holding the queue lock */
395 pthread_mutex_unlock(&tpool->workq_mutex);
398 job->work(job->data);
400 job->done(job->data);
404 pthread_mutex_lock(&tpool->workq_mutex);
405 /* notify everyone interested that we're done with this job */
406 pthread_cond_broadcast(&tpool->done_condvar);
407 send_done_event(tpool);
411 pthread_mutex_unlock(&tpool->workq_mutex);
417 int ass_tpool_thread_id(struct thread_pool *tpool)
419 int id = (intptr_t)pthread_getspecific(tpool->idkey);
420 if(id >= tpool->num_threads) {
/* The following highly platform-specific code detects the number
 * of processors available in the system. It's used by the thread pool
 * to autodetect how many threads to spawn.
 * Currently works on: Linux, BSD, Darwin, and Windows.
 * Always returns at least 1.
 */
int ass_tpool_num_processors(void)
{
	int num = 0;
#if defined(unix) || defined(__unix__)
# if defined(__bsd__)
	/* BSD systems provide the num.processors through sysctl */
	int mib[] = {CTL_HW, HW_NCPU};
	size_t len = sizeof num;

	if(sysctl(mib, 2, &num, &len, 0, 0) == -1) {
		num = 0;	/* fall through to the clamp below */
	}
# elif defined(__sgi)
	/* SGI IRIX flavour of the _SC_NPROC_ONLN sysconf */
	num = sysconf(_SC_NPROC_ONLN);
# else
	/* Linux (and others?) have the _SC_NPROCESSORS_ONLN sysconf */
	num = sysconf(_SC_NPROCESSORS_ONLN);
# endif	/* bsd/sgi/other */

#elif defined(WIN32) || defined(__WIN32__)
	/* under windows we need to call GetSystemInfo */
	SYSTEM_INFO info;
	GetSystemInfo(&info);
	num = info.dwNumberOfProcessors;
#endif
	/* BUGFIX: sysconf/sysctl can fail and report -1 (or 0); callers use
	 * this value to size allocations and spawn threads, so never report
	 * less than one processor */
	return num >= 1 ? num : 1;
}
/* Global free list of recycled work_item structs, shared by all thread
 * pools in the process, capped at MAX_WPOOL_SIZE entries and guarded by
 * wpool_lock. Avoids a malloc/free round-trip per enqueued job. */
459 #define MAX_WPOOL_SIZE 64
460 static pthread_mutex_t wpool_lock = PTHREAD_MUTEX_INITIALIZER;
461 static struct work_item *wpool;
462 static int wpool_size;
464 /* work item allocator */
465 static struct work_item *alloc_work_item(void)
469 pthread_mutex_lock(&wpool_lock);
471 pthread_mutex_unlock(&wpool_lock);
472 return malloc(sizeof(struct work_item));
478 pthread_mutex_unlock(&wpool_lock);
482 static void free_work_item(struct work_item *w)
484 pthread_mutex_lock(&wpool_lock);
485 if(wpool_size >= MAX_WPOOL_SIZE) {
492 pthread_mutex_unlock(&wpool_lock);