File: | tests/suite/ecore/src/lib/ecore_thread.c |
Location: | line 972, column 26 |
Description: | Value stored to 'worker' during its initialization is never read |
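The warning is a classic dead-store finding: 'worker' is initialized from 'thread' at line 972, but every later read of 'worker' sits behind the EFL_HAVE_PTHREAD guard, so in a build where that guard is off the stored value is never used. A minimal, self-contained sketch of the same pattern (hypothetical code, not taken from this file; the guard name is illustrative):

   #include <stddef.h>

   struct ctx { int cancel; };

   void *
   find_sketch(void *thread, const char *key)
   {
      struct ctx *c = (struct ctx *) thread;   /* value stored here ...            */

      if (!thread || !key) return NULL;
   #ifdef HAVE_THREADS                         /* illustrative guard name          */
      return c->cancel ? NULL : thread;        /* ... is read only under the guard */
   #else
      return NULL;                             /* guard off: the store is dead     */
   #endif
   }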
1 | #ifdef HAVE_CONFIG_H |
2 | # include <config.h> |
3 | #endif |
4 | |
5 | #ifdef HAVE_EVIL |
6 | # include <Evil.h> |
7 | #endif |
8 | |
9 | #ifdef EFL_HAVE_PTHREAD |
10 | # include <pthread.h> |
11 | # ifdef __linux__ |
12 | # ifndef _GNU_SOURCE |
13 | # define _GNU_SOURCE 1 |
14 | # endif |
15 | # include <sched.h> |
16 | # include <sys/time.h> |
17 | # include <sys/resource.h> |
18 | # include <unistd.h> |
19 | # include <sys/syscall.h> |
20 | # include <errno.h> |
21 | # endif |
22 | #endif |
23 | |
24 | #include "Ecore.h" |
25 | #include "ecore_private.h" |
26 | |
27 | typedef struct _Ecore_Pthread_Worker Ecore_Pthread_Worker; |
28 | typedef struct _Ecore_Pthread Ecore_Pthread; |
29 | typedef struct _Ecore_Thread_Data Ecore_Thread_Data; |
30 | |
31 | struct _Ecore_Thread_Data |
32 | { |
33 | void *data; |
34 | Eina_Free_Cb cb; |
35 | }; |
36 | |
37 | struct _Ecore_Pthread_Worker |
38 | { |
39 | union { |
40 | struct { |
41 | Ecore_Cb func_blocking; |
42 | } short_run; |
43 | struct { |
44 | Ecore_Thread_Heavy_Cb func_heavy; |
45 | Ecore_Thread_Notify_Cb func_notify; |
46 | Ecore_Pipe *notify; |
47 | } feedback_run; |
48 | } u; |
49 | |
50 | Ecore_Cb func_cancel; |
51 | Ecore_Cb func_end; |
52 | #ifdef EFL_HAVE_PTHREAD |
53 | pthread_t self; |
54 | Eina_Hash *hash; |
55 | pthread_cond_t cond; |
56 | pthread_mutex_t mutex; |
57 | #endif |
58 | |
59 | const void *data; |
60 | |
61 | Eina_Bool cancel : 1; |
62 | Eina_Bool feedback_run : 1; |
63 | }; |
64 | |
65 | #ifdef EFL_HAVE_PTHREAD |
66 | typedef struct _Ecore_Pthread_Data Ecore_Pthread_Data; |
67 | |
68 | struct _Ecore_Pthread_Data |
69 | { |
70 | Ecore_Pipe *p; |
71 | void *data; |
72 | pthread_t thread; |
73 | }; |
74 | #endif |
75 | |
76 | static int _ecore_thread_count_max = 0; |
77 | static int ECORE_THREAD_PIPE_DEL = 0; |
78 | |
79 | #ifdef EFL_HAVE_PTHREAD |
80 | static int _ecore_thread_count = 0; |
81 | |
82 | static Eina_List *_ecore_active_job_threads = NULL; |
83 | static Eina_List *_ecore_pending_job_threads = NULL; |
84 | static Eina_List *_ecore_pending_job_threads_feedback = NULL; |
85 | static Ecore_Event_Handler *del_handler = NULL; |
86 | static pthread_mutex_t _ecore_pending_job_threads_mutex = PTHREAD_MUTEX_INITIALIZER; |
87 | |
88 | static Eina_Hash *_ecore_thread_global_hash = NULL; |
89 | static pthread_rwlock_t _ecore_thread_global_hash_lock = PTHREAD_RWLOCK_INITIALIZER; |
90 | static pthread_mutex_t _ecore_thread_global_hash_mutex = PTHREAD_MUTEX_INITIALIZER; |
91 | static pthread_cond_t _ecore_thread_global_hash_cond = PTHREAD_COND_INITIALIZER; |
92 | static pthread_t main_loop_thread; |
93 | static Eina_Bool have_main_loop_thread = 0; |
94 | static void |
95 | _ecore_thread_data_free(void *data) |
96 | { |
97 | Ecore_Thread_Data *d = data; |
98 | |
99 | if (d->cb) d->cb(d->data); |
100 | free(d); |
101 | } |
102 | |
103 | static void |
104 | _ecore_thread_pipe_free(void *data __UNUSED__, void *event) |
105 | { |
106 | Ecore_Pipe *p = event; |
107 | |
108 | ecore_pipe_del(p); |
109 | } |
110 | |
111 | static Eina_Bool |
112 | _ecore_thread_pipe_del(void *data __UNUSED__, int type __UNUSED__, void *event __UNUSED__) |
113 | { |
114 | /* This is a hack to delay pipe destruction until we are out of its internal loop. */ |
115 | return ECORE_CALLBACK_CANCEL; |
116 | } |
117 | |
118 | static void |
119 | _ecore_thread_end(Ecore_Pthread_Data *pth) |
120 | { |
121 | Ecore_Pipe *p; |
122 | |
123 | if (pthread_join(pth->thread, (void **) &p) != 0) |
124 | return ; |
125 | |
126 | _ecore_active_job_threads = eina_list_remove(_ecore_active_job_threads, pth); |
127 | |
128 | ecore_event_add(ECORE_THREAD_PIPE_DEL, pth->p, _ecore_thread_pipe_free, NULL); |
129 | free(pth); |
130 | } |
131 | |
132 | static void |
133 | _ecore_thread_handler(void *data __UNUSED__, void *buffer, unsigned int nbyte) |
134 | { |
135 | Ecore_Pthread_Worker *work; |
136 | |
137 | if (nbyte != sizeof (Ecore_Pthread_Worker *)) return ; |
138 | |
139 | work = *(Ecore_Pthread_Worker **)buffer; |
140 | |
141 | if (work->cancel) |
142 | { |
143 | if (work->func_cancel) |
144 | work->func_cancel((void *) work->data); |
145 | } |
146 | else |
147 | { |
148 | if (work->func_end) |
149 | work->func_end((void *) work->data); |
150 | } |
151 | |
152 | if (work->feedback_run) |
153 | ecore_pipe_del(work->u.feedback_run.notify); |
154 | pthread_cond_destroy(&work->cond); |
155 | pthread_mutex_destroy(&work->mutex); |
156 | if (work->hash) |
157 | eina_hash_free(work->hash); |
158 | free(work); |
159 | } |
160 | |
161 | static void |
162 | _ecore_notify_handler(void *data, void *buffer, unsigned int nbyte) |
163 | { |
164 | Ecore_Pthread_Worker *work = data; |
165 | void *user_data; |
166 | |
167 | if (nbyte != sizeof (Ecore_Pthread_Worker *)) return ; |
168 | |
169 | user_data = *(void **)buffer; |
170 | |
171 | if (work->u.feedback_run.func_notify) |
172 | work->u.feedback_run.func_notify((Ecore_Thread *) work, user_data, (void *) work->data); |
173 | } |
174 | |
175 | static void |
176 | _ecore_short_job(Ecore_Pipe *end_pipe) |
177 | { |
178 | Ecore_Pthread_Worker *work; |
179 | |
180 | while (_ecore_pending_job_threads) |
181 | { |
182 | pthread_mutex_lock(&_ecore_pending_job_threads_mutex); |
183 | |
184 | if (!_ecore_pending_job_threads) |
185 | { |
186 | pthread_mutex_unlock(&_ecore_pending_job_threads_mutex); |
187 | break; |
188 | } |
189 | |
190 | work = eina_list_data_get(_ecore_pending_job_threads); |
191 | _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads, _ecore_pending_job_threads); |
192 | |
193 | pthread_mutex_unlock(&_ecore_pending_job_threads_mutex); |
194 | |
195 | if (!work->cancel) |
196 | work->u.short_run.func_blocking((void *) work->data); |
197 | |
198 | ecore_pipe_write(end_pipe, &work, sizeof (Ecore_Pthread_Worker *)); |
199 | } |
200 | } |
201 | |
202 | static void |
203 | _ecore_feedback_job(Ecore_Pipe *end_pipe, pthread_t thread) |
204 | { |
205 | Ecore_Pthread_Worker *work; |
206 | |
207 | while (_ecore_pending_job_threads_feedback) |
208 | { |
209 | pthread_mutex_lock(&_ecore_pending_job_threads_mutex); |
210 | |
211 | if (!_ecore_pending_job_threads_feedback) |
212 | { |
213 | pthread_mutex_unlock(&_ecore_pending_job_threads_mutex); |
214 | break; |
215 | } |
216 | |
217 | work = eina_list_data_get(_ecore_pending_job_threads_feedback); |
218 | _ecore_pending_job_threads_feedback = eina_list_remove_list(_ecore_pending_job_threads_feedback, _ecore_pending_job_threads_feedback); |
219 | |
220 | pthread_mutex_unlock(&_ecore_pending_job_threads_mutex); |
221 | |
222 | work->self = thread; |
223 | if (!work->cancel) |
224 | work->u.feedback_run.func_heavy((Ecore_Thread *) work, (void *) work->data); |
225 | |
226 | ecore_pipe_write(end_pipe, &work, sizeof (Ecore_Pthread_Worker *)); |
227 | } |
228 | } |
229 | |
230 | static void * |
231 | _ecore_direct_worker(Ecore_Pthread_Worker *work) |
232 | { |
233 | Ecore_Pthread_Data *pth; |
234 | |
235 | pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL); |
236 | pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); |
237 | eina_sched_prio_drop(); |
238 | |
239 | pth = malloc(sizeof (Ecore_Pthread_Data)); |
240 | if (!pth) return NULL; |
241 | |
242 | pth->p = ecore_pipe_add(_ecore_thread_handler, NULL); |
243 | if (!pth->p) |
244 | { |
245 | free(pth); |
246 | return NULL; |
247 | } |
248 | pth->thread = pthread_self(); |
249 | |
250 | work->self = pth->thread; |
251 | work->u.feedback_run.func_heavy((Ecore_Thread *) work, (void *) work->data); |
252 | |
253 | ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *)); |
254 | |
255 | work = malloc(sizeof (Ecore_Pthread_Worker)); |
256 | if (!work) |
257 | { |
258 | ecore_pipe_del(pth->p); |
259 | free(pth); |
260 | return NULL; |
261 | } |
262 | |
263 | work->data = pth; |
264 | work->u.short_run.func_blocking = NULL; |
265 | work->func_end = (void *) _ecore_thread_end; |
266 | work->func_cancel = NULL; |
267 | work->cancel = EINA_FALSE; |
268 | work->feedback_run = EINA_FALSE; |
269 | work->hash = NULL; |
270 | pthread_cond_init(&work->cond, NULL); |
271 | pthread_mutex_init(&work->mutex, NULL); |
272 | |
273 | ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *)); |
274 | |
275 | return pth->p; |
276 | } |
277 | |
278 | static void * |
279 | _ecore_thread_worker(Ecore_Pthread_Data *pth) |
280 | { |
281 | Ecore_Pthread_Worker *work; |
282 | |
283 | pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL); |
284 | pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); |
285 | eina_sched_prio_drop(); |
286 | |
287 | pthread_mutex_lock(&_ecore_pending_job_threads_mutex); |
288 | _ecore_thread_count++; |
289 | pthread_mutex_unlock(&_ecore_pending_job_threads_mutex); |
290 | |
291 | on_error: |
292 | if (_ecore_pending_job_threads) _ecore_short_job(pth->p); |
293 | if (_ecore_pending_job_threads_feedback) _ecore_feedback_job(pth->p, pth->thread); |
294 | |
295 | /* FIXME: Check if there is feedback running task todo, and switch to feedback run handler. */ |
296 | |
297 | pthread_mutex_lock(&_ecore_pending_job_threads_mutex); |
298 | if (_ecore_pending_job_threads) |
299 | { |
300 | pthread_mutex_unlock(&_ecore_pending_job_threads_mutex); |
301 | goto on_error; |
302 | } |
303 | if (_ecore_pending_job_threads_feedback) |
304 | { |
305 | pthread_mutex_unlock(&_ecore_pending_job_threads_mutex); |
306 | goto on_error; |
307 | } |
308 | |
309 | _ecore_thread_count--; |
310 | |
311 | pthread_mutex_unlock(&_ecore_pending_job_threads_mutex); |
312 | |
313 | work = malloc(sizeof (Ecore_Pthread_Worker)); |
314 | if (!work) return NULL; |
315 | |
316 | work->data = pth; |
317 | work->u.short_run.func_blocking = NULL; |
318 | work->func_end = (void *) _ecore_thread_end; |
319 | work->func_cancel = NULL; |
320 | work->cancel = EINA_FALSE; |
321 | work->feedback_run = EINA_FALSE; |
322 | work->hash = NULL; |
323 | pthread_cond_init(&work->cond, NULL); |
324 | pthread_mutex_init(&work->mutex, NULL); |
325 | |
326 | ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *)); |
327 | |
328 | return pth->p; |
329 | } |
330 | |
331 | #endif |
332 | |
333 | void |
334 | _ecore_thread_init(void) |
335 | { |
336 | _ecore_thread_count_max = eina_cpu_count(); |
337 | if (_ecore_thread_count_max <= 0) |
338 | _ecore_thread_count_max = 1; |
339 | |
340 | ECORE_THREAD_PIPE_DEL = ecore_event_type_new(); |
341 | #ifdef EFL_HAVE_PTHREAD |
342 | del_handler = ecore_event_handler_add(ECORE_THREAD_PIPE_DEL, _ecore_thread_pipe_del, NULL); |
343 | main_loop_thread = pthread_self(); |
344 | have_main_loop_thread = 1; |
345 | #endif |
346 | } |
347 | |
348 | void |
349 | _ecore_thread_shutdown(void) |
350 | { |
351 | /* FIXME: If functions are still running in the background, should we kill them? */ |
352 | #ifdef EFL_HAVE_PTHREAD |
353 | Ecore_Pthread_Worker *work; |
354 | Ecore_Pthread_Data *pth; |
355 | |
356 | pthread_mutex_lock(&_ecore_pending_job_threads_mutex); |
357 | |
358 | EINA_LIST_FREE(_ecore_pending_job_threads, work) |
359 | { |
360 | if (work->func_cancel) |
361 | work->func_cancel((void *)work->data); |
362 | free(work); |
363 | } |
364 | |
365 | pthread_mutex_unlock(&_ecore_pending_job_threads_mutex); |
366 | |
367 | EINA_LIST_FREE(_ecore_active_job_threads, pth) |
368 | { |
369 | Ecore_Pipe *p; |
370 | |
371 | pthread_cancel(pth->thread); |
372 | pthread_join(pth->thread, (void **) &p); |
373 | |
374 | ecore_pipe_del(pth->p); |
375 | } |
376 | if (_ecore_thread_global_hash) |
377 | eina_hash_free(_ecore_thread_global_hash); |
378 | ecore_event_handler_del(del_handler); |
379 | have_main_loop_thread = 0; |
380 | del_handler = NULL; |
381 | #endif |
382 | } |
383 | /** |
384 | * @addtogroup Ecore_Thread Ecore Thread Functions |
385 | * These functions allow for ecore-managed threads which integrate with ecore's main loop. |
386 | * @{ |
387 | */ |
388 | |
389 | /** |
390 | * @brief Run some blocking code in a parallel thread to avoid locking the main loop. |
391 | * @param func_blocking The function that should run in another thread. |
392 | * @param func_end The function that will be called in the main loop if the thread terminates correctly. |
393 | * @param func_cancel The function that will be called in the main loop if the thread is cancelled. |
394 | * @param data User context data to pass to all callbacks. |
395 | * @return A reference to the newly created thread instance, or NULL if it failed. |
396 | * |
397 | * ecore_thread_run provides a facility for easily running a blocking task in a |
398 | * parallel thread. You should provide three functions. The first one, func_blocking, |
399 | * does the blocking work in another thread (so you should not use the EFL in it, |
400 | * except Eina if you are careful). The second one, func_end, is called in the |
401 | * Ecore main loop when func_blocking is done, so you can use all of the EFL |
402 | * inside it. The last one, func_cancel, is called in the main loop if the |
403 | * thread is cancelled or could not run at all. |
404 | * |
405 | * Be aware that you cannot make assumptions about the completion order of func_end |
406 | * after many calls to ecore_thread_run, as ecore starts as many threads as the |
407 | * host CPU can handle. |
408 | */ |
409 | EAPI Ecore_Thread * |
410 | ecore_thread_run(Ecore_Cb func_blocking, |
411 | Ecore_Cb func_end, |
412 | Ecore_Cb func_cancel, |
413 | const void *data) |
414 | { |
415 | #ifdef EFL_HAVE_PTHREAD |
416 | Ecore_Pthread_Worker *work; |
417 | Ecore_Pthread_Data *pth = NULL; |
418 | |
419 | if (!func_blocking) return NULL; |
420 | |
421 | work = malloc(sizeof (Ecore_Pthread_Worker)); |
422 | if (!work) |
423 | { |
424 | func_cancel((void *) data); |
425 | return NULL((void*)0); |
426 | } |
427 | |
428 | work->u.short_run.func_blocking = func_blocking; |
429 | work->hash = NULL; |
430 | pthread_cond_init(&work->cond, NULL); |
431 | pthread_mutex_init(&work->mutex, NULL); |
432 | work->func_end = func_end; |
433 | work->func_cancel = func_cancel; |
434 | work->cancel = EINA_FALSE; |
435 | work->feedback_run = EINA_FALSE; |
436 | work->data = data; |
437 | |
438 | pthread_mutex_lock(&_ecore_pending_job_threads_mutex); |
439 | _ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work); |
440 | |
441 | if (_ecore_thread_count == _ecore_thread_count_max) |
442 | { |
443 | pthread_mutex_unlock(&_ecore_pending_job_threads_mutex); |
444 | return (Ecore_Thread *) work; |
445 | } |
446 | |
447 | pthread_mutex_unlock(&_ecore_pending_job_threads_mutex); |
448 | |
449 | /* One more thread could be created. */ |
450 | pth = malloc(sizeof (Ecore_Pthread_Data)); |
451 | if (!pth) goto on_error; |
452 | |
453 | pth->p = ecore_pipe_add(_ecore_thread_handler, NULL); |
454 | if (!pth->p) goto on_error; |
455 | |
456 | if (pthread_create(&pth->thread, NULL, (void *) _ecore_thread_worker, pth) == 0) |
457 | return (Ecore_Thread *) work; |
458 | |
459 | on_error: |
460 | if (pth) |
461 | { |
462 | if (pth->p) ecore_pipe_del(pth->p); |
463 | free(pth); |
464 | } |
465 | |
466 | if (_ecore_thread_count == 0) |
467 | { |
468 | if (work->func_cancel) |
469 | work->func_cancel((void *) work->data); |
470 | free(work); |
471 | work = NULL; |
472 | } |
473 | return (Ecore_Thread *) work; |
474 | #else |
475 | /* |
476 | If threads are not available, and as we don't want to break apps that rely on this |
477 | facility, we block the caller until the work is done. |
478 | */ |
479 | func_blocking((void *)data); |
480 | func_end((void *)data); |
481 | |
482 | return NULL; |
483 | #endif |
484 | } |
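/*
 * Editorial sketch (not part of ecore_thread.c): typical use of the
 * ecore_thread_run() API documented above. The callback and payload names
 * are illustrative assumptions; Ecore_Cb callbacks take a single void *data
 * argument, matching how they are invoked in this file.
 */
#include <stdlib.h>
#include <Ecore.h>

static void
_job_blocking(void *data)   /* runs in a worker thread: avoid EFL calls here */
{
   (void) data;             /* a real job would do long, blocking work on its input */
}

static void
_job_end(void *data)        /* runs in the main loop once _job_blocking returns */
{
   free(data);
}

static void
_job_cancel(void *data)     /* runs in the main loop if the job is cancelled */
{
   free(data);
}

static void
_job_submit(void *payload)
{
   Ecore_Thread *t = ecore_thread_run(_job_blocking, _job_end, _job_cancel, payload);
   /* In the threaded build above, a NULL return means _job_cancel was already
    * invoked; in the fallback build the work has simply been run synchronously. */
   (void) t;
}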
485 | |
486 | /** |
487 | * @brief Cancel a running thread. |
488 | * @param thread The thread to cancel. |
489 | * @return Will return EINA_TRUE if the thread has been cancelled, |
490 | * EINA_FALSE if it is pending. |
491 | * |
492 | * ecore_thread_cancel gives you the ability to cancel a task that is still pending or running. It |
493 | * returns EINA_FALSE if the destruction is delayed, or EINA_TRUE if the task is |
494 | * cancelled by this call. |
495 | * |
496 | * You should use this function only from the main loop. |
497 | * |
498 | * func_end and func_cancel destroy the handler, so don't use it afterwards. |
499 | * Likewise, if ecore_thread_cancel returns EINA_TRUE, you should not use the Ecore_Thread anymore. |
500 | */ |
501 | EAPI Eina_Bool |
502 | ecore_thread_cancel(Ecore_Thread *thread) |
503 | { |
504 | #ifdef EFL_HAVE_PTHREAD |
505 | Ecore_Pthread_Worker *work = (Ecore_Pthread_Worker *)thread; |
506 | Eina_List *l; |
507 | |
508 | if (!work) |
509 | return EINA_TRUE; |
510 | |
511 | pthread_mutex_lock(&_ecore_pending_job_threads_mutex); |
512 | |
513 | if ((have_main_loop_thread) && |
514 | (pthread_equal(main_loop_thread, pthread_self()))) |
515 | { |
516 | EINA_LIST_FOREACH(_ecore_pending_job_threads, l, work) |
517 | { |
518 | if ((void *) work == (void *) thread) |
519 | { |
520 | _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads, l); |
521 | |
522 | pthread_mutex_unlock(&_ecore_pending_job_threads_mutex); |
523 | |
524 | if (work->func_cancel) |
525 | work->func_cancel((void *) work->data); |
526 | free(work); |
527 | |
528 | return EINA_TRUE; |
529 | } |
530 | } |
531 | } |
532 | |
533 | pthread_mutex_unlock(&_ecore_pending_job_threads_mutex); |
534 | |
535 | /* Delay the destruction */ |
536 | ((Ecore_Pthread_Worker *)thread)->cancel = EINA_TRUE; |
537 | return EINA_FALSE; |
538 | #else |
539 | return EINA_TRUE; |
540 | #endif |
541 | } |
542 | |
543 | /** |
544 | * @brief Tell whether a thread has been cancelled or not. |
545 | * @param thread The thread to test. |
546 | * @return EINA_TRUE if the thread is cancelled, |
547 | * EINA_FALSE if it is not. |
548 | * |
549 | * You can use this function both in the main loop and in the thread. |
550 | */ |
551 | EAPI Eina_Bool |
552 | ecore_thread_check(Ecore_Thread *thread) |
553 | { |
554 | Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread; |
555 | |
556 | if (!worker) return EINA_TRUE; |
557 | return worker->cancel; |
558 | } |
559 | |
560 | /** |
561 | * @brief Run some heavy code in a parallel thread to avoid locking the main loop. |
562 | * @param func_heavy The function that should run in another thread. |
563 | * @param func_notify The function that will receive, in the main loop, the data sent by func_heavy. |
564 | * @param func_end The function that will be called in the main loop if the thread terminates correctly. |
565 | * @param func_cancel The function that will be called in the main loop if the thread is cancelled. |
566 | * @param data User context data to pass to all callbacks. |
567 | * @param try_no_queue Set to EINA_TRUE if you want to run outside of the thread pool. |
568 | * @return A reference to the newly created thread instance, or NULL if it failed. |
569 | * |
570 | * ecore_thread_feedback_run provides a facility for easily running a heavy task in a |
571 | * parallel thread. You should provide four functions. The first one, func_heavy, |
572 | * does the heavy work in another thread (so you should not use the EFL in it, |
573 | * except Eina and Eet if you are careful). The second one, func_notify, |
574 | * receives, in the main loop, the data sent from the thread function (func_heavy) via |
575 | * ecore_thread_feedback (and so can use all of the EFL). The third, func_end, |
576 | * is called in the Ecore main loop when func_heavy is done, so you can use all |
577 | * of the EFL inside it. The last one, func_cancel, is also called in the |
578 | * main loop if the thread is cancelled or could not run at all. |
579 | * |
580 | * Be aware that you cannot make assumptions about the completion order of func_end |
581 | * after many calls to ecore_thread_feedback_run, as ecore starts as many threads as the |
582 | * host CPU can handle. |
583 | * |
584 | * If you set try_no_queue, it will try to run outside of the thread pool; this can |
585 | * saturate the CPU, so be careful with it. Of course, if it can't start a new thread, it will |
586 | * fall back to using one from the pool. |
587 | */ |
588 | EAPI Ecore_Thread *ecore_thread_feedback_run(Ecore_Thread_Heavy_Cb func_heavy, |
589 | Ecore_Thread_Notify_Cb func_notify, |
590 | Ecore_Cb func_end, |
591 | Ecore_Cb func_cancel, |
592 | const void *data, |
593 | Eina_Bool try_no_queue) |
594 | { |
595 | |
596 | #ifdef EFL_HAVE_PTHREAD |
597 | Ecore_Pthread_Worker *worker; |
598 | Ecore_Pthread_Data *pth = NULL; |
599 | |
600 | if (!func_heavy) return NULL; |
601 | |
602 | worker = malloc(sizeof (Ecore_Pthread_Worker)); |
603 | if (!worker) goto on_error; |
604 | |
605 | worker->u.feedback_run.func_heavy = func_heavy; |
606 | worker->u.feedback_run.func_notify = func_notify; |
607 | worker->hash = NULL; |
608 | pthread_cond_init(&worker->cond, NULL); |
609 | pthread_mutex_init(&worker->mutex, NULL); |
610 | worker->func_cancel = func_cancel; |
611 | worker->func_end = func_end; |
612 | worker->data = data; |
613 | worker->cancel = EINA_FALSE; |
614 | worker->feedback_run = EINA_TRUE; |
615 | |
616 | worker->u.feedback_run.notify = ecore_pipe_add(_ecore_notify_handler, worker); |
617 | |
618 | if (!try_no_queue) |
619 | { |
620 | pthread_t t; |
621 | |
622 | if (pthread_create(&t, NULL, (void *) _ecore_direct_worker, worker) == 0) |
623 | return (Ecore_Thread *) worker; |
624 | } |
625 | |
626 | pthread_mutex_lock(&_ecore_pending_job_threads_mutex); |
627 | _ecore_pending_job_threads_feedback = eina_list_append(_ecore_pending_job_threads_feedback, worker); |
628 | |
629 | if (_ecore_thread_count == _ecore_thread_count_max) |
630 | { |
631 | pthread_mutex_unlock(&_ecore_pending_job_threads_mutex); |
632 | return (Ecore_Thread *) worker; |
633 | } |
634 | |
635 | pthread_mutex_unlock(&_ecore_pending_job_threads_mutex); |
636 | |
637 | /* One more thread could be created. */ |
638 | pth = malloc(sizeof (Ecore_Pthread_Data)); |
639 | if (!pth) goto on_error; |
640 | |
641 | pth->p = ecore_pipe_add(_ecore_thread_handler, NULL); |
642 | if (!pth->p) goto on_error; |
643 | |
644 | if (pthread_create(&pth->thread, NULL, (void *) _ecore_thread_worker, pth) == 0) |
645 | return (Ecore_Thread *) worker; |
646 | |
647 | on_error: |
648 | if (pth) |
649 | { |
650 | if (pth->p) ecore_pipe_del(pth->p); |
651 | free(pth); |
652 | } |
653 | |
654 | if (_ecore_thread_count == 0) |
655 | { |
656 | if (func_cancel) func_cancel((void *) data); |
657 | |
658 | if (worker) |
659 | { |
660 | ecore_pipe_del(worker->u.feedback_run.notify); |
661 | free(worker); |
662 | worker = NULL; |
663 | } |
664 | } |
665 | |
666 | return (Ecore_Thread *) worker; |
667 | #else |
668 | Ecore_Pthread_Worker worker; |
669 | |
670 | (void) try_no_queue; |
671 | |
672 | /* |
673 | If threads are not available, and as we don't want to break apps that rely on this |
674 | facility, we block the caller until the work is done. |
675 | */ |
676 | worker.u.feedback_run.func_heavy = func_heavy; |
677 | worker.u.feedback_run.func_notify = func_notify; |
678 | worker.u.feedback_run.notify = NULL; |
679 | worker.func_cancel = func_cancel; |
680 | worker.func_end = func_end; |
681 | worker.data = data; |
682 | worker.cancel = EINA_FALSE; |
683 | worker.feedback_run = EINA_TRUE; |
684 | |
685 | func_heavy((Ecore_Thread *) &worker, (void *)data); |
686 | |
687 | if (worker.cancel) func_cancel((void *)data); |
688 | else func_end((void *)data); |
689 | |
690 | return NULL; |
691 | #endif |
692 | } |
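/*
 * Editorial sketch (not part of ecore_thread.c): driving the feedback API
 * documented above. The callback names and the malloc'ed int used as the
 * message are illustrative assumptions; the callback signatures follow the
 * way func_heavy and func_notify are invoked earlier in this file, and the
 * end/cancel callbacks would look like the ones in the previous sketch.
 */
#include <stdio.h>
#include <stdlib.h>
#include <Ecore.h>

static void
_progress_heavy(Ecore_Thread *thread, void *data)              /* worker thread */
{
   int step;

   (void) data;
   for (step = 0; step < 100; step++)
     {
        int *msg;

        if (ecore_thread_check(thread)) return;                /* job was cancelled */
        msg = malloc(sizeof (int));
        if (!msg) return;
        *msg = step;
        /* on success, ownership of 'msg' passes to the main loop */
        if (!ecore_thread_feedback(thread, msg)) free(msg);
     }
}

static void
_progress_notify(Ecore_Thread *thread, void *msg, void *data)  /* main loop */
{
   (void) thread;
   (void) data;
   printf("progress: %d\n", *(int *) msg);
   free(msg);
}

/* Submission, from the main loop:
 *   ecore_thread_feedback_run(_progress_heavy, _progress_notify,
 *                             _job_end, _job_cancel, payload, EINA_FALSE);
 */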
693 | |
694 | /** |
695 | * @brief Send data to the main loop from a worker thread. |
696 | * @param thread The current Ecore_Thread context to send data from |
697 | * @param data Data to be transmitted to the main loop |
698 | * @return EINA_TRUE if the data was successfully sent to the main loop, |
699 | * EINA_FALSE if anything goes wrong. |
700 | * |
701 | * After a successful call, the data should be considered owned |
702 | * by the main loop. |
703 | * |
704 | * You should use this function only in the func_heavy call. |
705 | */ |
706 | EAPI Eina_Bool |
707 | ecore_thread_feedback(Ecore_Thread *thread, const void *data) |
708 | { |
709 | Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread; |
710 | |
711 | if (!worker) return EINA_FALSE; |
712 | if (!worker->feedback_run) return EINA_FALSE; |
713 | |
714 | #ifdef EFL_HAVE_PTHREAD |
715 | if (!pthread_equal(worker->self, pthread_self())) return EINA_FALSE; |
716 | |
717 | ecore_pipe_write(worker->u.feedback_run.notify, &data, sizeof (void *)); |
718 | |
719 | return EINA_TRUE; |
720 | #else |
721 | worker->u.feedback_run.func_notify(thread, (void*) data, (void*) worker->data); |
722 | |
723 | return EINA_TRUE; |
724 | #endif |
725 | } |
726 | |
727 | /** |
728 | * @brief Get number of active thread jobs |
729 | * @return Number of active threads running jobs |
730 | * This returns the number of threads currently running jobs through the |
731 | * ecore_thread api. |
732 | */ |
733 | EAPI int |
734 | ecore_thread_active_get(void) |
735 | { |
736 | #ifdef EFL_HAVE_PTHREAD |
737 | return _ecore_thread_count; |
738 | #else |
739 | return 0; |
740 | #endif |
741 | } |
742 | |
743 | /** |
744 | * @brief Get number of pending (short) thread jobs |
745 | * @return Number of pending threads running "short" jobs |
746 | * This returns the number of threads currently running jobs through the |
747 | * ecore_thread_run api call. |
748 | */ |
749 | EAPI__attribute__ ((visibility("default"))) int |
750 | ecore_thread_pending_get(void) |
751 | { |
752 | int ret; |
753 | #ifdef EFL_HAVE_PTHREAD |
754 | pthread_mutex_lock(&_ecore_pending_job_threads_mutex); |
755 | ret = eina_list_count(_ecore_pending_job_threads); |
756 | pthread_mutex_unlock(&_ecore_pending_job_threads_mutex); |
757 | return ret; |
758 | #else |
759 | return 0; |
760 | #endif |
761 | } |
762 | |
763 | /** |
764 | * @brief Get number of pending feedback thread jobs |
765 | * @return Number of pending threads running "feedback" jobs |
766 | * This returns the number of threads currently running jobs through the |
767 | * ecore_thread_feedback_run api call. |
768 | */ |
769 | EAPI__attribute__ ((visibility("default"))) int |
770 | ecore_thread_pending_feedback_get(void) |
771 | { |
772 | int ret; |
773 | #ifdef EFL_HAVE_PTHREAD |
774 | pthread_mutex_lock(&_ecore_pending_job_threads_mutex); |
775 | ret = eina_list_count(_ecore_pending_job_threads_feedback); |
776 | pthread_mutex_unlock(&_ecore_pending_job_threads_mutex); |
777 | return ret; |
778 | #else |
779 | return 0; |
780 | #endif |
781 | } |
782 | |
783 | /** |
784 | * @brief Get number of pending thread jobs |
785 | * @return Total number of pending jobs |
786 | * This returns the number of jobs submitted through the ecore_thread_run and |
787 | * ecore_thread_feedback_run api calls combined that are still waiting for a thread. |
788 | */ |
789 | EAPI int |
790 | ecore_thread_pending_total_get(void) |
791 | { |
792 | int ret; |
793 | #ifdef EFL_HAVE_PTHREAD |
794 | pthread_mutex_lock(&_ecore_pending_job_threads_mutex); |
795 | ret = eina_list_count(_ecore_pending_job_threads) + eina_list_count(_ecore_pending_job_threads_feedback); |
796 | pthread_mutex_unlock(&_ecore_pending_job_threads_mutex); |
797 | return ret; |
798 | #else |
799 | return 0; |
800 | #endif |
801 | } |
802 | |
803 | /** |
804 | * @brief Get the max number of threads that can run simultaneously |
805 | * @return Max number of threads ecore will run |
806 | * This returns the total number of threads that ecore will attempt to run |
807 | * simultaneously. |
808 | */ |
809 | EAPI int |
810 | ecore_thread_max_get(void) |
811 | { |
812 | return _ecore_thread_count_max; |
813 | } |
814 | |
815 | /** |
816 | * @brief Set the max number of threads that can run simultaneously |
817 | * @param num The new maximum |
818 | * This sets the maximum number of threads that ecore will try to run |
819 | * simultaneously. This number cannot be < 1 or >= 2x the number of active cpus. |
820 | */ |
821 | EAPI void |
822 | ecore_thread_max_set(int num) |
823 | { |
824 | if (num < 1) return; |
825 | /* avoid doing something hilarious by blocking dumb users */ |
826 | if (num >= (2 * eina_cpu_count())) return; |
827 | |
828 | _ecore_thread_count_max = num; |
829 | } |
830 | |
831 | /** |
832 | * @brief Reset the max number of threads that can run simultaneously |
833 | * This resets the maximum number of threads that ecore will try to run |
834 | * simultaneously to the number of active cpus. |
835 | */ |
836 | EAPI void |
837 | ecore_thread_max_reset(void) |
838 | { |
839 | _ecore_thread_count_max = eina_cpu_count(); |
840 | } |
841 | |
842 | /** |
843 | * @brief Get the number of threads which are available to be used |
844 | * @return The number of available threads |
845 | * This returns the number of thread slots that ecore currently has available. |
846 | * Assuming that you haven't changed the max number of threads with @ref ecore_thread_max_set |
847 | * this should be equal to (num_cpus - (active_running + active_feedback_running)) |
848 | */ |
849 | EAPI int |
850 | ecore_thread_available_get(void) |
851 | { |
852 | int ret; |
853 | #ifdef EFL_HAVE_PTHREAD |
854 | pthread_mutex_lock(&_ecore_pending_job_threads_mutex); |
855 | ret = _ecore_thread_count_max - _ecore_thread_count; |
856 | pthread_mutex_unlock(&_ecore_pending_job_threads_mutex); |
857 | return ret; |
858 | #else |
859 | return 0; |
860 | #endif |
861 | } |
862 | |
863 | /** |
864 | * @brief Add data to the thread for subsequent use |
865 | * @param thread The thread context to add to |
866 | * @param key The name string to add the data with |
867 | * @param value The data to add |
868 | * @param cb The callback to free the data with |
869 | * @param direct If true, this will not copy the key string (like eina_hash_direct_add) |
870 | * @return EINA_TRUE on success, EINA_FALSE on failure |
871 | * This adds data to the thread context, allowing the thread |
872 | * to retrieve and use it without complicated mutexing. This function can only be called by a |
873 | * *_run thread INSIDE the thread and will return EINA_FALSE in any case but success. |
874 | * All data added to the thread will be freed with its associated callback (if present) |
875 | * upon thread termination. If no callback is specified, it is expected that the user will free the |
876 | * data, but this is most likely not what you want. |
877 | */ |
878 | EAPI Eina_Bool |
879 | ecore_thread_local_data_add(Ecore_Thread *thread, const char *key, void *value, Eina_Free_Cb cb, Eina_Bool direct) |
880 | { |
881 | Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread; |
882 | Ecore_Thread_Data *d; |
883 | Eina_Bool ret; |
884 | |
885 | if ((!thread) || (!key) || (!value)) |
886 | return EINA_FALSE; |
887 | #ifdef EFL_HAVE_PTHREAD |
888 | if (!pthread_equal(worker->self, pthread_self())) return EINA_FALSE; |
889 | |
890 | if (!worker->hash) |
891 | worker->hash = eina_hash_string_small_new(_ecore_thread_data_free); |
892 | |
893 | if (!worker->hash) |
894 | return EINA_FALSE; |
895 | |
896 | if (!(d = malloc(sizeof(Ecore_Thread_Data)))) |
897 | return EINA_FALSE; |
898 | |
899 | d->data = value; |
900 | d->cb = cb; |
901 | |
902 | if (direct) |
903 | ret = eina_hash_direct_add(worker->hash, key, d); |
904 | else |
905 | ret = eina_hash_add(worker->hash, key, d); |
906 | pthread_cond_broadcast(&worker->cond); |
907 | return ret; |
908 | #else |
909 | return EINA_TRUE; |
910 | #endif |
911 | } |
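/*
 * Editorial sketch (not part of ecore_thread.c): attaching per-thread scratch
 * data with the local-data calls documented above. The key string and helper
 * name are illustrative assumptions; as the documentation notes, these calls
 * only succeed when made from inside the *_run thread itself.
 */
#include <stdlib.h>
#include <Ecore.h>

static void
_heavy_with_scratch(Ecore_Thread *thread, void *data)
{
   char *scratch;
   char *found;

   (void) data;
   scratch = malloc(4096);
   if (!scratch) return;
   /* the free() callback releases it automatically when the thread ends */
   if (!ecore_thread_local_data_add(thread, "scratch", scratch, free, EINA_FALSE))
     {
        free(scratch);
        return;
     }

   /* later, possibly in a helper running in this same thread */
   found = ecore_thread_local_data_find(thread, "scratch");
   if (found) found[0] = '\0';
}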
912 | |
913 | /** |
914 | * @brief Modify data in the thread, or add if not found |
915 | * @param thread The thread context |
916 | * @param key The name string to add the data with |
917 | * @param value The data to add |
918 | * @param cb The callback to free the data with |
919 | * @return The old data associated with @p key on success if modified, NULL if added |
920 | * This adds/modifies data in the thread context, adding only if modify fails. |
921 | * This function can only be called by a *_run thread INSIDE the thread. |
922 | * All data added to the thread pool will be freed with its associated callback (if present) |
923 | * upon thread termination. If no callback is specified, it is expected that the user will free the |
924 | * data, but this is most likely not what you want. |
925 | */ |
926 | EAPI void * |
927 | ecore_thread_local_data_set(Ecore_Thread *thread, const char *key, void *value, Eina_Free_Cb cb) |
928 | { |
929 | Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread; |
930 | Ecore_Thread_Data *d, *r; |
931 | void *ret; |
932 | if ((!thread) || (!key) || (!value)) |
933 | return NULL; |
934 | #ifdef EFL_HAVE_PTHREAD |
935 | if (!pthread_equal(worker->self, pthread_self())) return NULL; |
936 | |
937 | if (!worker->hash) |
938 | worker->hash = eina_hash_string_small_new(_ecore_thread_data_free); |
939 | |
940 | if (!worker->hash) |
941 | return NULL; |
942 | |
943 | if (!(d = malloc(sizeof(Ecore_Thread_Data)))) |
944 | return NULL; |
945 | |
946 | d->data = value; |
947 | d->cb = cb; |
948 | |
949 | r = eina_hash_set(worker->hash, key, d); |
950 | pthread_cond_broadcast(&worker->cond); |
951 | ret = r->data; |
952 | free(r); |
953 | return ret; |
954 | #else |
955 | return NULL; |
956 | #endif |
957 | } |
958 | |
959 | /** |
960 | * @brief Find data in the thread's data |
961 | * @param thread The thread context |
962 | * @param key The name string the data is associated with |
963 | * @return The value, or NULL on error |
964 | * This finds data in the thread context that has been previously added with @ref ecore_thread_local_data_add |
965 | * This function can only be called by a *_run thread INSIDE the thread, and will return NULL |
966 | * in any case but success. |
967 | */ |
968 | |
969 | EAPI void * |
970 | ecore_thread_local_data_find(Ecore_Thread *thread, const char *key) |
971 | { |
972 | Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread; |
Value stored to 'worker' during its initialization is never read | |
973 | Ecore_Thread_Data *d; |
974 | |
975 | if ((!thread) || (!key)) |
976 | return NULL; |
977 | #ifdef EFL_HAVE_PTHREAD |
978 | if (!pthread_equal(worker->self, pthread_self())) return NULL; |
979 | |
980 | if (!worker->hash) |
981 | return NULL; |
982 | |
983 | d = eina_hash_find(worker->hash, key); |
984 | return d->data; |
985 | #else |
986 | return NULL; |
987 | #endif |
988 | } |
989 | |
990 | /** |
991 | * @brief Delete data from the thread's data |
992 | * @param thread The thread context |
993 | * @param key The name string the data is associated with |
994 | * @return EINA_TRUE on success, EINA_FALSE on failure |
995 | * This deletes the data pointer from the thread context which was previously added with @ref ecore_thread_local_data_add |
996 | * This function can only be called by a *_run thread INSIDE the thread, and will return EINA_FALSE |
997 | * in any case but success. Note that this WILL free the data if a callback was specified. |
998 | */ |
999 | EAPI Eina_Bool |
1000 | ecore_thread_local_data_del(Ecore_Thread *thread, const char *key) |
1001 | { |
1002 | Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread; |
1003 | Ecore_Thread_Data *d; |
1004 | if ((!thread) || (!key)) |
1005 | return EINA_FALSE; |
1006 | #ifdef EFL_HAVE_PTHREAD |
1007 | if (!pthread_equal(worker->self, pthread_self())) return EINA_FALSE; |
1008 | |
1009 | if (!worker->hash) |
1010 | return EINA_FALSE; |
1011 | if ((d = eina_hash_find(worker->hash, key))) |
1012 | _ecore_thread_data_free(d); |
1013 | return eina_hash_del_by_key(worker->hash, key); |
1014 | #else |
1015 | return EINA_TRUE; |
1016 | #endif |
1017 | } |
1018 | |
1019 | /** |
1020 | * @brief Add data to the global data |
1021 | * @param key The name string to add the data with |
1022 | * @param value The data to add |
1023 | * @param cb The optional callback to free the data with once ecore is shut down |
1024 | * @param direct If true, this will not copy the key string (like eina_hash_direct_add) |
1025 | * @return EINA_TRUE on success, EINA_FALSE on failure |
1026 | * This adds data to the global thread data, and will return EINA_FALSE in any case but success. |
1027 | * All data added to global can be manually freed, or a callback can be provided with @p cb which will |
1028 | * be called upon ecore_thread shutting down. Note that if you have manually freed data that a callback |
1029 | * was specified for, you will most likely encounter a segv later on. |
1030 | */ |
1031 | EAPI Eina_Bool |
1032 | ecore_thread_global_data_add(const char *key, void *value, Eina_Free_Cb cb, Eina_Bool direct) |
1033 | { |
1034 | Eina_Bool ret; |
1035 | Ecore_Thread_Data *d; |
1036 | |
1037 | if ((!key) || (!value)) |
1038 | return EINA_FALSE; |
1039 | #ifdef EFL_HAVE_PTHREAD |
1040 | pthread_rwlock_wrlock(&_ecore_thread_global_hash_lock); |
1041 | if (!_ecore_thread_global_hash) |
1042 | _ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free); |
1043 | pthread_rwlock_unlock(&_ecore_thread_global_hash_lock); |
1044 | |
1045 | if (!(d = malloc(sizeof(Ecore_Thread_Data)))) |
1046 | return EINA_FALSE; |
1047 | |
1048 | d->data = value; |
1049 | d->cb = cb; |
1050 | |
1051 | if (!_ecore_thread_global_hash) |
1052 | return EINA_FALSE; |
1053 | pthread_rwlock_wrlock(&_ecore_thread_global_hash_lock); |
1054 | if (direct) |
1055 | ret = eina_hash_direct_add(_ecore_thread_global_hash, key, d); |
1056 | else |
1057 | ret = eina_hash_add(_ecore_thread_global_hash, key, d); |
1058 | pthread_rwlock_unlock(&_ecore_thread_global_hash_lock); |
1059 | pthread_cond_broadcast(&_ecore_thread_global_hash_cond); |
1060 | return ret; |
1061 | #else |
1062 | return EINA_TRUE; |
1063 | #endif |
1064 | } |
1065 | |
1066 | /** |
1067 | * @brief Add data to the global data |
1068 | * @param key The name string to add the data with |
1069 | * @param value The data to add |
1070 | * @param cb The optional callback to free the data with once ecore is shut down |
1071 | * @return An Ecore_Thread_Data on success, NULL on failure |
1072 | * This adds data to the global thread data and returns NULL or, if @p key already existed, |
1073 | * replaces the previous data and returns it. To see if an error occurred, |
1074 | * one must use eina_error_get. |
1075 | * All data added to global can be manually freed, or a callback can be provided with @p cb which will |
1076 | * be called upon ecore_thread shutting down. Note that if you have manually freed data that a callback |
1077 | * was specified for, you will most likely encounter a segv later on. |
1078 | */ |
1079 | EAPI void * |
1080 | ecore_thread_global_data_set(const char *key, void *value, Eina_Free_Cb cb) |
1081 | { |
1082 | Ecore_Thread_Data *d, *r; |
1083 | void *ret; |
1084 | |
1085 | if ((!key) || (!value)) |
1086 | return NULL; |
1087 | #ifdef EFL_HAVE_PTHREAD |
1088 | pthread_rwlock_wrlock(&_ecore_thread_global_hash_lock); |
1089 | if (!_ecore_thread_global_hash) |
1090 | _ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free); |
1091 | pthread_rwlock_unlock(&_ecore_thread_global_hash_lock); |
1092 | |
1093 | if (!_ecore_thread_global_hash) |
1094 | return NULL; |
1095 | |
1096 | if (!(d = malloc(sizeof(Ecore_Thread_Data)))) |
1097 | return NULL; |
1098 | |
1099 | d->data = value; |
1100 | d->cb = cb; |
1101 | |
1102 | pthread_rwlock_wrlock(&_ecore_thread_global_hash_lock); |
1103 | r = eina_hash_set(_ecore_thread_global_hash, key, d); |
1104 | pthread_rwlock_unlock(&_ecore_thread_global_hash_lock); |
1105 | pthread_cond_broadcast(&_ecore_thread_global_hash_cond); |
1106 | |
1107 | ret = r->data; |
1108 | free(r); |
1109 | return ret; |
1110 | #else |
1111 | return NULL; |
1112 | #endif |
1113 | } |
1114 | |
1115 | /** |
1116 | * @brief Find data in the global data |
1117 | * @param key The name string the data is associated with |
1118 | * @return The value, or NULL on error |
1119 | * This finds data in the global data that has been previously added with @ref ecore_thread_global_data_add |
1120 | * This function will return NULL in any case but success. |
1121 | * All data added to global can be manually freed, or a callback can be provided with @p cb which will |
1122 | * be called upon ecore_thread shutting down. Note that if you have manually freed data that a callback |
1123 | * was specified for, you will most likely encounter a segv later on. |
1124 | * @note Keep in mind that the data returned can be used by multiple threads at a time, so you will most likely want to mutex |
1125 | * if you will be doing anything with it. |
1126 | */ |
1127 | |
1128 | EAPI void * |
1129 | ecore_thread_global_data_find(const char *key) |
1130 | { |
1131 | Ecore_Thread_Data *ret; |
1132 | if (!key) |
1133 | return NULL; |
1134 | #ifdef EFL_HAVE_PTHREAD |
1135 | if (!_ecore_thread_global_hash) return NULL; |
1136 | |
1137 | pthread_rwlock_rdlock(&_ecore_thread_global_hash_lock); |
1138 | ret = eina_hash_find(_ecore_thread_global_hash, key); |
1139 | pthread_rwlock_unlock(&_ecore_thread_global_hash_lock); |
1140 | return ret->data; |
1141 | #else |
1142 | return NULL; |
1143 | #endif |
1144 | } |
1145 | |
1146 | /** |
1147 | * @brief Delete data from the global data |
1148 | * @param key The name string the data is associated with |
1149 | * @return EINA_TRUE on success, EINA_FALSE on failure |
1150 | * This deletes the data pointer from the global data which was previously added with @ref ecore_thread_global_data_add |
1151 | * This function will return EINA_FALSE in any case but success. |
1152 | * Note that this WILL free the data if an @c Eina_Free_Cb was specified when the data was added. |
1153 | */ |
1154 | EAPI Eina_Bool |
1155 | ecore_thread_global_data_del(const char *key) |
1156 | { |
1157 | Eina_Bool ret; |
1158 | Ecore_Thread_Data *d; |
1159 | |
1160 | if (!key) |
1161 | return EINA_FALSE; |
1162 | #ifdef EFL_HAVE_PTHREAD |
1163 | if (!_ecore_thread_global_hash) |
1164 | return EINA_FALSE; |
1165 | |
1166 | pthread_rwlock_wrlock(&_ecore_thread_global_hash_lock); |
1167 | if ((d = eina_hash_find(_ecore_thread_global_hash, key))) |
1168 | _ecore_thread_data_free(d); |
1169 | ret = eina_hash_del_by_key(_ecore_thread_global_hash, key); |
1170 | pthread_rwlock_unlock(&_ecore_thread_global_hash_lock); |
1171 | return ret; |
1172 | #else |
1173 | return EINA_TRUE; |
1174 | #endif |
1175 | } |
1176 | |
1177 | /** |
1178 | * @brief Find data in the global data and optionally wait for the data if not found |
1179 | * @param key The name string the data is associated with |
1180 | * @param seconds The amount of time in seconds to wait for the data. If 0, the call will be async and not wait for data. |
1181 | * If < 0 the call will wait indefinitely for the data. |
1182 | * @return The value, or NULL on failure |
1183 | * This finds data in the global data that has been previously added with @ref ecore_thread_global_data_add |
1184 | * This function will return NULL in any case but success. |
1185 | * Use @p seconds to specify the amount of time to wait. Use > 0 for an actual wait time, 0 to not wait, and < 0 to wait indefinitely. |
1186 | * @note Keep in mind that the data returned can be used by multiple threads at a time, so you will most likely want to mutex |
1187 | * if you will be doing anything with it. |
1188 | */ |
1189 | EAPI void * |
1190 | ecore_thread_global_data_wait(const char *key, double seconds) |
1191 | { |
1192 | double time = 0; |
1193 | Ecore_Thread_Data *ret = NULL; |
1194 | if (!key) |
1195 | return NULL; |
1196 | #ifdef EFL_HAVE_PTHREAD |
1197 | if (!_ecore_thread_global_hash) |
1198 | return NULL; |
1199 | if (seconds > 0) |
1200 | time = ecore_time_get() + seconds; |
1201 | |
1202 | while (1) |
1203 | { |
1204 | struct timespec t = { 0, 0 }; |
1205 | |
1206 | t.tv_sec = (long int)time; |
1207 | t.tv_nsec = (long int)((time - (double)t.tv_sec) * 1000000000); |
1208 | pthread_rwlock_rdlock(&_ecore_thread_global_hash_lock); |
1209 | ret = eina_hash_find(_ecore_thread_global_hash, key); |
1210 | pthread_rwlock_unlock(&_ecore_thread_global_hash_lock); |
1211 | if ((ret) || (!seconds) || ((seconds > 0) && (time <= ecore_time_get()))) |
1212 | break; |
1213 | pthread_mutex_lock(&_ecore_thread_global_hash_mutex); |
1214 | pthread_cond_timedwait(&_ecore_thread_global_hash_cond, &_ecore_thread_global_hash_mutex, &t); |
1215 | pthread_mutex_unlock(&_ecore_thread_global_hash_mutex); |
1216 | } |
1217 | if (ret) return ret->data; |
1218 | return NULL; |
1219 | #else |
1220 | return NULL; |
1221 | #endif |
1222 | } |
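/*
 * Editorial sketch (not part of ecore_thread.c): a simple handshake built on
 * the global-data calls documented above. One job publishes a result under an
 * assumed key, another blocks on it for a bounded time. The returned pointer
 * is shared between threads, so real code should guard any mutation itself.
 */
#include <stdlib.h>
#include <string.h>
#include <Ecore.h>

static void
_producer(void *data)                /* e.g. a func_blocking run via ecore_thread_run() */
{
   char *result;

   (void) data;
   result = strdup("done");
   if (!result) return;
   /* free() is registered so ecore releases the value at shutdown */
   if (!ecore_thread_global_data_add("job:result", result, free, EINA_FALSE))
     free(result);
}

static void
_consumer(Ecore_Thread *thread, void *data)   /* another job */
{
   const char *result;

   (void) thread;
   (void) data;
   /* wait up to 5 seconds; NULL means the producer never published the key */
   result = ecore_thread_global_data_wait("job:result", 5.0);
   if (result)
     {
        /* use the shared value */
     }
}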
1223 | |
1224 | /** |
1225 | * @} |
1226 | */ |