Lines matching refs:tp, i.e. every reference to the threadpool pointer tp in the libwebsockets threadpool implementation. The number leading each excerpt is its line number in the source file.
47 struct lws_threadpool *tp;
72 struct lws_threadpool *tp;
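The two matches above are the same back-pointer in two different structs: both the per-worker slot and the per-task struct carry a pointer back to the owning struct lws_threadpool (visible later in the listing as pool->tp and task->tp). To make the patterns in the rest of this listing concrete, the sketches below use minimal hypothetical stand-in types: member names mirror the excerpts, but the types themselves are simplified assumptions, not the real lws definitions. Later sketches reuse these includes and types.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Hypothetical stand-ins for the lws internals; member names mirror
 * the excerpts, but the types are simplified assumptions.
 */
typedef struct task {
        struct task *task_queue_next;  /* link on pending or done list */
        struct pool *tp;               /* back-pointer to owning pool */
        void (*run)(struct task *t);   /* the task payload */
        int status;
        int wanted_stop;               /* ask a running task to stop */
} task_t;

struct worker;                         /* per-thread slot, defined in a later sketch */

typedef struct pool {
        pthread_mutex_t lock;          /* guards everything below */
        pthread_cond_t wake_idle;      /* signalled when work arrives */
        struct pool *tp_list;          /* next pool on the context's list */
        struct worker *pool_list;      /* per-worker array after the header */
        task_t *task_queue_head;       /* pending tasks */
        task_t *task_done_head;        /* finished, awaiting reap */
        int threads_in_pool;
        int queue_depth;               /* cached pending-list length */
        int done_queue_depth;          /* cached done-list length */
        int running_tasks;
        int max_queue_depth;
        int destroying;
} pool_t;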
172 lws_threadpool_dump(struct lws_threadpool *tp)
180 pthread_mutex_lock(&tp->lock); /* ======================== tpool lock */
182 lwsl_thread("%s: tp: %s, Queued: %d, Run: %d, Done: %d\n", __func__,
183 tp->name, tp->queue_depth, tp->running_tasks,
184 tp->done_queue_depth);
187 c = &tp->task_queue_head;
197 if (count != tp->queue_depth)
198 lwsl_err("%s: tp says queue depth %d, but actually %d\n",
199 __func__, tp->queue_depth, count);
202 for (n = 0; n < tp->threads_in_pool; n++) {
203 struct lws_pool *pool = &tp->pool_list[n];
213 if (count != tp->running_tasks)
214 lwsl_err("%s: tp says %d running_tasks, but actually %d\n",
215 __func__, tp->running_tasks, count);
218 c = &tp->task_done_head;
228 if (count != tp->done_queue_depth)
229 lwsl_err("%s: tp says done_queue_depth %d, but actually %d\n",
230 __func__, tp->done_queue_depth, count);
232 pthread_mutex_unlock(&tp->lock); /* --------------- tp unlock */
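lws_threadpool_dump() takes the pool lock, prints the three cached counters, then walks each list and cross-checks the cached depth against the actual list length, logging an error on any mismatch. A minimal sketch of that cross-check using the hypothetical types above; the real dump repeats the same walk for the running tasks (via the worker slots) and the done list:

static void
pool_check(pool_t *tp)
{
        int count = 0;

        pthread_mutex_lock(&tp->lock);

        /* walk the pending list and compare with the cached counter */
        for (task_t *t = tp->task_queue_head; t; t = t->task_queue_next)
                count++;

        if (count != tp->queue_depth)
                fprintf(stderr, "tp says queue depth %d, actually %d\n",
                        tp->queue_depth, count);

        pthread_mutex_unlock(&tp->lock);
}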
262 lwsl_thread("%s: tp %p: cleaned finished task for %s\n",
263 __func__, task->tp, lws_wsi_tag(task_to_wsi(task)));
272 struct lws_threadpool *tp = task->tp;
276 if (tp) {
277 c = &tp->task_done_head;
284 tp->done_queue_depth--;
286 lwsl_thread("%s: tp %s: reaped task %s\n", __func__,
287 tp->name, lws_wsi_tag(task_to_wsi(task)));
303 lwsl_err("%s: task->tp NULL already\n", __func__);
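Reaping a finished task unlinks it from the done list and drops done_queue_depth. The c = &tp->task_done_head excerpts show a pointer-to-pointer cursor, which removes the usual head-of-list special case; the error at line 303 covers a task whose tp back-pointer was already cleared. A sketch of that unlink pattern (a hypothetical helper, reused by the dequeue sketch further down):

static int
task_list_remove(task_t **head, task_t *victim, int *depth)
{
        task_t **c = head;      /* cursor points at a "next" slot */

        while (*c) {
                if (*c == victim) {
                        *c = victim->task_queue_next;  /* unlink */
                        victim->task_queue_next = NULL;
                        (*depth)--;

                        return 0;
                }
                c = &(*c)->task_queue_next;
        }

        return 1; /* not on this list */
}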
320 struct lws_threadpool *tp;
325 tp = context->tp_list_head;
326 while (tp) {
331 for (n = 0; n < tp->threads_in_pool; n++) {
332 struct lws_pool *pool = &tp->pool_list[n];
358 c = &tp->task_done_head;
383 tp = tp->tp_list;
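Lines 325 through 383 show the context keeping a singly linked list of its pools, headed at context->tp_list_head and chained through tp->tp_list, and a sweep that visits each pool's worker slots and done queue in turn. A minimal sketch of that walk, with a hypothetical ctx_t standing in for the lws context:

typedef struct ctx {
        pool_t *tp_list_head;   /* all pools created in this context */
} ctx_t;

static void
ctx_sweep(ctx_t *ctx, void (*visit)(pool_t *tp))
{
        for (pool_t *tp = ctx->tp_list_head; tp; tp = tp->tp_list)
                visit(tp);
}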
405 pool->tp->name, task, task->name, lws_wsi_tag(task_to_wsi(task)));
421 "wsi to sync to\n", __func__, pool->tp->name,
462 __func__, pool->tp->name, task,
495 struct lws_threadpool *tp = pool->tp;
498 while (!tp->destroying) {
502 pthread_mutex_lock(&tp->lock); /* =================== tp lock */
508 while (!tp->task_queue_head && !tp->destroying)
509 pthread_cond_wait(&tp->wake_idle, &tp->lock);
511 if (tp->destroying) {
516 c = &tp->task_queue_head;
534 tp->queue_depth--;
541 pthread_mutex_unlock(&tp->lock); /* ------ tp unlock */
552 __func__, tp->name, pool->worker_index, buf);
553 tp->running_tasks++;
555 pthread_mutex_unlock(&tp->lock); /* --------------- tp unlock */
584 if (tp->destroying || !task_to_wsi(task)) {
597 /* if not destroying the tp, continue */
624 pthread_mutex_lock(&tp->lock); /* =================== tp lock */
626 tp->running_tasks--;
633 pool->task->task_queue_next = tp->task_done_head;
634 tp->task_done_head = task;
635 tp->done_queue_depth++;
644 __func__, tp->name, pool->worker_index,
657 __func__, tp->name, pool->worker_index,
673 pthread_mutex_unlock(&tp->lock); /* --------------- tp unlock */
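Lines 495 through 673 are the heart of the worker: sleep on wake_idle while the queue is empty, pop the head of the pending queue under the lock, run the task with the lock dropped, then re-take the lock to move the task onto the done list. A condensed sketch of that loop under the same assumptions as above; the real worker additionally syncs with the task's wsi and handles the outlive and stop cases:

typedef struct worker {
        pthread_t thread;
        pthread_mutex_t lock;   /* guards this slot during stop requests */
        task_t *task;           /* what this worker is running now */
        pool_t *tp;
        int worker_index;
} worker_t;

static void *
pool_worker(void *arg)
{
        worker_t *w = arg;
        pool_t *tp = w->tp;

        while (!tp->destroying) {
                pthread_mutex_lock(&tp->lock);

                /* sleep until work arrives or the pool starts dying */
                while (!tp->task_queue_head && !tp->destroying)
                        pthread_cond_wait(&tp->wake_idle, &tp->lock);

                if (tp->destroying) {
                        pthread_mutex_unlock(&tp->lock);
                        break;
                }

                /* pop the head of the pending queue */
                task_t *t = tp->task_queue_head;

                tp->task_queue_head = t->task_queue_next;
                tp->queue_depth--;
                tp->running_tasks++;
                w->task = t;

                pthread_mutex_unlock(&tp->lock); /* run unlocked */

                t->run(t);

                pthread_mutex_lock(&tp->lock);
                tp->running_tasks--;
                w->task = NULL;

                /* park the finished task on the done list for reaping */
                t->task_queue_next = tp->task_done_head;
                tp->task_done_head = t;
                tp->done_queue_depth++;

                pthread_mutex_unlock(&tp->lock);
        }

        pthread_exit(NULL);

        return NULL;
}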
691 struct lws_threadpool *tp;
695 tp = lws_malloc(sizeof(*tp) + (sizeof(struct lws_pool) * (unsigned int)args->threads),
697 if (!tp)
700 memset(tp, 0, sizeof(*tp) + (sizeof(struct lws_pool) * (unsigned int)args->threads));
701 tp->pool_list = (struct lws_pool *)(tp + 1);
702 tp->max_queue_depth = args->max_queue_depth;
705 n = vsnprintf(tp->name, sizeof(tp->name) - 1, format, ap);
710 tp->context = context;
711 tp->tp_list = context->tp_list_head;
712 context->tp_list_head = tp;
716 pthread_mutex_init(&tp->lock, NULL);
717 pthread_cond_init(&tp->wake_idle, NULL);
723 tp->pool_list[n].tp = tp;
724 tp->pool_list[n].worker_index = n;
725 pthread_mutex_init(&tp->pool_list[n].lock, NULL);
726 if (pthread_create(&tp->pool_list[n].thread, NULL,
727 lws_threadpool_worker, &tp->pool_list[n])) {
731 lws_snprintf(name, sizeof(name), "%s-%d", tp->name, n);
732 pthread_setname_np(tp->pool_list[n].thread, name);
734 tp->threads_in_pool++;
738 return tp;
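Creation (lines 691 through 738) uses a single allocation: the pool header and the per-worker array come from one malloc, with pool_list pointed just past the header via (struct lws_pool *)(tp + 1). One allocation keeps the slots adjacent to the header and means one free at destroy time. A sketch of that layout plus the spawn loop; the error unwinding and the vsnprintf/pthread_setname_np naming at lines 705 and 731-732 are elided:

static pool_t *
pool_create(int threads)
{
        size_t size = sizeof(pool_t) + sizeof(worker_t) * (size_t)threads;
        pool_t *tp = malloc(size);

        if (!tp)
                return NULL;

        memset(tp, 0, size);
        tp->pool_list = (struct worker *)(tp + 1); /* array follows header */

        pthread_mutex_init(&tp->lock, NULL);
        pthread_cond_init(&tp->wake_idle, NULL);

        for (int n = 0; n < threads; n++) {
                worker_t *w = &tp->pool_list[n];

                w->tp = tp;
                w->worker_index = n;
                pthread_mutex_init(&w->lock, NULL);

                if (pthread_create(&w->thread, NULL, pool_worker, w))
                        break; /* the real code unwinds on failure */

                tp->threads_in_pool++;
        }

        return tp;
}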
742 lws_threadpool_finish(struct lws_threadpool *tp)
746 pthread_mutex_lock(&tp->lock); /* ======================== tpool lock */
750 tp->destroying = 1;
754 c = &tp->task_queue_head;
758 task->task_queue_next = tp->task_done_head;
759 tp->task_done_head = task;
761 tp->queue_depth--;
762 tp->done_queue_depth++;
768 pthread_cond_broadcast(&tp->wake_idle);
769 pthread_mutex_unlock(&tp->lock); /* -------------------- tpool unlock */
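lws_threadpool_finish() (lines 742 through 769) does not wait for anything: under the lock it flags destroying, moves every still-pending task straight onto the done list, and broadcasts wake_idle so every sleeping worker wakes and observes the flag. A sketch of the same migration:

static void
pool_finish(pool_t *tp)
{
        pthread_mutex_lock(&tp->lock);

        tp->destroying = 1;

        /* tasks that never started skip straight to the done list */
        while (tp->task_queue_head) {
                task_t *t = tp->task_queue_head;

                tp->task_queue_head = t->task_queue_next;
                t->task_queue_next = tp->task_done_head;
                tp->task_done_head = t;

                tp->queue_depth--;
                tp->done_queue_depth++;
        }

        pthread_cond_broadcast(&tp->wake_idle); /* wake every idle worker */
        pthread_mutex_unlock(&tp->lock);
}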
773 lws_threadpool_destroy(struct lws_threadpool *tp)
782 lws_context_lock(tp->context, __func__);
783 ptp = &tp->context->tp_list_head;
786 if (*ptp == tp) {
787 *ptp = tp->tp_list;
793 lws_context_unlock(tp->context);
799 pthread_mutex_lock(&tp->lock); /* ======================== tpool lock */
800 tp->destroying = 1;
801 pthread_cond_broadcast(&tp->wake_idle);
802 pthread_mutex_unlock(&tp->lock); /* -------------------- tpool unlock */
804 lws_threadpool_dump(tp);
811 for (n = 0; n < tp->threads_in_pool; n++) {
812 task = tp->pool_list[n].task;
814 pthread_join(tp->pool_list[n].thread, &retval);
815 pthread_mutex_destroy(&tp->pool_list[n].lock);
822 task = tp->task_done_head;
826 tp->done_queue_depth--;
830 pthread_mutex_destroy(&tp->lock);
832 memset(tp, 0xdd, sizeof(*tp));
833 lws_free(tp);
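Destruction (lines 773 through 833) orders its steps so nothing can race: unlink the pool from the context list, flag destroying and broadcast, join every worker thread, and only then, with the pool single-threaded again, drain the done list, destroy the locks, poison the memory with 0xdd and free it. A sketch under the same assumptions; this sketch simply free()s tasks, where the real code runs per-task cleanup instead:

static void
pool_destroy(pool_t *tp)
{
        pthread_mutex_lock(&tp->lock);
        tp->destroying = 1;
        pthread_cond_broadcast(&tp->wake_idle);
        pthread_mutex_unlock(&tp->lock);

        /* no worker survives this loop */
        for (int n = 0; n < tp->threads_in_pool; n++) {
                pthread_join(tp->pool_list[n].thread, NULL);
                pthread_mutex_destroy(&tp->pool_list[n].lock);
        }

        /* single-threaded from here: drain the done list */
        while (tp->task_done_head) {
                task_t *t = tp->task_done_head;

                tp->task_done_head = t->task_queue_next;
                tp->done_queue_depth--;
                free(t); /* assumes heap-owned tasks in this sketch */
        }

        pthread_mutex_destroy(&tp->lock);
        memset(tp, 0xdd, sizeof(*tp)); /* poison catches stale pointers */
        free(tp);
}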
843 struct lws_threadpool *tp;
847 tp = task->tp;
848 pthread_mutex_lock(&tp->lock); /* ======================== tpool lock */
850 if (task->outlive && !tp->destroying) {
864 c = &tp->task_queue_head;
872 task->task_queue_next = tp->task_done_head;
873 tp->task_done_head = task;
875 tp->queue_depth--;
876 tp->done_queue_depth++;
879 lwsl_debug("%s: tp %p: removed queued task %s\n",
880 __func__, tp, lws_wsi_tag(task_to_wsi(task)));
889 c = &tp->task_done_head;
895 tp->done_queue_depth--;
903 for (n = 0; n < tp->threads_in_pool; n++) {
904 if (!tp->pool_list[n].task || tp->pool_list[n].task != task)
911 pthread_mutex_lock(&tp->pool_list[n].lock);
928 pthread_mutex_unlock(&tp->pool_list[n].lock);
930 lwsl_debug("%s: tp %p: request stop running task "
931 "for %s\n", __func__, tp,
937 if (n == tp->threads_in_pool) {
939 lwsl_notice("%s: tp %p: no task for %s, decoupling\n",
940 __func__, tp, lws_wsi_tag(task_to_wsi(task)));
949 pthread_mutex_unlock(&tp->lock); /* -------------------- tpool unlock */
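The dequeue path (lines 843 through 949) has to handle a task wherever it currently lives: still pending (divert it straight onto the done list), already finished (unlink it from the done list), or running on a worker (take that worker's per-slot lock and request a stop; if no worker owns it, line 939, the task and wsi are decoupled). A sketch of the three-way dispatch, reusing the hypothetical task_list_remove() from earlier:

static void
pool_dequeue(pool_t *tp, task_t *victim)
{
        pthread_mutex_lock(&tp->lock);

        /* case 1: still pending: divert it onto the done list */
        if (!task_list_remove(&tp->task_queue_head, victim,
                              &tp->queue_depth)) {
                victim->task_queue_next = tp->task_done_head;
                tp->task_done_head = victim;
                tp->done_queue_depth++;
                goto unlock;
        }

        /* case 2: finished: unlink it (the real code then reaps it) */
        if (!task_list_remove(&tp->task_done_head, victim,
                              &tp->done_queue_depth))
                goto unlock;

        /* case 3: running: ask the owning worker to stop it */
        for (int n = 0; n < tp->threads_in_pool; n++) {
                if (tp->pool_list[n].task != victim)
                        continue;

                pthread_mutex_lock(&tp->pool_list[n].lock);
                victim->wanted_stop = 1;
                pthread_mutex_unlock(&tp->pool_list[n].lock);
                break;
        }

unlock:
        pthread_mutex_unlock(&tp->lock);
}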
970 lws_threadpool_enqueue(struct lws_threadpool *tp,
977 if (tp->destroying)
984 pthread_mutex_lock(&tp->lock); /* ======================== tpool lock */
991 if (tp->queue_depth == tp->max_queue_depth) {
993 tp->max_queue_depth);
1009 task->tp = tp;
1017 * add him on the tp task queue
1020 task->task_queue_next = tp->task_queue_head;
1022 tp->task_queue_head = task;
1023 tp->queue_depth++;
1026 * mark the wsi itself as depending on this tp (so wsi close for
1037 lwsl_thread("%s: tp %s: enqueued task %p (%s) for %s, depth %d\n",
1038 __func__, tp->name, task, task->name,
1039 lws_wsi_tag(task_to_wsi(task)), tp->queue_depth);
1044 pthread_cond_signal(&tp->wake_idle);
1047 pthread_mutex_unlock(&tp->lock); /* -------------------- tpool unlock */
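Enqueue (lines 970 through 1047) refuses work once destroying is set or once queue_depth has reached max_queue_depth; otherwise it links the task at the head of the pending list, sets the tp back-pointer, and signals wake_idle rather than broadcasting it, since one new task needs exactly one woken worker. A sketch:

static int
pool_enqueue(pool_t *tp, task_t *t)
{
        if (tp->destroying)
                return 1; /* refuse new work during teardown */

        pthread_mutex_lock(&tp->lock);

        if (tp->queue_depth == tp->max_queue_depth) {
                pthread_mutex_unlock(&tp->lock);

                return 1; /* queue full */
        }

        t->tp = tp;
        t->task_queue_next = tp->task_queue_head;
        tp->task_queue_head = t;
        tp->queue_depth++;

        /* one task, one worker: signal rather than broadcast */
        pthread_cond_signal(&tp->wake_idle);

        pthread_mutex_unlock(&tp->lock);

        return 0;
}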
1058 struct lws_threadpool *tp = task->tp;
1060 if (!tp)
1070 pthread_mutex_lock(&tp->lock); /* ================ tpool lock */
1073 __func__, tp->name, buf);
1076 pthread_mutex_unlock(&tp->lock); /* ------------ tpool unlock */
1119 pthread_mutex_lock(&task->tp->lock);
1121 pthread_mutex_unlock(&task->tp->lock);
1137 pthread_mutex_lock(&task1->tp->lock); /* ================ tpool lock */
1145 pthread_mutex_unlock(&task1->tp->lock); /* ------------ tpool unlock */
1151 pthread_mutex_unlock(&task1->tp->lock); /* ------------ tpool unlock */
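The remaining matches (lines 1058 through 1151) are status and iteration helpers, and they share one rule: any cross-thread look at a task reaches the pool lock through the task's tp back-pointer first, and a NULL task->tp means the task has already been decoupled from its pool. A last sketch of that pattern, with a hypothetical status field:

static int
task_status(task_t *task)
{
        pool_t *tp = task->tp;
        int status;

        if (!tp)
                return -1; /* already decoupled from its pool */

        pthread_mutex_lock(&tp->lock);
        status = task->status; /* snapshot while nothing can move it */
        pthread_mutex_unlock(&tp->lock);

        return status;
}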