/* ua_server_worker.c */
  1. #include "ua_util.h"
  2. #include "ua_server_internal.h"
  3. /**
  4. * There are three types of work:
  5. *
  6. * 1. Ordinary WorkItems (that are dispatched to worker threads if
  7. * multithreading is activated)
  8. *
  9. * 2. Timed work that is executed at a precise date (with an optional repetition
  10. * interval)
  11. *
  12. * 3. Delayed work that is executed at a later time when it is guaranteed that
  13. * all previous work has actually finished (only for multithreading)
  14. */
  15. #define MAXTIMEOUT 50000 // max timeout in usec until the next main loop iteration
  16. #define BATCHSIZE 20 // max size of worklists that are dispatched to workers
  17. static void processWork(UA_Server *server, UA_WorkItem *work, UA_Int32 workSize) {
  18. for(UA_Int32 i = 0;i<workSize;i++) {
  19. UA_WorkItem *item = &work[i];
  20. switch(item->type) {
  21. case UA_WORKITEMTYPE_BINARYNETWORKMESSAGE:
  22. UA_Server_processBinaryMessage(server, item->work.binaryNetworkMessage.connection,
  23. &item->work.binaryNetworkMessage.message);
  24. UA_free(item->work.binaryNetworkMessage.message.data);
  25. break;
  26. case UA_WORKITEMTYPE_METHODCALL:
  27. case UA_WORKITEMTYPE_DELAYEDMETHODCALL:
  28. item->work.methodCall.method(server, item->work.methodCall.data);
  29. break;
  30. default:
  31. break;
  32. }
  33. }
  34. }
  35. /*******************************/
  36. /* Worker Threads and Dispatch */
  37. /*******************************/
  38. #ifdef UA_MULTITHREADING
/** Entry in the dispatch queue */
struct workListNode {
    struct cds_wfcq_node node; // node for the queue
    UA_UInt32 workSize;        // number of items in the work array
    UA_WorkItem *work;         // owned by this node; freed by the worker after processing
};
  45. /** Dispatch work to workers. Slices the work up if it contains more than
  46. BATCHSIZE items. The work array is freed by the worker threads. */
  47. static void dispatchWork(UA_Server *server, UA_Int32 workSize, UA_WorkItem *work) {
  48. UA_Int32 startIndex = workSize; // start at the end
  49. while(workSize > 0) {
  50. UA_Int32 size = BATCHSIZE;
  51. if(size > workSize)
  52. size = workSize;
  53. startIndex = startIndex - size;
  54. struct workListNode *wln = UA_malloc(sizeof(struct workListNode));
  55. if(startIndex > 0) {
  56. UA_WorkItem *workSlice = UA_malloc(size * sizeof(UA_WorkItem));
  57. UA_memcpy(workSlice, &work[startIndex], size * sizeof(UA_WorkItem));
  58. *wln = (struct workListNode){.workSize = size, .work = workSlice};
  59. }
  60. else {
  61. // do not alloc, but forward the original array
  62. *wln = (struct workListNode){.workSize = size, .work = work};
  63. }
  64. cds_wfcq_node_init(&wln->node);
  65. cds_wfcq_enqueue(&server->dispatchQueue_head, &server->dispatchQueue_tail, &wln->node);
  66. workSize -= size;
  67. }
  68. }
// Throwaway struct to bring data into the worker threads. The worker copies
// the fields out and frees the struct itself (see workerLoop).
struct workerStartData {
    UA_Server *server;
    UA_UInt32 **workerCounter; // out-param: where the worker publishes its heartbeat counter
};
/** Worker thread entry point. Waits until work arrives in the dispatch queue
    (with a timed wait of at most 2s so the running flag is re-checked) and
    processes it. Terminates when *server->running becomes false. */
static void * workerLoop(struct workerStartData *startInfo) {
    rcu_register_thread();
    // publish a heartbeat counter; dispatchDelayedWork compares it against a
    // snapshot to detect that this worker has made progress
    // NOTE(review): UA_malloc result is unchecked — OOM dereferences NULL
    UA_UInt32 *c = UA_malloc(sizeof(UA_UInt32));
    uatomic_set(c, 0);
    *startInfo->workerCounter = c;
    UA_Server *server = startInfo->server;
    UA_free(startInfo); // throwaway struct, freed once the fields are copied out
    pthread_mutex_t mutex; // required for the condition variable
    // NOTE(review): every worker waits on the shared condition variable with
    // its own private mutex; POSIX leaves concurrent waits with different
    // mutexes undefined, and a broadcast between the dequeue and the wait can
    // be missed (bounded by the 2s timeout) — confirm this is intended
    pthread_mutex_init(&mutex,0);
    pthread_mutex_lock(&mutex);
    struct timespec to;
    while(*server->running) {
        // non-blocking with respect to producers: returns NULL when empty
        struct workListNode *wln = (struct workListNode*)
            cds_wfcq_dequeue_blocking(&server->dispatchQueue_head, &server->dispatchQueue_tail);
        if(wln) {
            processWork(server, wln->work, wln->workSize);
            UA_free(wln->work);
            UA_free(wln);
        } else {
            // queue empty: sleep on the condition variable for at most 2s
            clock_gettime(CLOCK_REALTIME, &to);
            to.tv_sec += 2;
            pthread_cond_timedwait(&server->dispatchQueue_condition, &mutex, &to);
        }
        uatomic_inc(c); // increase the workerCounter;
    }
    pthread_mutex_unlock(&mutex);
    pthread_mutex_destroy(&mutex);
    rcu_unregister_thread();
    return UA_NULL;
}
  106. static void emptyDispatchQueue(UA_Server *server) {
  107. while(!cds_wfcq_empty(&server->dispatchQueue_head, &server->dispatchQueue_tail)) {
  108. struct workListNode *wln = (struct workListNode*)
  109. cds_wfcq_dequeue_blocking(&server->dispatchQueue_head, &server->dispatchQueue_tail);
  110. processWork(server, wln->work, wln->workSize);
  111. UA_free(wln->work);
  112. UA_free(wln);
  113. }
  114. }
  115. #endif
  116. /**************/
  117. /* Timed Work */
  118. /**************/
/** A batch of work items that share the same execution time (one-shot) or the
    same repetition interval. Entries are kept in a list that addTimedWork and
    processTimedWork maintain sorted by the time field. */
struct UA_TimedWork {
    LIST_ENTRY(UA_TimedWork) pointers;
    UA_UInt16 workSize;   // number of entries in work and workIds
    UA_WorkItem *work;    // array of workSize items
    UA_Guid *workIds;     // one guid per item, returned to the caller for identification
    UA_DateTime time;     // next execution date
    UA_UInt32 repetitionInterval; // in 100ns resolution, 0 means no repetition
};
  127. /* The item is copied and not freed by this function. */
  128. static UA_StatusCode addTimedWork(UA_Server *server, const UA_WorkItem *item, UA_DateTime firstTime,
  129. UA_UInt32 repetitionInterval, UA_Guid *resultWorkGuid) {
  130. UA_TimedWork *tw, *lastTw = UA_NULL;
  131. // search for matching entry
  132. LIST_FOREACH(tw, &server->timedWork, pointers) {
  133. if(tw->repetitionInterval == repetitionInterval &&
  134. (repetitionInterval > 0 || tw->time == firstTime))
  135. break; // found a matching entry
  136. if(tw->time > firstTime) {
  137. tw = UA_NULL; // not matchin entry exists
  138. lastTw = tw;
  139. break;
  140. }
  141. }
  142. if(tw) {
  143. // append to matching entry
  144. tw->workSize++;
  145. UA_WorkItem *biggerWorkArray = UA_realloc(tw->work, sizeof(UA_WorkItem)*tw->workSize);
  146. if(!biggerWorkArray)
  147. return UA_STATUSCODE_BADOUTOFMEMORY;
  148. tw->work = biggerWorkArray;
  149. UA_Guid *biggerWorkIds = UA_realloc(tw->workIds, sizeof(UA_Guid)*tw->workSize);
  150. if(!biggerWorkIds)
  151. return UA_STATUSCODE_BADOUTOFMEMORY;
  152. tw->workIds = biggerWorkIds;
  153. tw->work[tw->workSize-1] = *item;
  154. tw->workIds[tw->workSize-1] = UA_Guid_random(&server->random_seed);
  155. if(resultWorkGuid)
  156. *resultWorkGuid = tw->workIds[tw->workSize-1];
  157. return UA_STATUSCODE_GOOD;
  158. }
  159. // create a new entry
  160. if(!(tw = UA_malloc(sizeof(UA_TimedWork))))
  161. return UA_STATUSCODE_BADOUTOFMEMORY;
  162. if(!(tw->work = UA_malloc(sizeof(UA_WorkItem)))) {
  163. UA_free(tw);
  164. return UA_STATUSCODE_BADOUTOFMEMORY;
  165. }
  166. if(!(tw->workIds = UA_malloc(sizeof(UA_Guid)))) {
  167. UA_free(tw->work);
  168. UA_free(tw);
  169. return UA_STATUSCODE_BADOUTOFMEMORY;
  170. }
  171. tw->workSize = 1;
  172. tw->time = firstTime;
  173. tw->repetitionInterval = repetitionInterval;
  174. tw->work[0] = *item;
  175. tw->workIds[0] = UA_Guid_random(&server->random_seed);
  176. if(lastTw)
  177. LIST_INSERT_AFTER(lastTw, tw, pointers);
  178. else
  179. LIST_INSERT_HEAD(&server->timedWork, tw, pointers);
  180. if(resultWorkGuid)
  181. *resultWorkGuid = tw->workIds[0];
  182. return UA_STATUSCODE_GOOD;
  183. }
  184. // Currently, these functions need to get the server mutex, but should be sufficiently fast
  185. UA_StatusCode UA_Server_addTimedWorkItem(UA_Server *server, const UA_WorkItem *work, UA_DateTime executionTime,
  186. UA_Guid *resultWorkGuid) {
  187. return addTimedWork(server, work, executionTime, 0, resultWorkGuid);
  188. }
  189. UA_StatusCode UA_Server_addRepeatedWorkItem(UA_Server *server, const UA_WorkItem *work, UA_UInt32 interval,
  190. UA_Guid *resultWorkGuid) {
  191. return addTimedWork(server, work, UA_DateTime_now() + interval, interval, resultWorkGuid);
  192. }
  193. /** Dispatches timed work, returns the timeout until the next timed work in ms */
  194. static UA_UInt16 processTimedWork(UA_Server *server) {
  195. UA_DateTime current = UA_DateTime_now();
  196. UA_TimedWork *next = LIST_FIRST(&server->timedWork);
  197. UA_TimedWork *tw = UA_NULL;
  198. while(next) {
  199. tw = next;
  200. if(tw->time > current)
  201. break;
  202. next = LIST_NEXT(tw, pointers);
  203. #ifdef UA_MULTITHREADING
  204. if(tw->repetitionInterval > 0) {
  205. // copy the entry and insert at the new location
  206. UA_WorkItem *workCopy = UA_malloc(sizeof(UA_WorkItem) * tw->workSize);
  207. UA_memcpy(workCopy, tw->work, sizeof(UA_WorkItem) * tw->workSize);
  208. dispatchWork(server, tw->workSize, workCopy); // frees the work pointer
  209. tw->time += tw->repetitionInterval;
  210. UA_TimedWork *prevTw = tw; // after which tw do we insert?
  211. while(UA_TRUE) {
  212. UA_TimedWork *n = LIST_NEXT(prevTw, pointers);
  213. if(!n || n->time > tw->time)
  214. break;
  215. prevTw = n;
  216. }
  217. if(prevTw != tw) {
  218. LIST_REMOVE(tw, pointers);
  219. LIST_INSERT_AFTER(prevTw, tw, pointers);
  220. }
  221. } else {
  222. dispatchWork(server, tw->workSize, tw->work); // frees the work pointer
  223. LIST_REMOVE(tw, pointers);
  224. UA_free(tw->workIds);
  225. UA_free(tw);
  226. }
  227. #else
  228. // 1) Process the work since it is past its due date
  229. processWork(server, tw->work, tw->workSize); // does not free the work
  230. // 2) If the work is repeated, add it back into the list. Otherwise remove it.
  231. if(tw->repetitionInterval > 0) {
  232. tw->time += tw->repetitionInterval;
  233. UA_TimedWork *prevTw = tw;
  234. while(UA_TRUE) {
  235. UA_TimedWork *n = LIST_NEXT(prevTw, pointers);
  236. if(!n || n->time > tw->time)
  237. break;
  238. prevTw = n;
  239. }
  240. if(prevTw != tw) {
  241. LIST_REMOVE(tw, pointers);
  242. LIST_INSERT_AFTER(prevTw, tw, pointers);
  243. }
  244. } else {
  245. LIST_REMOVE(tw, pointers);
  246. UA_free(tw->work);
  247. UA_free(tw->workIds);
  248. UA_free(tw);
  249. }
  250. #endif
  251. }
  252. // check if the next timed work is sooner than the usual timeout
  253. UA_TimedWork *first = LIST_FIRST(&server->timedWork);
  254. UA_UInt16 timeout = MAXTIMEOUT;
  255. if(first) {
  256. timeout = (first->time - current)/10;
  257. if(timeout > MAXTIMEOUT)
  258. timeout = MAXTIMEOUT;
  259. }
  260. return timeout;
  261. }
  262. void UA_Server_deleteTimedWork(UA_Server *server) {
  263. UA_TimedWork *current;
  264. UA_TimedWork *next = LIST_FIRST(&server->timedWork);
  265. while(next) {
  266. current = next;
  267. next = LIST_NEXT(current, pointers);
  268. LIST_REMOVE(current, pointers);
  269. UA_free(current->work);
  270. UA_free(current->workIds);
  271. UA_free(current);
  272. }
  273. }
  274. /****************/
  275. /* Delayed Work */
  276. /****************/
  277. #ifdef UA_MULTITHREADING
  278. #define DELAYEDWORKSIZE 100 // Collect delayed work until we have DELAYEDWORKSIZE items
/** A buffer of work items whose execution is delayed until all worker threads
    have provably finished the work dispatched before them. */
struct UA_DelayedWork {
    UA_DelayedWork *next;
    UA_UInt32 *workerCounters; // initially UA_NULL until a workitem gets the counters
    UA_UInt32 workItemsCount; // the size of the array is DELAYEDWORKSIZE, the count may be less
    UA_WorkItem *workItems; // when it runs full, a new delayedWork entry is created
};
  285. // Dispatched as a methodcall-WorkItem when the delayedwork is added
  286. static void getCounters(UA_Server *server, UA_DelayedWork *delayed) {
  287. UA_UInt32 *counters = UA_malloc(server->nThreads * sizeof(UA_UInt32));
  288. for(UA_UInt16 i = 0;i<server->nThreads;i++)
  289. counters[i] = *server->workerCounters[i];
  290. delayed->workerCounters = counters;
  291. }
// Call from the main thread only. This is the only function that modifies
// server->delayedWork. dispatchDelayedWork modifies the "next" (after the
// head).
static void addDelayedWork(UA_Server *server, UA_WorkItem work) {
    UA_DelayedWork *dw = server->delayedWork;
    if(!dw || dw->workItemsCount >= DELAYEDWORKSIZE) {
        // the current head buffer is missing or full -> prepend a fresh one
        // NOTE(review): both mallocs below are unchecked; OOM dereferences NULL
        UA_DelayedWork *newwork = UA_malloc(sizeof(UA_DelayedWork));
        newwork->workItems = UA_malloc(sizeof(UA_WorkItem)*DELAYEDWORKSIZE);
        newwork->workItemsCount = 0;
        newwork->workerCounters = UA_NULL; // marks the entry as "not ready"
        newwork->next = server->delayedWork;
        // dispatch a method that snapshots the worker counters for the (now
        // full) previous head; dispatchDelayedWork waits for the counters to
        // move past the snapshot before executing the buffered items
        if(dw && dw->workItemsCount >= DELAYEDWORKSIZE) {
            UA_WorkItem *setCounter = UA_malloc(sizeof(UA_WorkItem));
            *setCounter = (UA_WorkItem)
                {.type = UA_WORKITEMTYPE_METHODCALL,
                 .work.methodCall = {.method = (void (*)(UA_Server*, void*))getCounters, .data = dw}};
            dispatchWork(server, 1, setCounter);
        }
        server->delayedWork = newwork;
        dw = newwork;
    }
    dw->workItems[dw->workItemsCount] = work;
    dw->workItemsCount++;
}
  317. static void processDelayedWork(UA_Server *server) {
  318. UA_DelayedWork *dw = server->delayedWork;
  319. while(dw) {
  320. processWork(server, dw->workItems, dw->workItemsCount);
  321. UA_DelayedWork *next = dw->next;
  322. UA_free(dw->workerCounters);
  323. UA_free(dw->workItems);
  324. UA_free(dw);
  325. dw = next;
  326. }
  327. }
  328. // Execute this every N seconds (repeated work) to execute delayed work that is ready
  329. static void dispatchDelayedWork(UA_Server *server, void *data /* not used, but needed for the signature*/) {
  330. UA_DelayedWork *dw = UA_NULL;
  331. UA_DelayedWork *readydw = UA_NULL;
  332. UA_DelayedWork *beforedw = server->delayedWork;
  333. // start at the second...
  334. if(beforedw)
  335. dw = beforedw->next;
  336. // find the first delayedwork where the counters are set and have been moved
  337. while(dw) {
  338. if(!dw->workerCounters) {
  339. beforedw = dw;
  340. dw = dw->next;
  341. continue;
  342. }
  343. UA_Boolean countersMoved = UA_TRUE;
  344. for(UA_UInt16 i=0;i<server->nThreads;i++) {
  345. if(*server->workerCounters[i] == dw->workerCounters[i])
  346. countersMoved = UA_FALSE;
  347. break;
  348. }
  349. if(countersMoved) {
  350. readydw = uatomic_xchg(&beforedw->next, UA_NULL);
  351. break;
  352. } else {
  353. beforedw = dw;
  354. dw = dw->next;
  355. }
  356. }
  357. // we have a ready entry. all afterwards are also ready
  358. while(readydw) {
  359. dispatchWork(server, readydw->workItemsCount, readydw->workItems);
  360. beforedw = readydw;
  361. readydw = readydw->next;
  362. UA_free(beforedw->workerCounters);
  363. UA_free(beforedw);
  364. }
  365. }
  366. #endif
  367. /********************/
  368. /* Main Server Loop */
  369. /********************/
  370. UA_StatusCode UA_Server_run(UA_Server *server, UA_UInt16 nThreads, UA_Boolean *running) {
  371. #ifdef UA_MULTITHREADING
  372. // 1) Prepare the threads
  373. server->running = running; // the threads need to access the variable
  374. server->nThreads = nThreads;
  375. pthread_cond_init(&server->dispatchQueue_condition, 0);
  376. pthread_t *thr = UA_malloc(nThreads * sizeof(pthread_t));
  377. server->workerCounters = UA_malloc(nThreads * sizeof(UA_UInt32 *));
  378. for(UA_UInt32 i=0;i<nThreads;i++) {
  379. struct workerStartData *startData = UA_malloc(sizeof(struct workerStartData));
  380. startData->server = server;
  381. startData->workerCounter = &server->workerCounters[i];
  382. pthread_create(&thr[i], UA_NULL, (void* (*)(void*))workerLoop, startData);
  383. }
  384. UA_WorkItem processDelayed = {.type = UA_WORKITEMTYPE_METHODCALL,
  385. .work.methodCall = {.method = dispatchDelayedWork,
  386. .data = UA_NULL} };
  387. UA_Server_addRepeatedWorkItem(server, &processDelayed, 10000000, UA_NULL);
  388. #endif
  389. // 2a) Start the networklayers
  390. for(UA_Int32 i=0;i<server->nlsSize;i++)
  391. server->nls[i].start(server->nls[i].nlHandle, &server->logger);
  392. // 2b) Init server's meta-information
  393. //fill startTime
  394. server->startTime = UA_DateTime_now();
  395. //fill build date
  396. {
  397. static struct tm ct;
  398. ct.tm_year = (__DATE__[7] - '0') * 1000 + (__DATE__[8] - '0') * 100 + (__DATE__[9] - '0') * 10 + (__DATE__[10] - '0')- 1900;
  399. if (0) ;
  400. else if ((__DATE__[0]=='J') && (__DATE__[1]=='a') && (__DATE__[2]=='n')) ct.tm_mon = 1-1;
  401. else if ((__DATE__[0]=='F') && (__DATE__[1]=='e') && (__DATE__[2]=='b')) ct.tm_mon = 2-1;
  402. else if ((__DATE__[0]=='M') && (__DATE__[1]=='a') && (__DATE__[2]=='r')) ct.tm_mon = 3-1;
  403. else if ((__DATE__[0]=='A') && (__DATE__[1]=='p') && (__DATE__[2]=='r')) ct.tm_mon = 4-1;
  404. else if ((__DATE__[0]=='M') && (__DATE__[1]=='a') && (__DATE__[2]=='y')) ct.tm_mon = 5-1;
  405. else if ((__DATE__[0]=='J') && (__DATE__[1]=='u') && (__DATE__[2]=='n')) ct.tm_mon = 6-1;
  406. else if ((__DATE__[0]=='J') && (__DATE__[1]=='u') && (__DATE__[2]=='l')) ct.tm_mon = 7-1;
  407. else if ((__DATE__[0]=='A') && (__DATE__[1]=='u') && (__DATE__[2]=='g')) ct.tm_mon = 8-1;
  408. else if ((__DATE__[0]=='S') && (__DATE__[1]=='e') && (__DATE__[2]=='p')) ct.tm_mon = 9-1;
  409. else if ((__DATE__[0]=='O') && (__DATE__[1]=='c') && (__DATE__[2]=='t')) ct.tm_mon = 10-1;
  410. else if ((__DATE__[0]=='N') && (__DATE__[1]=='o') && (__DATE__[2]=='v')) ct.tm_mon = 11-1;
  411. else if ((__DATE__[0]=='D') && (__DATE__[1]=='e') && (__DATE__[2]=='c')) ct.tm_mon = 12-1;
  412. // special case to handle __DATE__ not inserting leading zero on day of month
  413. // if Day of month is less than 10 - it inserts a blank character
  414. // this results in a negative number for tm_mday
  415. if(__DATE__[4] == ' ')
  416. {
  417. ct.tm_mday = __DATE__[5]-'0';
  418. }
  419. else
  420. {
  421. ct.tm_mday = (__DATE__[4]-'0')*10 + (__DATE__[5]-'0');
  422. }
  423. ct.tm_hour = ((__TIME__[0] - '0') * 10 + __TIME__[1] - '0');
  424. ct.tm_min = ((__TIME__[3] - '0') * 10 + __TIME__[4] - '0');
  425. ct.tm_sec = ((__TIME__[6] - '0') * 10 + __TIME__[7] - '0');
  426. ct.tm_isdst = -1; // information is not available.
  427. //FIXME: next 3 lines are copy-pasted from ua_types.c
  428. #define UNIX_EPOCH_BIAS_SEC 11644473600LL // Number of seconds from 1 Jan. 1601 00:00 to 1 Jan 1970 00:00 UTC
  429. #define HUNDRED_NANOSEC_PER_USEC 10LL
  430. #define HUNDRED_NANOSEC_PER_SEC (HUNDRED_NANOSEC_PER_USEC * 1000000LL)
  431. server->buildDate = (mktime(&ct) + UNIX_EPOCH_BIAS_SEC) * HUNDRED_NANOSEC_PER_SEC;
  432. }
  433. //3) The loop
  434. while(1) {
  435. // 3.1) Process timed work
  436. UA_UInt16 timeout = processTimedWork(server);
  437. // 3.2) Get work from the networklayer and dispatch it
  438. for(UA_Int32 i=0;i<server->nlsSize;i++) {
  439. UA_ServerNetworkLayer *nl = &server->nls[i];
  440. UA_WorkItem *work;
  441. UA_Int32 workSize;
  442. if(*running) {
  443. if(i == server->nlsSize-1)
  444. workSize = nl->getWork(nl->nlHandle, &work, timeout);
  445. else
  446. workSize = nl->getWork(nl->nlHandle, &work, 0);
  447. } else {
  448. workSize = server->nls[i].stop(nl->nlHandle, &work);
  449. }
  450. #ifdef UA_MULTITHREADING
  451. // Filter out delayed work
  452. for(UA_Int32 k=0;k<workSize;k++) {
  453. if(work[k].type != UA_WORKITEMTYPE_DELAYEDMETHODCALL)
  454. continue;
  455. addDelayedWork(server, work[k]);
  456. work[k].type = UA_WORKITEMTYPE_NOTHING;
  457. }
  458. dispatchWork(server, workSize, work);
  459. if(workSize > 0)
  460. pthread_cond_broadcast(&server->dispatchQueue_condition);
  461. #else
  462. processWork(server, work, workSize);
  463. if(workSize > 0)
  464. UA_free(work);
  465. #endif
  466. }
  467. // 3.3) Exit?
  468. if(!*running)
  469. break;
  470. }
  471. #ifdef UA_MULTITHREADING
  472. // 4) Clean up: Wait until all worker threads finish, then empty the
  473. // dispatch queue, then process the remaining delayed work
  474. for(UA_UInt32 i=0;i<nThreads;i++) {
  475. pthread_join(thr[i], UA_NULL);
  476. UA_free(server->workerCounters[i]);
  477. }
  478. UA_free(server->workerCounters);
  479. UA_free(thr);
  480. emptyDispatchQueue(server);
  481. processDelayedWork(server);
  482. #endif
  483. return UA_STATUSCODE_GOOD;
  484. }