Browse code

Merge branch 'master' into dev

Conflicts:
	src/server/ua_server_worker.c
Julius Pfrommer 9 years ago
parent
commit
f7b9c622f3

+ 52 - 40
examples/networklayer_tcp.c

@@ -323,8 +323,8 @@ static void removeMappings(ServerNetworkLayerTCP *layer, struct DeleteList *d) {
     }
 }
 
-static UA_Int32 ServerNetworkLayerTCP_getWork(ServerNetworkLayerTCP *layer, UA_WorkItem **workItems,
-                                              UA_UInt16 timeout) {
+static UA_Int32 ServerNetworkLayerTCP_getJobs(ServerNetworkLayerTCP *layer, UA_Job **jobs, UA_UInt16 timeout) {
+    /* remove the deleted sockets from the array */
     struct DeleteList *deletes;
 #ifdef UA_MULTITHREADING
         deletes = uatomic_xchg(&layer->deletes, NULL);
@@ -333,27 +333,10 @@ static UA_Int32 ServerNetworkLayerTCP_getWork(ServerNetworkLayerTCP *layer, UA_W
         layer->deletes = NULL;
 #endif
     removeMappings(layer, deletes);
+
     setFDSet(layer);
     struct timeval tmptv = {0, timeout};
     UA_Int32 resultsize = select(layer->highestfd+1, &layer->fdset, NULL, NULL, &tmptv);
-    UA_WorkItem *items;
-    if(resultsize < 0 || !(items = malloc(sizeof(UA_WorkItem)*(resultsize+1)))) {
-        /* abort .. reattach the deletes so that they get deleted eventually.. */
-#ifdef UA_MULTITHREADING
-        struct DeleteList *last_delete = deletes;
-        while(last_delete->next != NULL)
-            last_delete = last_delete->next;
-        while(1) {
-            last_delete->next = layer->deletes;
-            if(uatomic_cmpxchg(&layer->deletes, last_delete->next, deletes) == last_delete->next)
-                break;
-        }
-#else
-        layer->deletes = deletes;
-#endif
-        *workItems = NULL;
-        return 0;
-    }
 
     /* accept new connections (can only be a single one) */
     if(FD_ISSET(layer->serversockfd, &layer->fdset)) {
@@ -368,7 +351,36 @@ static UA_Int32 ServerNetworkLayerTCP_getWork(ServerNetworkLayerTCP *layer, UA_W
             ServerNetworkLayerTCP_add(layer, newsockfd);
         }
     }
-    
+
+    if(!deletes && resultsize <= 0) {
+        *jobs = NULL;
+        return 0;
+    }
+    if(resultsize < 0)
+        resultsize = 0;
+    UA_Int32 deletesJob = 0;
+    if(deletes)
+        deletesJob = 1;
+        
+    UA_Job *items = malloc(sizeof(UA_Job) * (resultsize + deletesJob));
+    if(deletes && !items) {
+        /* abort. reattach the deletes so that they get deleted eventually. */
+#ifdef UA_MULTITHREADING
+        struct DeleteList *last_delete;
+        while(deletes) {
+            last_delete = deletes;
+            deletes = deletes->next;
+        }
+        while(1) {
+            last_delete->next = layer->deletes;
+            if(uatomic_cmpxchg(&layer->deletes, last_delete->next, deletes) == last_delete->next)
+                break;
+        }
+#else
+        layer->deletes = deletes;
+#endif
+    }
+
     /* read from established sockets */
     UA_Int32 j = 0;
     UA_ByteString buf = UA_BYTESTRING_NULL;
@@ -382,38 +394,38 @@ static UA_Int32 ServerNetworkLayerTCP_getWork(ServerNetworkLayerTCP *layer, UA_W
                 break;
         }
         if(socket_recv(layer->mappings[i].connection, &buf, 0) == UA_STATUSCODE_GOOD) {
-            items[j].type = UA_WORKITEMTYPE_BINARYMESSAGE;
-            items[j].work.binaryMessage.message = buf;
-            items[j].work.binaryMessage.connection = layer->mappings[i].connection;
+            items[j].type = UA_JOBTYPE_BINARYMESSAGE;
+            items[j].job.binaryMessage.message = buf;
+            items[j].job.binaryMessage.connection = layer->mappings[i].connection;
             buf.data = NULL;
         } else {
-            items[j].type = UA_WORKITEMTYPE_CLOSECONNECTION;
-            items[j].work.closeConnection = layer->mappings[i].connection;
+            items[j].type = UA_JOBTYPE_CLOSECONNECTION;
+            items[j].job.closeConnection = layer->mappings[i].connection;
         }
         j++;
     }
 
-    /* add the delayed work that frees the connections */
+    /* add the delayed job that frees the connections */
     if(deletes) {
-        items[j].type = UA_WORKITEMTYPE_DELAYEDMETHODCALL;
-        items[j].work.methodCall.data = deletes;
-        items[j].work.methodCall.method = (void (*)(UA_Server *server, void *data))freeConnections;
+        items[j].type = UA_JOBTYPE_DELAYEDMETHODCALL;
+        items[j].job.methodCall.data = deletes;
+        items[j].job.methodCall.method = (void (*)(UA_Server *server, void *data))freeConnections;
         j++;
     }
 
     if(buf.data)
         free(buf.data);
 
-    /* free the array if there is no work */
+    /* free the array if there is no job */
     if(j == 0) {
         free(items);
-        *workItems = NULL;
+        *jobs = NULL;
     } else
-        *workItems = items;
+        *jobs = items;
     return j;
 }
 
-static UA_Int32 ServerNetworkLayerTCP_stop(ServerNetworkLayerTCP *layer, UA_WorkItem **workItems) {
+static UA_Int32 ServerNetworkLayerTCP_stop(ServerNetworkLayerTCP *layer, UA_Job **jobs) {
     struct DeleteList *deletes;
 #ifdef UA_MULTITHREADING
         deletes = uatomic_xchg(&layer->deletes, NULL);
@@ -422,17 +434,17 @@ static UA_Int32 ServerNetworkLayerTCP_stop(ServerNetworkLayerTCP *layer, UA_Work
         layer->deletes = NULL;
 #endif
     removeMappings(layer, deletes);
-    UA_WorkItem *items = malloc(sizeof(UA_WorkItem) * layer->mappingsSize);
+    UA_Job *items = malloc(sizeof(UA_Job) * layer->mappingsSize);
     if(!items)
         return 0;
     for(size_t i = 0; i < layer->mappingsSize; i++) {
-        items[i].type = UA_WORKITEMTYPE_CLOSECONNECTION;
-        items[i].work.closeConnection = layer->mappings[i].connection;
+        items[i].type = UA_JOBTYPE_CLOSECONNECTION;
+        items[i].job.closeConnection = layer->mappings[i].connection;
     }
 #ifdef _WIN32
     WSACleanup();
 #endif
-    *workItems = items;
+    *jobs = items;
     return layer->mappingsSize;
 }
 
@@ -472,8 +484,8 @@ UA_ServerNetworkLayer ServerNetworkLayerTCP_new(UA_ConnectionConfig conf, UA_UIn
 
     nl.nlHandle = layer;
     nl.start = (UA_StatusCode (*)(void*, UA_Logger *logger))ServerNetworkLayerTCP_start;
-    nl.getWork = (UA_Int32 (*)(void*, UA_WorkItem**, UA_UInt16))ServerNetworkLayerTCP_getWork;
-    nl.stop = (UA_Int32 (*)(void*, UA_WorkItem**))ServerNetworkLayerTCP_stop;
+    nl.getJobs = (UA_Int32 (*)(void*, UA_Job**, UA_UInt16))ServerNetworkLayerTCP_getJobs;
+    nl.stop = (UA_Int32 (*)(void*, UA_Job**))ServerNetworkLayerTCP_stop;
     nl.free = (void (*)(void*))ServerNetworkLayerTCP_delete;
     nl.discoveryUrl = &layer->discoveryUrl;
     return nl;

+ 5 - 1
examples/server.c

@@ -8,7 +8,11 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
-#include <unistd.h> //access
+#ifdef _MSC_VER
+    #include <io.h> //access
+#else
+    #include <unistd.h> //access
+#endif
 
 #define __USE_XOPEN2K
 #ifdef UA_MULTITHREADING

+ 3 - 3
examples/server_simple.c

@@ -68,9 +68,9 @@ int main(int argc, char** argv) {
     UA_ByteString_deleteMembers(&certificate);
     UA_Server_addNetworkLayer(server, ServerNetworkLayerTCP_new(UA_ConnectionConfig_standard, 16664));
 
-    UA_WorkItem work = {.type = UA_WORKITEMTYPE_METHODCALL,
-                        .work.methodCall = {.method = testCallback, .data = NULL} };
-    UA_Server_addRepeatedWorkItem(server, &work, 2000, NULL); // call every 2 sec
+    UA_Job job = {.type = UA_JOBTYPE_METHODCALL,
+                  .job.methodCall = {.method = testCallback, .data = NULL} };
+    UA_Server_addRepeatedJob(server, job, 2000, NULL); // call every 2 sec
 
 	// add a variable node to the adresspace
     UA_Variant *myIntegerVariant = UA_Variant_new();

+ 55 - 76
include/ua_server.h

@@ -58,43 +58,35 @@ void UA_EXPORT UA_Server_setLogger(UA_Server *server, UA_Logger logger);
 UA_Logger UA_EXPORT UA_Server_getLogger(UA_Server *server);
 
 /**
- * Runs the main loop of the server. In each iteration, this calls into the
- * networklayers to see if work have arrived and checks if timed events need to
- * be triggered.
+ * Runs the main loop of the server. In each iteration, this calls into the networklayers to see if
+ * jobs have arrived and checks if repeated jobs need to be triggered.
  *
  * @param server The server object
- * @param nThreads The number of worker threads. Is ignored if MULTITHREADING is
- * not activated.
- * @param running Points to a booloean value on the heap. When running is set to
- * false, the worker threads and the main loop close and the server is shut
- * down.
+ *
+ * @param nThreads The number of worker threads. Is ignored if MULTITHREADING is not activated.
+ *
+ * @param running Points to a boolean value on the heap. When running is set to false, the worker
+ * threads and the main loop close and the server is shut down.
+ *
  * @return Indicates whether the server shut down cleanly
  *
  */
 UA_StatusCode UA_EXPORT UA_Server_run(UA_Server *server, UA_UInt16 nThreads, UA_Boolean *running);
 
-/**
- * The prologue part of UA_Server_run (no need to use if you call UA_Server_run)
- */
+/* The prologue part of UA_Server_run (no need to use if you call UA_Server_run) */
 UA_StatusCode UA_EXPORT UA_Server_run_startup(UA_Server *server, UA_UInt16 nThreads, UA_Boolean *running);
-/**
- * The epilogue part of UA_Server_run (no need to use if you call UA_Server_run)
- */
+/* The epilogue part of UA_Server_run (no need to use if you call UA_Server_run) */
 UA_StatusCode UA_EXPORT UA_Server_run_shutdown(UA_Server *server, UA_UInt16 nThreads);
-/**
- * One iteration of UA_Server_run (no need to use if you call UA_Server_run)
- */
-UA_StatusCode UA_EXPORT UA_Server_run_getAndProcessWork(UA_Server *server, UA_Boolean *running);
-
+/* One iteration of UA_Server_run (no need to use if you call UA_Server_run) */
+UA_StatusCode UA_EXPORT UA_Server_run_mainloop(UA_Server *server, UA_Boolean *running);
 
 /**
  * Datasources are the interface to local data providers. Implementors of datasources need to
  * provide functions for the callbacks in this structure. After every read, the handle needs to be
  * released to indicate that the pointer is no longer accessed. As a rule, datasources are never
  * copied, but only their content. The only way to write into a datasource is via the write-service.
- *
- * It is expected that the read and release callbacks are implemented. The write
- * callback can be set to null.
+ * It is expected that the read and release callbacks are implemented. The write callback can be set
+ * to null.
  */
 typedef struct {
     void *handle; ///> A custom pointer to reuse the same datasource functions for multiple sources
@@ -156,14 +148,14 @@ UA_Server_addDataSourceVariableNode(UA_Server *server, UA_DataSource dataSource,
                                     const UA_QualifiedName browseName, UA_NodeId nodeId,
                                     const UA_NodeId parentNodeId, const UA_NodeId referenceTypeId);
 
-/** Work that is run in the main loop (singlethreaded) or dispatched to a worker thread */
-typedef struct UA_WorkItem {
+/** Jobs describe work that is executed once or repeatedly. */
+typedef struct {
     enum {
-        UA_WORKITEMTYPE_NOTHING,
-        UA_WORKITEMTYPE_CLOSECONNECTION,
-        UA_WORKITEMTYPE_BINARYMESSAGE,
-        UA_WORKITEMTYPE_METHODCALL,
-        UA_WORKITEMTYPE_DELAYEDMETHODCALL,
+        UA_JOBTYPE_NOTHING,
+        UA_JOBTYPE_CLOSECONNECTION,
+        UA_JOBTYPE_BINARYMESSAGE,
+        UA_JOBTYPE_METHODCALL,
+        UA_JOBTYPE_DELAYEDMETHODCALL,
     } type;
     union {
         UA_Connection *closeConnection;
@@ -175,51 +167,38 @@ typedef struct UA_WorkItem {
             void *data;
             void (*method)(UA_Server *server, void *data);
         } methodCall;
-    } work;
-} UA_WorkItem;
+    } job;
+} UA_Job;
 
 /**
  * @param server The server object.
  *
- * @param work Pointer to the WorkItem that shall be added. The pointer is not
- *        freed but copied to an internal representation.
+ * @param job Pointer to the job that shall be added. The pointer is not freed but copied to an
+ *        internal representation.
  *
- * @param executionTime The time when the work shall be executed. If the time lies in the
- *        past, the work will be executed in the next iteration of the server's
- *        main loop
+ * @param interval The job shall be repeatedly executed with the given interval (in ms). The
+ *        interval must be larger than 5ms. The first execution occurs at now() + interval at the
+ *        latest.
  *
- * @param resultWorkGuid Upon success, the pointed value is set to the guid of
- *        the workitem in the server queue. This can be used to cancel the work
- *        later on. If the pointer is null, the guid is not set.
+ * @param jobId Set to the guid of the repeated job. This can be used to cancel the job later on. If
+ *        the pointer is null, the guid is not set.
  *
 * @return Upon success, UA_STATUSCODE_GOOD is returned. An error code otherwise.
  */
-UA_StatusCode UA_EXPORT
-UA_Server_addTimedWorkItem(UA_Server *server, const UA_WorkItem *work,
-                           UA_DateTime executionTime, UA_Guid *resultWorkGuid);
+UA_StatusCode UA_EXPORT UA_Server_addRepeatedJob(UA_Server *server, UA_Job job, UA_UInt32 interval,
+                                                 UA_Guid *jobId);
 
 /**
- * @param server The server object.
+ * Remove repeated job. The entry will be removed asynchronously during the
+ * next iteration of the server main loop.
  *
- * @param work Pointer to the WorkItem that shall be added. The pointer is not
- *        freed but copied to an internal representation.
- *
- * @param interval The work that is executed repeatedly with the given interval
- *        (in ms). If work with the same repetition interval already exists,
- *        the first execution might occur sooner.
+ * @param server The server object.
  *
- * @param resultWorkGuid Upon success, the pointed value is set to the guid of
- *        the workitem in the server queue. This can be used to cancel the work
- *        later on. If the pointer is null, the guid is not set.
+ * @param jobId The id of the job that shall be removed.
  *
 * @return Upon success, UA_STATUSCODE_GOOD is returned. An error code otherwise.
  */
-UA_StatusCode UA_EXPORT
-UA_Server_addRepeatedWorkItem(UA_Server *server, const UA_WorkItem *work,
-                              UA_UInt32 interval, UA_Guid *resultWorkGuid);
-
-/** Remove timed or repeated work */
-/* UA_Boolean UA_EXPORT UA_Server_removeWorkItem(UA_Server *server, UA_Guid workId); */
+UA_StatusCode UA_EXPORT UA_Server_removeRepeatedJob(UA_Server *server, UA_Guid jobId);
 
 /**
  * Interface to the binary network layers. This structure is returned from the
@@ -239,29 +218,29 @@ typedef struct {
     UA_StatusCode (*start)(void *nlHandle, UA_Logger *logger);
     
     /**
-     * Gets called from the main server loop and returns the work that
-     * accumulated (messages and close events) for dispatch. The networklayer
-     * does not wait on connections but returns immediately the work that
-     * accumulated.
+     * Gets called from the main server loop and returns the jobs (accumulated messages and close
+     * events) for dispatch.
+     *
+     * @param jobs When the returned integer is positive, *jobs points to an array of UA_Job of the
+     * returned size.
      *
-     * @param workItems When the returned integer is positive, *workItems points
-     * to an array of WorkItems of the returned size.
      * @param timeout The timeout during which an event must arrive in microseconds
-     * @return The size of the returned workItems array. If the result is
-     * negative, an error has occured.
+     *
+     * @return The size of the jobs array. If the result is negative, an error has
+     * occurred.
      */
-    UA_Int32 (*getWork)(void *nlhandle, UA_WorkItem **workItems, UA_UInt16 timeout);
+    UA_Int32 (*getJobs)(void *nlhandle, UA_Job **jobs, UA_UInt16 timeout);
 
     /**
-     * Closes the network connection and returns all the work that needs to
-     * be finished before the network layer can be safely deleted.
+     * Closes the network connection and returns all the jobs that need to be finished before the
+     * network layer can be safely deleted.
      *
-     * @param workItems When the returned integer is positive, *workItems points
-     * to an array of WorkItems of the returned size.
-     * @return The size of the returned workItems array. If the result is
-     * negative, an error has occured.
+     * @param jobs When the returned integer is positive, jobs points to an array of UA_Job of the
+     * returned size.
+     *
+     * @return The size of the jobs array. If the result is negative, an error has occurred.
      */
-    UA_Int32 (*stop)(void *nlhandle, UA_WorkItem **workItems);
+    UA_Int32 (*stop)(void *nlhandle, UA_Job **jobs);
 
     /** Deletes the network layer. Call only after a successful shutdown. */
     void (*free)(void *nlhandle);
@@ -354,9 +333,9 @@ typedef struct UA_ExternalNodeStore {
 	UA_ExternalNodeStore_delete destroy;
 } UA_ExternalNodeStore;
 
-/* UA_StatusCode UA_EXPORT */
-/* UA_Server_addExternalNamespace(UA_Server *server, UA_UInt16 namespaceIndex, const UA_String *url, UA_ExternalNodeStore *nodeStore); */
 
+UA_StatusCode UA_EXPORT
+UA_Server_addExternalNamespace(UA_Server *server, UA_UInt16 namespaceIndex, const UA_String *url, UA_ExternalNodeStore *nodeStore);
 /** @} */
 
 #endif /* external nodestore */

+ 54 - 28
src/server/ua_server.c

@@ -19,6 +19,7 @@ const UA_EXPORT UA_ServerConfig UA_ServerConfig_standard = {
 /* Namespace Handling */
 /**********************/
 
+#ifdef UA_EXTERNAL_NAMESPACES
 static void UA_ExternalNamespace_init(UA_ExternalNamespace *ens) {
     ens->index = 0;
     UA_String_init(&ens->url);
@@ -29,6 +30,32 @@ static void UA_ExternalNamespace_deleteMembers(UA_ExternalNamespace *ens) {
     ens->externalNodeStore.destroy(ens->externalNodeStore.ensHandle);
 }
 
+UA_StatusCode UA_EXPORT UA_Server_addExternalNamespace(UA_Server *server, UA_UInt16 namespaceIndex,
+                                                       const UA_String *url, UA_ExternalNodeStore *nodeStore) {
+	if(nodeStore == UA_NULL)
+		return UA_STATUSCODE_BADARGUMENTSMISSING;
+
+	//do not allow double indices
+	for(UA_UInt32 i = 0; i < server->externalNamespacesSize; i++){
+		if(server->externalNamespaces[i].index == namespaceIndex)
+			return UA_STATUSCODE_BADINDEXRANGEINVALID;
+	}
+    server->externalNamespaces = UA_realloc(server->externalNamespaces, sizeof(UA_ExternalNamespace) * (server->externalNamespacesSize+1));
+    server->externalNamespaces[server->externalNamespacesSize].externalNodeStore = *nodeStore;
+    server->externalNamespaces[server->externalNamespacesSize].index = namespaceIndex;
+    UA_String_copy(url,&server->externalNamespaces[server->externalNamespacesSize].url);
+    server->externalNamespacesSize++;
+    return UA_STATUSCODE_GOOD;
+}
+#endif /* UA_EXTERNAL_NAMESPACES*/
+
+UA_UInt16 UA_Server_addNamespace(UA_Server *server, const char* name) {
+    server->namespaces = UA_realloc(server->namespaces, sizeof(UA_String) * (server->namespacesSize+1));
+    server->namespaces[server->namespacesSize] = UA_STRING_ALLOC(name);
+    server->namespacesSize++;
+    return server->namespacesSize-1;
+}
+
 /*****************/
 /* Configuration */
 /*****************/
@@ -38,8 +65,8 @@ UA_Logger UA_Server_getLogger(UA_Server *server) {
 }
 
 void UA_Server_addNetworkLayer(UA_Server *server, UA_ServerNetworkLayer networkLayer) {
-    UA_ServerNetworkLayer *newlayers =
-        UA_realloc(server->networkLayers, sizeof(UA_ServerNetworkLayer)*(server->networkLayersSize+1));
+    UA_ServerNetworkLayer *newlayers = UA_realloc(server->networkLayers,
+                                                  sizeof(UA_ServerNetworkLayer)*(server->networkLayersSize+1));
     if(!newlayers) {
         UA_LOG_ERROR(server->logger, UA_LOGCATEGORY_SERVER, "Networklayer added");
         return;
@@ -48,7 +75,7 @@ void UA_Server_addNetworkLayer(UA_Server *server, UA_ServerNetworkLayer networkL
     server->networkLayers[server->networkLayersSize] = networkLayer;
     server->networkLayersSize++;
 
-    if(networkLayer.discoveryUrl){
+    if(networkLayer.discoveryUrl) {
         if(server->description.discoveryUrlsSize < 0)
             server->description.discoveryUrlsSize = 0;
         UA_String* newUrls = UA_realloc(server->description.discoveryUrls,
@@ -76,12 +103,6 @@ void UA_Server_setLogger(UA_Server *server, UA_Logger logger) {
     server->logger = logger;
 }
 
-UA_UInt16 UA_Server_addNamespace(UA_Server *server, const char* name) {
-    server->namespaces = UA_realloc(server->namespaces, sizeof(UA_String) * (server->namespacesSize+1));
-    server->namespaces[server->namespacesSize] = UA_STRING_ALLOC(name);
-    server->namespacesSize++;
-    return server->namespacesSize-1;
-}
 
 /**********/
 /* Server */
@@ -90,7 +111,7 @@ UA_UInt16 UA_Server_addNamespace(UA_Server *server, const char* name) {
 /* The server needs to be stopped before it can be deleted */
 void UA_Server_delete(UA_Server *server) {
     // Delete the timed work
-    UA_Server_deleteTimedWork(server);
+    UA_Server_deleteAllRepeatedJobs(server);
 
     // Delete all internal data
     UA_ApplicationDescription_deleteMembers(&server->description);
@@ -111,6 +132,7 @@ void UA_Server_delete(UA_Server *server) {
 #ifdef UA_MULTITHREADING
     pthread_cond_destroy(&server->dispatchQueue_condition); // so the workers don't spin if the queue is empty
     rcu_barrier(); // wait for all scheduled call_rcu work to complete
+   	rcu_unregister_thread();
 #endif
     UA_free(server);
 }
@@ -283,11 +305,13 @@ UA_Server * UA_Server_new(UA_ServerConfig config) {
     //FIXME: config contains strings, for now it's okay, but consider copying them as well
     server->config = config;
 
-    LIST_INIT(&server->timedWork);
+    LIST_INIT(&server->repeatedJobs);
 #ifdef UA_MULTITHREADING
     rcu_init();
+   	rcu_register_thread();
     cds_wfcq_init(&server->dispatchQueue_head, &server->dispatchQueue_tail);
-    server->delayedWork = UA_NULL;
+    cds_lfs_init(&server->mainLoopJobs);
+    server->delayedJobs = UA_NULL;
 #endif
 
     // logger
@@ -378,9 +402,9 @@ UA_Server * UA_Server_new(UA_ServerConfig config) {
 
     server->nodestore = UA_NodeStore_new();
 
-    UA_WorkItem cleanup = {.type = UA_WORKITEMTYPE_METHODCALL,
-                           .work.methodCall = {.method = UA_Server_cleanup, .data = NULL} };
-    UA_Server_addRepeatedWorkItem(server, &cleanup, 10000, NULL);
+    UA_Job cleanup = {.type = UA_JOBTYPE_METHODCALL,
+                      .job.methodCall = {.method = UA_Server_cleanup, .data = NULL} };
+    UA_Server_addRepeatedJob(server, cleanup, 10000, NULL);
 
     /**********************/
     /* Server Information */
@@ -388,19 +412,21 @@ UA_Server * UA_Server_new(UA_ServerConfig config) {
 
     server->startTime = UA_DateTime_now();
     static struct tm ct;
-    ct.tm_year = (__DATE__[7] - '0') * 1000 +  (__DATE__[8] - '0') * 100 + (__DATE__[9] - '0') * 10 + (__DATE__[10] - '0')- 1900;
-    if ((__DATE__[0]=='J') && (__DATE__[1]=='a') && (__DATE__[2]=='n')) ct.tm_mon = 1-1;
-    else if ((__DATE__[0]=='F') && (__DATE__[1]=='e') && (__DATE__[2]=='b')) ct.tm_mon = 2-1;
-    else if ((__DATE__[0]=='M') && (__DATE__[1]=='a') && (__DATE__[2]=='r')) ct.tm_mon = 3-1;
-    else if ((__DATE__[0]=='A') && (__DATE__[1]=='p') && (__DATE__[2]=='r')) ct.tm_mon = 4-1;
-    else if ((__DATE__[0]=='M') && (__DATE__[1]=='a') && (__DATE__[2]=='y')) ct.tm_mon = 5-1;
-    else if ((__DATE__[0]=='J') && (__DATE__[1]=='u') && (__DATE__[2]=='n')) ct.tm_mon = 6-1;
-    else if ((__DATE__[0]=='J') && (__DATE__[1]=='u') && (__DATE__[2]=='l')) ct.tm_mon = 7-1;
-    else if ((__DATE__[0]=='A') && (__DATE__[1]=='u') && (__DATE__[2]=='g')) ct.tm_mon = 8-1;
-    else if ((__DATE__[0]=='S') && (__DATE__[1]=='e') && (__DATE__[2]=='p')) ct.tm_mon = 9-1;
-    else if ((__DATE__[0]=='O') && (__DATE__[1]=='c') && (__DATE__[2]=='t')) ct.tm_mon = 10-1;
-    else if ((__DATE__[0]=='N') && (__DATE__[1]=='o') && (__DATE__[2]=='v')) ct.tm_mon = 11-1;
-    else if ((__DATE__[0]=='D') && (__DATE__[1]=='e') && (__DATE__[2]=='c')) ct.tm_mon = 12-1;
+    ct.tm_year = (__DATE__[7] - '0') * 1000 +  (__DATE__[8] - '0') * 100 +
+        (__DATE__[9] - '0') * 10 + (__DATE__[10] - '0')- 1900;
+
+    if(__DATE__[0]=='J' && __DATE__[1]=='a' && __DATE__[2]=='n') ct.tm_mon = 1-1;
+    else if(__DATE__[0]=='F' && __DATE__[1]=='e' && __DATE__[2]=='b') ct.tm_mon = 2-1;
+    else if(__DATE__[0]=='M' && __DATE__[1]=='a' && __DATE__[2]=='r') ct.tm_mon = 3-1;
+    else if(__DATE__[0]=='A' && __DATE__[1]=='p' && __DATE__[2]=='r') ct.tm_mon = 4-1;
+    else if(__DATE__[0]=='M' && __DATE__[1]=='a' && __DATE__[2]=='y') ct.tm_mon = 5-1;
+    else if(__DATE__[0]=='J' && __DATE__[1]=='u' && __DATE__[2]=='n') ct.tm_mon = 6-1;
+    else if(__DATE__[0]=='J' && __DATE__[1]=='u' && __DATE__[2]=='l') ct.tm_mon = 7-1;
+    else if(__DATE__[0]=='A' && __DATE__[1]=='u' && __DATE__[2]=='g') ct.tm_mon = 8-1;
+    else if(__DATE__[0]=='S' && __DATE__[1]=='e' && __DATE__[2]=='p') ct.tm_mon = 9-1;
+    else if(__DATE__[0]=='O' && __DATE__[1]=='c' && __DATE__[2]=='t') ct.tm_mon = 10-1;
+    else if(__DATE__[0]=='N' && __DATE__[1]=='o' && __DATE__[2]=='v') ct.tm_mon = 11-1;
+    else if(__DATE__[0]=='D' && __DATE__[1]=='e' && __DATE__[2]=='c') ct.tm_mon = 12-1;
 
     // special case to handle __DATE__ not inserting leading zero on day of month
     // if Day of month is less than 10 - it inserts a blank character

+ 14 - 12
src/server/ua_server_internal.h

@@ -23,11 +23,6 @@ typedef struct UA_ExternalNamespace {
 	UA_ExternalNodeStore externalNodeStore;
 } UA_ExternalNamespace;
 
-// forward declarations
-struct TimedWork;
-
-struct DelayedWork;
-
 struct UA_Server {
     /* Config */
     UA_ServerConfig config;
@@ -57,20 +52,25 @@ struct UA_Server {
     size_t externalNamespacesSize;
     UA_ExternalNamespace *externalNamespaces;
 
-    /* Workload Management */
-    LIST_HEAD(TimedWorkList, TimedWork) timedWork;
+    /* Jobs with a repetition interval */
+    LIST_HEAD(RepeatedJobsList, RepeatedJobs) repeatedJobs;
 
 #ifdef UA_MULTITHREADING
+    /* Dispatch queue head for the worker threads (the tail should not be in the same cache line) */
+	struct cds_wfcq_head dispatchQueue_head;
+
     UA_Boolean *running;
     UA_UInt16 nThreads;
     UA_UInt32 **workerCounters;
     pthread_t *thr;
-    struct DelayedWork *delayedWork;
 
-    // worker threads wait on the queue
-	struct cds_wfcq_head dispatchQueue_head;
+    struct cds_lfs_stack mainLoopJobs; /* Work that shall be executed only in the main loop and not
+                                          by worker threads */
+    struct DelayedJobs *delayedJobs;
+
+    pthread_cond_t dispatchQueue_condition; /* so the workers don't spin if the queue is empty */
+    /* Dispatch queue tail for the worker threads */
 	struct cds_wfcq_tail dispatchQueue_tail;
-    pthread_cond_t dispatchQueue_condition; // so the workers don't spin if the queue is empty
 #endif
 };
 
@@ -85,7 +85,9 @@ UA_AddNodesResult UA_Server_addNode(UA_Server *server, UA_Node *node, const UA_E
 
 UA_StatusCode UA_Server_addReferenceWithSession(UA_Server *server, UA_Session *session, const UA_AddReferencesItem *item);
 
-void UA_Server_deleteTimedWork(UA_Server *server);
+UA_StatusCode UA_Server_addDelayedJob(UA_Server *server, UA_Job job);
+
+void UA_Server_deleteAllRepeatedJobs(UA_Server *server);
 
 #define ADDREFERENCE(NODEID, REFTYPE_NODEID, TARGET_EXPNODEID) do {     \
         UA_AddReferencesItem item;                                      \

+ 376 - 297
src/server/ua_server_worker.c

@@ -2,38 +2,51 @@
 #include "ua_server_internal.h"
 
 /**
- * There are three types of work:
+ * There are four types of job execution:
  *
- * 1. Ordinary WorkItems (that are dispatched to worker threads if
- *    multithreading is activated)
- * 2. Timed work that is executed at a precise date (with an optional repetition
- *    interval)
- * 3. Delayed work that is executed at a later time when it is guaranteed that
- *    all previous work has actually finished (only for multithreading)
+ * 1. Normal jobs (dispatched to worker threads if multithreading is activated)
+ *
+ * 2. Repeated jobs with a repetition interval (dispatched to worker threads)
+ *
+ * 3. Mainloop jobs are executed (once) from the mainloop and not in the worker threads. The server
+ * contains a stack structure where all threads can add mainloop jobs for the next mainloop
+ * iteration. This is used e.g. to trigger adding and removing repeated jobs without blocking the
+ * mainloop.
+ *
+ * 4. Delayed jobs are executed once in a worker thread. But only when all normal jobs that were
+ * dispatched earlier have been executed. This is achieved by a counter in the worker threads. We
+ * compute from the counter if all previous jobs have finished. The delay can be very long, since we
+ * try to not interfere too much with normal execution. A use case is to eventually free obsolete
+ * structures that _could_ still be accessed from concurrent threads.
+ *
+ * - Remove the entry from the list
+ * - mark it as "dead" with an atomic operation
+ * - add a delayed job that frees the memory when all concurrent operations have completed
  */
 
 #define MAXTIMEOUT 50000 // max timeout in microsec until the next main loop iteration
-#define BATCHSIZE 20 // max size of worklists that are dispatched to workers
-
-static void processWork(UA_Server *server, UA_WorkItem *work, size_t workSize) {
-    for(size_t i = 0; i < workSize; i++) {
-        UA_WorkItem *item = &work[i];
-        switch(item->type) {
-        case UA_WORKITEMTYPE_BINARYMESSAGE:
-            UA_Server_processBinaryMessage(server, item->work.binaryMessage.connection,
-                                           &item->work.binaryMessage.message);
-            item->work.binaryMessage.connection->releaseBuffer(item->work.binaryMessage.connection,
-                                                               &item->work.binaryMessage.message);
+#define BATCHSIZE 20 // max number of jobs that are dispatched at once to workers
+
+static void processJobs(UA_Server *server, UA_Job *jobs, size_t jobsSize) {
+    for(size_t i = 0; i < jobsSize; i++) {
+        UA_Job *job = &jobs[i];
+        switch(job->type) {
+        case UA_JOBTYPE_BINARYMESSAGE:
+            UA_Server_processBinaryMessage(server, job->job.binaryMessage.connection,
+                                           &job->job.binaryMessage.message);
+            UA_Connection *c = job->job.binaryMessage.connection;
+            c->releaseBuffer(job->job.binaryMessage.connection, &job->job.binaryMessage.message);
             break;
-        case UA_WORKITEMTYPE_CLOSECONNECTION:
-            UA_Connection_detachSecureChannel(item->work.closeConnection);
-            item->work.closeConnection->close(item->work.closeConnection);
+        case UA_JOBTYPE_CLOSECONNECTION:
+            UA_Connection_detachSecureChannel(job->job.closeConnection);
+            job->job.closeConnection->close(job->job.closeConnection);
             break;
-        case UA_WORKITEMTYPE_METHODCALL:
-        case UA_WORKITEMTYPE_DELAYEDMETHODCALL:
-            item->work.methodCall.method(server, item->work.methodCall.data);
+        case UA_JOBTYPE_METHODCALL:
+        case UA_JOBTYPE_DELAYEDMETHODCALL:
+            job->job.methodCall.method(server, job->job.methodCall.data);
             break;
         default:
+            UA_LOG_WARNING(server->logger, UA_LOGCATEGORY_SERVER, "Trying to execute a job of unknown type");
             break;
         }
     }
@@ -45,35 +58,40 @@ static void processWork(UA_Server *server, UA_WorkItem *work, size_t workSize) {
 
 #ifdef UA_MULTITHREADING
 
-/** Entry in the dipatch queue */
-struct workListNode {
+struct MainLoopJob {
+    struct cds_lfs_node node;
+    UA_Job job;
+};
+
+/** Entry in the dispatch queue */
+struct DispatchJobsList {
     struct cds_wfcq_node node; // node for the queue
-    UA_UInt32 workSize;
-    UA_WorkItem *work;
+    size_t jobsSize;
+    UA_Job *jobs;
 };
 
-/** Dispatch work to workers. Slices the work up if it contains more than
-    BATCHSIZE items. The work array is freed by the worker threads. */
-static void dispatchWork(UA_Server *server, UA_Int32 workSize, UA_WorkItem *work) {
-    UA_Int32 startIndex = workSize; // start at the end
-    while(workSize > 0) {
-        UA_Int32 size = BATCHSIZE;
-        if(size > workSize)
-            size = workSize;
+/** Dispatch jobs to workers. Slices the job array up if it contains more than BATCHSIZE items. The jobs
+    array is freed in the worker threads. */
+static void dispatchJobs(UA_Server *server, UA_Job *jobs, size_t jobsSize) {
+    size_t startIndex = jobsSize; // start at the end
+    while(jobsSize > 0) {
+        size_t size = BATCHSIZE;
+        if(size > jobsSize)
+            size = jobsSize;
         startIndex = startIndex - size;
-        struct workListNode *wln = UA_malloc(sizeof(struct workListNode));
+        struct DispatchJobsList *wln = UA_malloc(sizeof(struct DispatchJobsList));
         if(startIndex > 0) {
-            UA_WorkItem *workSlice = UA_malloc(size * sizeof(UA_WorkItem));
-            UA_memcpy(workSlice, &work[startIndex], size * sizeof(UA_WorkItem));
-            *wln = (struct workListNode){.workSize = size, .work = workSlice};
-        }
-        else {
-            // do not alloc, but forward the original array
-            *wln = (struct workListNode){.workSize = size, .work = work};
+            wln->jobs = UA_malloc(size * sizeof(UA_Job));
+            UA_memcpy(wln->jobs, &jobs[startIndex], size * sizeof(UA_Job));
+            wln->jobsSize = size;
+        } else {
+            /* forward the original array */
+            wln->jobsSize = size;
+            wln->jobs = jobs;
         }
         cds_wfcq_node_init(&wln->node);
         cds_wfcq_enqueue(&server->dispatchQueue_head, &server->dispatchQueue_tail, &wln->node);
-        workSize -= size;
+        jobsSize -= size;
     } 
 }
 
@@ -83,13 +101,11 @@ struct workerStartData {
     UA_UInt32 **workerCounter;
 };
 
-/** Waits until work arrives in the dispatch queue (restart after 10ms) and
-    processes it. */
+/** Waits until jobs arrive in the dispatch queue and processes them. */
 static void * workerLoop(struct workerStartData *startInfo) {
    	rcu_register_thread();
     UA_UInt32 *c = UA_malloc(sizeof(UA_UInt32));
     uatomic_set(c, 0);
-
     *startInfo->workerCounter = c;
     UA_Server *server = startInfo->server;
     UA_free(startInfo);
@@ -100,13 +116,14 @@ static void * workerLoop(struct workerStartData *startInfo) {
     struct timespec to;
 
     while(*server->running) {
-        struct workListNode *wln = (struct workListNode*)
+        struct DispatchJobsList *wln = (struct DispatchJobsList*)
             cds_wfcq_dequeue_blocking(&server->dispatchQueue_head, &server->dispatchQueue_tail);
         if(wln) {
-            processWork(server, wln->work, wln->workSize);
-            UA_free(wln->work);
+            processJobs(server, wln->jobs, wln->jobsSize);
+            UA_free(wln->jobs);
             UA_free(wln);
         } else {
+            /* sleep until a work arrives (and wakes up all worker threads) */
             clock_gettime(CLOCK_REALTIME, &to);
             to.tv_sec += 2;
             pthread_cond_timedwait(&server->dispatchQueue_condition, &mutex, &to);
@@ -115,133 +132,156 @@ static void * workerLoop(struct workerStartData *startInfo) {
     }
     pthread_mutex_unlock(&mutex);
     pthread_mutex_destroy(&mutex);
+
+    rcu_barrier(); // wait for all scheduled call_rcu work to complete
    	rcu_unregister_thread();
+
+    /* we need to return _something_ for pthreads */
     return UA_NULL;
 }
 
 static void emptyDispatchQueue(UA_Server *server) {
     while(!cds_wfcq_empty(&server->dispatchQueue_head, &server->dispatchQueue_tail)) {
-        struct workListNode *wln = (struct workListNode*)
+        struct DispatchJobsList *wln = (struct DispatchJobsList*)
             cds_wfcq_dequeue_blocking(&server->dispatchQueue_head, &server->dispatchQueue_tail);
-        processWork(server, wln->work, wln->workSize);
-        UA_free(wln->work);
+        processJobs(server, wln->jobs, wln->jobsSize);
+        UA_free(wln->jobs);
         UA_free(wln);
     }
 }
 
 #endif
 
-/**************/
-/* Timed Work */
-/**************/
+/*****************/
+/* Repeated Jobs */
+/*****************/
+
+struct IdentifiedJob {
+    UA_Job job;
+    UA_Guid id;
+};
 
 /**
- * The TimedWork structure contains an array of workitems that are either executed at the same time
- * or in the same repetition inverval. The linked list is sorted, so we can stop traversing when the
- * first element has nextTime > now.
+ * The RepeatedJobs structure contains an array of jobs that are either executed with the same
+ * repetition inverval. The linked list is sorted, so we can stop traversing when the first element
+ * has nextTime > now.
  */
-struct TimedWork {
-    LIST_ENTRY(TimedWork) pointers;
-    UA_DateTime nextTime;
-    UA_UInt32 interval; ///> in 100ns resolution, 0 means no repetition
-    size_t workSize;
-    UA_WorkItem *work;
-    UA_Guid workIds[];
+struct RepeatedJobs {
+    LIST_ENTRY(RepeatedJobs) pointers; ///> Links to the next list of repeated jobs (with a different) interval
+    UA_DateTime nextTime; ///> The next time when the jobs are to be executed
+    UA_UInt32 interval; ///> Interval in 100ns resolution
+    size_t jobsSize; ///> Number of jobs contained
+    struct IdentifiedJob jobs[]; ///> The jobs. This is not a pointer, instead the struct is variable sized.
+};
+
+/* throwaway struct for the mainloop callback */
+struct AddRepeatedJob {
+    struct IdentifiedJob job;
+    UA_UInt32 interval;
 };
 
-/* Traverse the list until there is a TimedWork to which the item can be added or we reached the
-   end. The item is copied into the TimedWork and not freed by this function. The interval is in
-   100ns resolution */
-static UA_StatusCode addTimedWork(UA_Server *server, const UA_WorkItem *item, UA_DateTime firstTime,
-                                  UA_UInt32 interval, UA_Guid *resultWorkGuid) {
-    struct TimedWork *matchingTw = UA_NULL; // add the item here
-    struct TimedWork *lastTw = UA_NULL; // if there is no matchingTw, add a new TimedWork after this entry
-    struct TimedWork *tempTw;
+/* internal. call only from the main loop. */
+static UA_StatusCode addRepeatedJob(UA_Server *server, struct AddRepeatedJob * restrict arw) {
+    struct RepeatedJobs *matchingTw = UA_NULL; // add the item here
+    struct RepeatedJobs *lastTw = UA_NULL; // if there is no repeated job, add a new one this entry
+    struct RepeatedJobs *tempTw;
 
     /* search for matching entry */
-    tempTw = LIST_FIRST(&server->timedWork);
-    if(interval == 0) {
-        /* single execution. the time needs to match */
-        while(tempTw) {
-            if(tempTw->nextTime >= firstTime) {
-                if(tempTw->nextTime == firstTime)
-                    matchingTw = tempTw;
-                break;
-            }
-            lastTw = tempTw;
-            tempTw = LIST_NEXT(lastTw, pointers);
-        }
-    } else {
-        /* repeated execution. the interval needs to match */
-        while(tempTw) {
-            if(interval == tempTw->interval) {
-                matchingTw = tempTw;
-                break;
-            }
-            if(tempTw->nextTime > firstTime)
-                break;
-            lastTw = tempTw;
-            tempTw = LIST_NEXT(lastTw, pointers);
+    UA_DateTime firstTime = UA_DateTime_now() + arw->interval;
+    tempTw = LIST_FIRST(&server->repeatedJobs);
+    while(tempTw) {
+        if(arw->interval == tempTw->interval) {
+            matchingTw = tempTw;
+            break;
         }
+        if(tempTw->nextTime > firstTime)
+            break;
+        lastTw = tempTw;
+        tempTw = LIST_NEXT(lastTw, pointers);
     }
     
     if(matchingTw) {
         /* append to matching entry */
-        matchingTw = UA_realloc(matchingTw, sizeof(struct TimedWork) + sizeof(UA_Guid)*(matchingTw->workSize + 1));
-        if(!matchingTw)
+        matchingTw = UA_realloc(matchingTw, sizeof(struct RepeatedJobs) +
+                                (sizeof(struct IdentifiedJob) * (matchingTw->jobsSize + 1)));
+        if(!matchingTw) {
+            UA_free(arw);
             return UA_STATUSCODE_BADOUTOFMEMORY;
+        }
+
+        /* point the realloced struct */
         if(matchingTw->pointers.le_next)
             matchingTw->pointers.le_next->pointers.le_prev = &matchingTw->pointers.le_next;
         if(matchingTw->pointers.le_prev)
             *matchingTw->pointers.le_prev = matchingTw;
-        UA_WorkItem *newItems = UA_realloc(matchingTw->work, sizeof(UA_WorkItem)*(matchingTw->workSize + 1));
-        if(!newItems)
-            return UA_STATUSCODE_BADOUTOFMEMORY;
-        matchingTw->work = newItems;
     } else {
         /* create a new entry */
-        matchingTw = UA_malloc(sizeof(struct TimedWork) + sizeof(UA_Guid));
-        if(!matchingTw)
-            return UA_STATUSCODE_BADOUTOFMEMORY;
-        matchingTw->work = UA_malloc(sizeof(UA_WorkItem));
-        if(!matchingTw->work) {
-            UA_free(matchingTw);
+        matchingTw = UA_malloc(sizeof(struct RepeatedJobs) + sizeof(struct IdentifiedJob));
+        if(!matchingTw) {
+            UA_free(arw);
             return UA_STATUSCODE_BADOUTOFMEMORY;
         }
-        matchingTw->workSize = 0;
+        matchingTw->jobsSize = 0;
         matchingTw->nextTime = firstTime;
-        matchingTw->interval = interval;
+        matchingTw->interval = arw->interval;
         if(lastTw)
             LIST_INSERT_AFTER(lastTw, matchingTw, pointers);
         else
-            LIST_INSERT_HEAD(&server->timedWork, matchingTw, pointers);
-    }
-    matchingTw->work[matchingTw->workSize] = *item;
-    matchingTw->workSize++;
-
-    /* create a guid for finding and deleting the timed work later on */
-    if(resultWorkGuid) {
-        matchingTw->workIds[matchingTw->workSize] = UA_Guid_random(&server->random_seed);
-        *resultWorkGuid = matchingTw->workIds[matchingTw->workSize];
+            LIST_INSERT_HEAD(&server->repeatedJobs, matchingTw, pointers);
     }
+    matchingTw->jobs[matchingTw->jobsSize] = arw->job;
+    matchingTw->jobsSize++;
+    UA_free(arw);
     return UA_STATUSCODE_GOOD;
 }
 
-UA_StatusCode UA_Server_addTimedWorkItem(UA_Server *server, const UA_WorkItem *work, UA_DateTime executionTime,
-                                         UA_Guid *resultWorkGuid) {
-    return addTimedWork(server, work, executionTime, 0, resultWorkGuid);
-}
+UA_StatusCode UA_Server_addRepeatedJob(UA_Server *server, UA_Job job, UA_UInt32 interval, UA_Guid *jobId) {
+    /* the interval needs to be at least 5ms */
+    if(interval < 5)
+        return UA_STATUSCODE_BADINTERNALERROR;
+    interval *= 10000; // from ms to 100ns resolution
 
-UA_StatusCode UA_Server_addRepeatedWorkItem(UA_Server *server, const UA_WorkItem *work, UA_UInt32 interval,
-                                            UA_Guid *resultWorkGuid) {
-    return addTimedWork(server, work, UA_DateTime_now() + interval * 10000, interval * 10000, resultWorkGuid);
+#ifdef UA_MULTITHREADING
+    struct AddRepeatedJob *arw = UA_malloc(sizeof(struct AddRepeatedJob));
+    if(!arw)
+        return UA_STATUSCODE_BADOUTOFMEMORY;
+
+    arw->interval = interval;
+    arw->job.job = job;
+    if(jobId) {
+        arw->job.id = UA_Guid_random(&server->random_seed);
+        *jobId = arw->job.id;
+    } else
+        UA_Guid_init(&arw->job.id);
+
+    struct MainLoopJob *mlw = UA_malloc(sizeof(struct MainLoopJob));
+    if(!mlw) {
+        UA_free(arw);
+        return UA_STATUSCODE_BADOUTOFMEMORY;
+    }
+    mlw->job = (UA_Job) {
+        .type = UA_JOBTYPE_METHODCALL,
+        .job.methodCall = {.data = arw, .method = (void (*)(UA_Server*, void*))addRepeatedJob}};
+    cds_lfs_push(&server->mainLoopJobs, &mlw->node);
+#else
+    struct AddRepeatedJob arw;
+    arw.interval = interval;
+    arw.job.job = job;
+    if(jobId) {
+        arw.job.id = UA_Guid_random(&server->random_seed);
+        *jobId = arw.job.id;
+    } else
+        UA_Guid_init(&arw.job.id);
+    addRepeatedJob(server, &arw);
+#endif
+    return UA_STATUSCODE_GOOD;
 }
 
-/** Dispatches timed work, returns the timeout until the next timed work in ms */
-static UA_UInt16 processTimedWork(UA_Server *server) {
+/* Returns the timeout until the next repeated job in ms */
+static UA_UInt16 processRepeatedJobs(UA_Server *server) {
     UA_DateTime current = UA_DateTime_now();
-    struct TimedWork *next = LIST_FIRST(&server->timedWork);
-    struct TimedWork *tw = UA_NULL;
+    struct RepeatedJobs *next = LIST_FIRST(&server->repeatedJobs);
+    struct RepeatedJobs *tw = UA_NULL;
 
     while(next) {
         tw = next;
@@ -250,58 +290,35 @@ static UA_UInt16 processTimedWork(UA_Server *server) {
         next = LIST_NEXT(tw, pointers);
 
 #ifdef UA_MULTITHREADING
-        if(tw->interval > 0) {
-            // copy the entry and insert at the new location
-            UA_WorkItem *workCopy = (UA_WorkItem *) UA_malloc(sizeof(UA_WorkItem) * tw->workSize);
-            UA_memcpy(workCopy, tw->work, sizeof(UA_WorkItem) * tw->workSize);
-            dispatchWork(server, tw->workSize, workCopy); // frees the work pointer
-            tw->nextTime += tw->interval;
-            struct TimedWork *prevTw = tw; // after which tw do we insert?
-            while(UA_TRUE) {
-                struct TimedWork *n = LIST_NEXT(prevTw, pointers);
-                if(!n || n->nextTime > tw->nextTime)
-                    break;
-                prevTw = n;
-            }
-            if(prevTw != tw) {
-                LIST_REMOVE(tw, pointers);
-                LIST_INSERT_AFTER(prevTw, tw, pointers);
-            }
-        } else {
-            dispatchWork(server, tw->workSize, tw->work); // frees the work pointer
-            LIST_REMOVE(tw, pointers);
-            UA_free(tw);
+        // copy the entry and insert at the new location
+        UA_Job *jobsCopy = UA_malloc(sizeof(UA_Job) * tw->jobsSize);
+        if(!jobsCopy) {
+            UA_LOG_ERROR(server->logger, UA_LOGCATEGORY_SERVER, "Not enough memory to dispatch delayed jobs");
+            break;
         }
+        for(size_t i=0;i<tw->jobsSize;i++)
+            jobsCopy[i] = tw->jobs[i].job;
+        dispatchJobs(server, jobsCopy, tw->jobsSize); // frees the job pointer
 #else
-        // 1) Process the work since it is past its due date
-        processWork(server, tw->work, tw->workSize); // does not free the work ptr
-
-        // 2) If the work is repeated, add it back into the list. Otherwise remove it.
-        if(tw->interval > 0) {
-            tw->nextTime += tw->interval;
-            if(tw->nextTime < current)
-                tw->nextTime = current;
-            struct TimedWork *prevTw = tw;
-            while(UA_TRUE) {
-                struct TimedWork *n = LIST_NEXT(prevTw, pointers);
-                if(!n || n->nextTime > tw->nextTime)
-                    break;
-                prevTw = n;
-            }
-            if(prevTw != tw) {
-                LIST_REMOVE(tw, pointers);
-                LIST_INSERT_AFTER(prevTw, tw, pointers);
-            }
-        } else {
+        for(size_t i=0;i<tw->jobsSize;i++)
+            processJobs(server, &tw->jobs[i].job, 1); // does not free the job ptr
+#endif
+        tw->nextTime += tw->interval;
+        struct RepeatedJobs *prevTw = tw; // after which tw do we insert?
+        while(UA_TRUE) {
+            struct RepeatedJobs *n = LIST_NEXT(prevTw, pointers);
+            if(!n || n->nextTime > tw->nextTime)
+                break;
+            prevTw = n;
+        }
+        if(prevTw != tw) {
             LIST_REMOVE(tw, pointers);
-            UA_free(tw->work);
-            UA_free(tw);
+            LIST_INSERT_AFTER(prevTw, tw, pointers);
         }
-#endif
     }
 
-    // check if the next timed work is sooner than the usual timeout
-    struct TimedWork *first = LIST_FIRST(&server->timedWork);
+    // check if the next repeated job is sooner than the usual timeout
+    struct RepeatedJobs *first = LIST_FIRST(&server->repeatedJobs);
     UA_UInt16 timeout = MAXTIMEOUT;
     if(first) {
         timeout = (first->nextTime - current)/10;
@@ -311,35 +328,73 @@ static UA_UInt16 processTimedWork(UA_Server *server) {
     return timeout;
 }
 
-void UA_Server_deleteTimedWork(UA_Server *server) {
-    struct TimedWork *current;
-    struct TimedWork *next = LIST_FIRST(&server->timedWork);
-    while(next) {
-        current = next;
-        next = LIST_NEXT(current, pointers);
+/* Call this function only from the main loop! */
+static void removeRepeatedJob(UA_Server *server, UA_Guid *jobId) {
+    struct RepeatedJobs *tw;
+    LIST_FOREACH(tw, &server->repeatedJobs, pointers) {
+        for(size_t i = 0; i < tw->jobsSize; i++) {
+            if(!UA_Guid_equal(jobId, &tw->jobs[i].id))
+                continue;
+            if(tw->jobsSize == 1) {
+                LIST_REMOVE(tw, pointers);
+                UA_free(tw);
+            } else {
+                tw->jobsSize--;
+                tw->jobs[i] = tw->jobs[tw->jobsSize]; // move the last entry to overwrite
+            }
+            goto finish; // ugly break
+        }
+    }
+ finish:
+#ifdef UA_MULTITHREADING
+    UA_free(jobId);
+#endif
+    return;
+}
+
+UA_StatusCode UA_Server_removeRepeatedJob(UA_Server *server, UA_Guid jobId) {
+#ifdef UA_MULTITHREADING
+    UA_Guid *idptr = UA_malloc(sizeof(UA_Guid));
+    if(!idptr)
+        return UA_STATUSCODE_BADOUTOFMEMORY;
+    *idptr = jobId;
+    // dispatch to the mainloopjobs stack
+    struct MainLoopJob *mlw = UA_malloc(sizeof(struct MainLoopJob));
+    mlw->job = (UA_Job) {
+        .type = UA_JOBTYPE_METHODCALL,
+        .job.methodCall = {.data = idptr, .method = (void (*)(UA_Server*, void*))removeRepeatedJob}};
+    cds_lfs_push(&server->mainLoopJobs, &mlw->node);
+#else
+    removeRepeatedJob(server, &jobId);
+#endif
+    return UA_STATUSCODE_GOOD;
+}
+
+void UA_Server_deleteAllRepeatedJobs(UA_Server *server) {
+    struct RepeatedJobs *current;
+    while((current = LIST_FIRST(&server->repeatedJobs))) {
         LIST_REMOVE(current, pointers);
-        UA_free(current->work);
         UA_free(current);
     }
 }
 
 /****************/
-/* Delayed Work */
+/* Delayed Jobs */
 /****************/
 
 #ifdef UA_MULTITHREADING
 
-#define DELAYEDWORKSIZE 100 // Collect delayed work until we have DELAYEDWORKSIZE items
+#define DELAYEDJOBSSIZE 100 // Collect delayed jobs until we have DELAYEDWORKSIZE items
 
-struct DelayedWork {
-    struct DelayedWork *next;
-    UA_UInt32 *workerCounters; // initially UA_NULL until a workitem gets the counters
-    UA_UInt32 workItemsCount; // the size of the array is DELAYEDWORKSIZE, the count may be less
-    UA_WorkItem *workItems; // when it runs full, a new delayedWork entry is created
+struct DelayedJobs {
+    struct DelayedJobs *next;
+    UA_UInt32 *workerCounters; // initially UA_NULL until the counter are set
+    UA_UInt32 jobsCount; // the size of the array is DELAYEDJOBSSIZE, the count may be less
+    UA_Job jobs[DELAYEDJOBSSIZE]; // when it runs full, a new delayedJobs entry is created
 };
 
-// Dispatched as a methodcall-WorkItem when the delayedwork is added
-static void getCounters(UA_Server *server, struct DelayedWork *delayed) {
+/* Dispatched as an ordinary job when the DelayedJobs list is full */
+static void getCounters(UA_Server *server, struct DelayedJobs *delayed) {
     UA_UInt32 *counters = UA_malloc(server->nThreads * sizeof(UA_UInt32));
     for(UA_UInt16 i = 0;i<server->nThreads;i++)
         counters[i] = *server->workerCounters[i];
@@ -349,84 +404,83 @@ static void getCounters(UA_Server *server, struct DelayedWork *delayed) {
 // Call from the main thread only. This is the only function that modifies
 // server->delayedWork. processDelayedWorkQueue modifies the "next" (after the
 // head).
-static void addDelayedWork(UA_Server *server, UA_WorkItem work) {
-    struct DelayedWork *dw = server->delayedWork;
-    if(!dw || dw->workItemsCount >= DELAYEDWORKSIZE) {
-        struct DelayedWork *newwork = UA_malloc(sizeof(struct DelayedWork));
-        newwork->workItems = UA_malloc(sizeof(UA_WorkItem)*DELAYEDWORKSIZE);
-        newwork->workItemsCount = 0;
-        newwork->workerCounters = UA_NULL;
-        newwork->next = server->delayedWork;
-
-        // dispatch a method that sets the counter
-        if(dw && dw->workItemsCount >= DELAYEDWORKSIZE) {
-            UA_WorkItem *setCounter = UA_malloc(sizeof(UA_WorkItem));
-            *setCounter = (UA_WorkItem)
-                {.type = UA_WORKITEMTYPE_METHODCALL,
-                 .work.methodCall = {.method = (void (*)(UA_Server*, void*))getCounters, .data = dw}};
-            dispatchWork(server, 1, setCounter);
+static void addDelayedJob(UA_Server *server, UA_Job *job) {
+    struct DelayedJobs *dj = server->delayedJobs;
+    if(!dj || dj->jobsCount >= DELAYEDJOBSSIZE) {
+        /* create a new DelayedJobs and add it to the linked list */
+        dj = UA_malloc(sizeof(struct DelayedJobs));
+        if(!dj) {
+            UA_LOG_ERROR(server->logger, UA_LOGCATEGORY_SERVER, "Not enough memory to add a delayed job");
+            return;
+        }
+        dj->jobsCount = 0;
+        dj->workerCounters = UA_NULL;
+        dj->next = server->delayedJobs;
+        server->delayedJobs = dj;
+
+        /* dispatch a method that sets the counter for the full list that comes afterwards */
+        if(dj->next) {
+            UA_Job *setCounter = UA_malloc(sizeof(UA_Job));
+            *setCounter = (UA_Job) {.type = UA_JOBTYPE_METHODCALL, .job.methodCall =
+                                    {.method = (void (*)(UA_Server*, void*))getCounters, .data = dj->next}};
+            dispatchJobs(server, setCounter, 1);
         }
-
-        server->delayedWork = newwork;
-        dw = newwork;
     }
-    dw->workItems[dw->workItemsCount] = work;
-    dw->workItemsCount++;
+    dj->jobs[dj->jobsCount] = *job;
+    dj->jobsCount++;
 }
 
-static void processDelayedWork(UA_Server *server) {
-    struct DelayedWork *dw = server->delayedWork;
-    while(dw) {
-        processWork(server, dw->workItems, dw->workItemsCount);
-        struct DelayedWork *next = dw->next;
-        UA_free(dw->workerCounters);
-        UA_free(dw->workItems);
-        UA_free(dw);
-        dw = next;
-    }
+static void addDelayedJobAsync(UA_Server *server, UA_Job *job) {
+    addDelayedJob(server, job);
+    UA_free(job);
 }
 
-// Execute this every N seconds (repeated work) to execute delayed work that is ready
-static void dispatchDelayedWork(UA_Server *server, void *data /* not used, but needed for the signature*/) {
-    struct DelayedWork *dw = UA_NULL;
-    struct DelayedWork *readydw = UA_NULL;
-    struct DelayedWork *beforedw = server->delayedWork;
+UA_StatusCode UA_Server_addDelayedJob(UA_Server *server, UA_Job job) {
+    UA_Job *j = UA_malloc(sizeof(UA_Job));
+    if(!j)
+        return UA_STATUSCODE_BADOUTOFMEMORY;
+    *j = job;
+    struct MainLoopJob *mlw = UA_malloc(sizeof(struct MainLoopJob));
+    mlw->job = (UA_Job) {.type = UA_JOBTYPE_METHODCALL, .job.methodCall =
+                         {.data = j, .method = (void (*)(UA_Server*, void*))addDelayedJobAsync}};
+    cds_lfs_push(&server->mainLoopJobs, &mlw->node);
+    return UA_STATUSCODE_GOOD;
+}
 
-    // start at the second...
-    if(beforedw)
-        dw = beforedw->next;
+/* Find out which delayed jobs can be executed now */
+static void dispatchDelayedJobs(UA_Server *server, void *data /* not used, but needed for the signature*/) {
+    /* start at the second */
+    struct DelayedJobs *dw = server->delayedJobs, *beforedw = dw;
+    if(dw)
+        dw = dw->next;
 
-    // find the first delayedwork where the counters are set and have been moved
+    /* find the first delayedwork where the counters have been set and have moved */
     while(dw) {
         if(!dw->workerCounters) {
             beforedw = dw;
             dw = dw->next;
             continue;
         }
-
-        UA_Boolean countersMoved = UA_TRUE;
+        UA_Boolean allMoved = UA_TRUE;
         for(UA_UInt16 i=0;i<server->nThreads;i++) {
-            if(*server->workerCounters[i] == dw->workerCounters[i])
-                countersMoved = UA_FALSE;
+            if(dw->workerCounters[i] == *server->workerCounters[i]) {
+                allMoved = UA_FALSE;
                 break;
+            }
         }
-        
-        if(countersMoved) {
-            readydw = uatomic_xchg(&beforedw->next, UA_NULL);
+        if(allMoved)
             break;
-        } else {
-            beforedw = dw;
-            dw = dw->next;
-        }
+        beforedw = dw;
+        dw = dw->next;
     }
 
-    // we have a ready entry. all afterwards are also ready
-    while(readydw) {
-        dispatchWork(server, readydw->workItemsCount, readydw->workItems);
-        beforedw = readydw;
-        readydw = readydw->next;
-        UA_free(beforedw->workerCounters);
-        UA_free(beforedw);
+    /* process and free all delayed jobs from here on */
+    while(dw) {
+        processJobs(server, dw->jobs, dw->jobsCount);
+        struct DelayedJobs *next = uatomic_xchg(&beforedw->next, UA_NULL);
+        UA_free(dw);
+        UA_free(dw->workerCounters);
+        dw = next;
     }
 }
 
@@ -436,9 +490,26 @@ static void dispatchDelayedWork(UA_Server *server, void *data /* not used, but n
 /* Main Server Loop */
 /********************/
 
-UA_StatusCode UA_Server_run_startup(UA_Server *server, UA_UInt16 nThreads, UA_Boolean *running){
 #ifdef UA_MULTITHREADING
-    // 1) Prepare the threads
+static void processMainLoopJobs(UA_Server *server) {
+    /* no synchronization required if we only use push and pop_all */
+    struct cds_lfs_head *head = __cds_lfs_pop_all(&server->mainLoopJobs);
+    if(!head)
+        return;
+    struct MainLoopJob *mlw = (struct MainLoopJob*)&head->node;
+    struct MainLoopJob *next;
+    do {
+        processJobs(server, &mlw->job, 1);
+        next = (struct MainLoopJob*)mlw->node.next;
+        UA_free(mlw);
+    } while((mlw = next));
+    //UA_free(head);
+}
+#endif
+
+UA_StatusCode UA_Server_run_startup(UA_Server *server, UA_UInt16 nThreads, UA_Boolean *running) {
+#ifdef UA_MULTITHREADING
+    /* Prepare the worker threads */
     server->running = running; // the threads need to access the variable
     server->nThreads = nThreads;
     pthread_cond_init(&server->dispatchQueue_condition, 0);
@@ -451,52 +522,59 @@ UA_StatusCode UA_Server_run_startup(UA_Server *server, UA_UInt16 nThreads, UA_Bo
         pthread_create(&server->thr[i], UA_NULL, (void* (*)(void*))workerLoop, startData);
     }
 
-    UA_WorkItem processDelayed = {.type = UA_WORKITEMTYPE_METHODCALL,
-            .work.methodCall = {.method = dispatchDelayedWork,
-                    .data = UA_NULL} };
-    UA_Server_addRepeatedWorkItem(server, &processDelayed, 10000000, UA_NULL);
+    /* try to execute the delayed callbacks every 10 sec */
+    UA_Job processDelayed = {.type = UA_JOBTYPE_METHODCALL,
+                             .job.methodCall = {.method = dispatchDelayedJobs, .data = UA_NULL} };
+    UA_Server_addRepeatedJob(server, processDelayed, 10000, UA_NULL);
 #endif
 
-    // 2) Start the networklayers
+    /* Start the networklayers */
     for(size_t i = 0; i <server->networkLayersSize; i++)
         server->networkLayers[i].start(server->networkLayers[i].nlHandle, &server->logger);
 
     return UA_STATUSCODE_GOOD;
 }
 
-UA_StatusCode UA_Server_run_getAndProcessWork(UA_Server *server, UA_Boolean *running){
-    // 3.1) Process timed work
-    UA_UInt16 timeout = processTimedWork(server);
+UA_StatusCode UA_Server_run_mainloop(UA_Server *server, UA_Boolean *running) {
+#ifdef UA_MULTITHREADING
+    /* Run Work in the main loop */
+    processMainLoopJobs(server);
+#endif
+    /* Process repeated work */
+    UA_UInt16 timeout = processRepeatedJobs(server);
 
-    // 3.2) Get work from the networklayer and dispatch it
+    /* Get work from the networklayer */
     for(size_t i = 0; i < server->networkLayersSize; i++) {
         UA_ServerNetworkLayer *nl = &server->networkLayers[i];
-        UA_WorkItem *work;
-        UA_Int32 workSize;
+        UA_Job *jobs;
+        UA_Int32 jobsSize;
         if(*running) {
             if(i == server->networkLayersSize-1)
-                workSize = nl->getWork(nl->nlHandle, &work, timeout);
+                jobsSize = nl->getJobs(nl->nlHandle, &jobs, timeout);
             else
-                workSize = nl->getWork(nl->nlHandle, &work, 0);
-        } else {
-            workSize = server->networkLayers[i].stop(nl->nlHandle, &work);
-        }
+                jobsSize = nl->getJobs(nl->nlHandle, &jobs, 0);
+        } else
+            jobsSize = server->networkLayers[i].stop(nl->nlHandle, &jobs);
 
 #ifdef UA_MULTITHREADING
-// Filter out delayed work
-for(UA_Int32 k=0;k<workSize;k++) {
-    if(work[k].type != UA_WORKITEMTYPE_DELAYEDMETHODCALL)
-        continue;
-    addDelayedWork(server, work[k]);
-    work[k].type = UA_WORKITEMTYPE_NOTHING;
-}
-dispatchWork(server, workSize, work);
-if(workSize > 0)
-    pthread_cond_broadcast(&server->dispatchQueue_condition);
+        /* Filter out delayed work */
+        for(UA_Int32 k=0;k<jobsSize;k++) {
+            if(jobs[k].type != UA_JOBTYPE_DELAYEDMETHODCALL)
+                continue;
+            addDelayedJob(server, &jobs[k]);
+            jobs[k].type = UA_JOBTYPE_NOTHING;
+        }
+
+        /* Dispatch work to the worker threads */
+        dispatchJobs(server, jobs, jobsSize);
+
+        /* Trigger sleeping worker threads */
+        if(jobsSize > 0)
+            pthread_cond_broadcast(&server->dispatchQueue_condition);
 #else
-processWork(server, work, workSize);
-if(workSize > 0)
-    UA_free(work);
+        processJobs(server, jobs, jobsSize);
+        if(jobsSize > 0)
+            UA_free(jobs);
 #endif
     }
     return UA_STATUSCODE_GOOD;
@@ -504,34 +582,35 @@ if(workSize > 0)
 
 UA_StatusCode UA_Server_run_shutdown(UA_Server *server, UA_UInt16 nThreads){
 #ifdef UA_MULTITHREADING
-    // 4) Clean up: Wait until all worker threads finish, then empty the
-    // dispatch queue, then process the remaining delayed work
+    /* Wait for all worker threads to finish */
     for(UA_UInt32 i=0;i<nThreads;i++) {
         pthread_join(server->thr[i], UA_NULL);
         UA_free(server->workerCounters[i]);
     }
     UA_free(server->workerCounters);
     UA_free(server->thr);
+
+    /* Manually finish the work still enqueued */
     emptyDispatchQueue(server);
-    processDelayedWork(server);
-#endif
 
+    /* Process the remaining delayed work */
+    struct DelayedJobs *dw = server->delayedJobs;
+    while(dw) {
+        processJobs(server, dw->jobs, dw->jobsCount);
+        struct DelayedJobs *next = dw->next;
+        UA_free(dw->workerCounters);
+        UA_free(dw);
+        dw = next;
+    }
+#endif
     return UA_STATUSCODE_GOOD;
 }
 
 UA_StatusCode UA_Server_run(UA_Server *server, UA_UInt16 nThreads, UA_Boolean *running) {
     UA_Server_run_startup(server, nThreads, running);
-
-    // 3) The loop
-    while(1) {
-        UA_Server_run_getAndProcessWork(server, running);
-
-        // 3.3) Exit?
-        if(!*running)
-            break;
+    while(*running) {
+        UA_Server_run_mainloop(server, running);
     }
-
     UA_Server_run_shutdown(server, nThreads);
-
     return UA_STATUSCODE_GOOD;
 }

+ 16 - 2
src/server/ua_services_attribute.c

@@ -8,7 +8,11 @@
 static UA_StatusCode parse_numericrange(const UA_String str, UA_NumericRange *range) {
     if(str.length < 0 || str.length >= 1023)
         return UA_STATUSCODE_BADINTERNALERROR;
+#ifdef NO_ALLOCA
+    char cstring[str.length+1];
+#else
     char *cstring = UA_alloca(str.length+1);
+#endif
     UA_memcpy(cstring, str.data, str.length);
     cstring[str.length] = 0;
     UA_Int32 index = 0;
@@ -370,9 +374,14 @@ void Service_Read(UA_Server *server, UA_Session *session, const UA_ReadRequest *
     }
 
 #ifdef UA_EXTERNAL_NAMESPACES
+#ifdef NO_ALLOCA
+    UA_Boolean isExternal[size];
+    UA_UInt32 indices[size];
+#else
     UA_Boolean *isExternal = UA_alloca(sizeof(UA_Boolean) * size);
-    UA_memset(isExternal, UA_FALSE, sizeof(UA_Boolean) * size);
     UA_UInt32 *indices = UA_alloca(sizeof(UA_UInt32) * size);
+#endif /*NO_ALLOCA */
+    UA_memset(isExternal, UA_FALSE, sizeof(UA_Boolean) * size);
     for(size_t j = 0;j<server->externalNamespacesSize;j++) {
         size_t indexSize = 0;
         for(size_t i = 0;i < size;i++) {
@@ -592,9 +601,14 @@ void Service_Write(UA_Server *server, UA_Session *session, const UA_WriteRequest
     }
 
 #ifdef UA_EXTERNAL_NAMESPACES
+#ifdef NO_ALLOCA
+    UA_Boolean isExternal[request->nodesToWriteSize];
+    UA_UInt32 indices[request->nodesToWriteSize];
+#else
     UA_Boolean *isExternal = UA_alloca(sizeof(UA_Boolean) * request->nodesToWriteSize);
-    UA_memset(isExternal, UA_FALSE, sizeof(UA_Boolean)*request->nodesToWriteSize);
     UA_UInt32 *indices = UA_alloca(sizeof(UA_UInt32) * request->nodesToWriteSize);
+#endif /*NO_ALLOCA */
+    UA_memset(isExternal, UA_FALSE, sizeof(UA_Boolean)*request->nodesToWriteSize);
     for(size_t j = 0; j < server->externalNamespacesSize; j++) {
         UA_UInt32 indexSize = 0;
         for(UA_Int32 i = 0; i < request->nodesToWriteSize; i++) {

+ 5 - 1
src/server/ua_services_discovery.c

@@ -18,7 +18,11 @@ void Service_FindServers(UA_Server *server, const UA_FindServersRequest *request
 
 void Service_GetEndpoints(UA_Server *server, const UA_GetEndpointsRequest *request, UA_GetEndpointsResponse *response) {
     /* test if the supported binary profile shall be returned */
-    UA_Boolean *relevant_endpoints = UA_alloca(sizeof(UA_Boolean)*server->endpointDescriptionsSize);
+#ifdef NO_ALLOCA
+	UA_Boolean relevant_endpoints[server->endpointDescriptionsSize];
+#else
+	UA_Boolean *relevant_endpoints = UA_alloca(sizeof(UA_Boolean)*server->endpointDescriptionsSize);
+#endif /*NO_ALLOCA */
     size_t relevant_count = 0;
     for(UA_Int32 j = 0; j < server->endpointDescriptionsSize; j++) {
         relevant_endpoints[j] = UA_FALSE;

+ 14 - 4
src/server/ua_services_nodemanagement.c

@@ -251,9 +251,14 @@ void Service_AddNodes(UA_Server *server, UA_Session *session, const UA_AddNodesR
     }
 
 #ifdef UA_EXTERNAL_NAMESPACES
+#ifdef NO_ALLOCA
+    UA_Boolean isExternal[size];
+    UA_UInt32 indices[size];
+#else
     UA_Boolean *isExternal = UA_alloca(sizeof(UA_Boolean) * size);
-    UA_memset(isExternal, UA_FALSE, sizeof(UA_Boolean) * size);
     UA_UInt32 *indices = UA_alloca(sizeof(UA_UInt32) * size);
+#endif /*NO_ALLOCA */
+    UA_memset(isExternal, UA_FALSE, sizeof(UA_Boolean) * size);
     for(size_t j = 0; j <server->externalNamespacesSize; j++) {
         size_t indexSize = 0;
         for(size_t i = 0;i < size;i++) {
@@ -297,9 +302,14 @@ void Service_AddReferences(UA_Server *server, UA_Session *session, const UA_AddR
 	UA_memset(response->results, UA_STATUSCODE_GOOD, sizeof(UA_StatusCode) * size);
 
 #ifdef UA_EXTERNAL_NAMESPACES
-	UA_Boolean *isExternal = UA_alloca(sizeof(UA_Boolean) * size);
-	UA_memset(isExternal, UA_FALSE, sizeof(UA_Boolean) * size);
-	UA_UInt32 *indices = UA_alloca(sizeof(UA_UInt32) * size);
+#ifdef NO_ALLOCA
+    UA_Boolean isExternal[size];
+    UA_UInt32 indices[size];
+#else
+    UA_Boolean *isExternal = UA_alloca(sizeof(UA_Boolean) * size);
+    UA_UInt32 *indices = UA_alloca(sizeof(UA_UInt32) * size);
+#endif /*NO_ALLOCA */
+    UA_memset(isExternal, UA_FALSE, sizeof(UA_Boolean) * size);
 	for(size_t j = 0; j < server->externalNamespacesSize; j++) {
 		size_t indicesSize = 0;
 		for(size_t i = 0;i < size;i++) {

+ 91 - 8
src/server/ua_services_view.c

@@ -43,7 +43,7 @@ static UA_StatusCode fillrefdescr(UA_NodeStore *ns, const UA_Node *curr, UA_Refe
 
 /* Tests if the node is relevant to the browse request and shall be returned. If
    so, it is retrieved from the Nodestore. If not, null is returned. */
-static const UA_Node *relevant_node(UA_NodeStore *ns, const UA_BrowseDescription *descr,
+static const UA_Node *relevant_node(UA_Server *server, UA_NodeStore *ns, const UA_BrowseDescription *descr,
                                     UA_Boolean return_all, UA_ReferenceNode *reference,
                                     UA_NodeId *relevant, size_t relevant_count)
 {
@@ -60,9 +60,82 @@ static const UA_Node *relevant_node(UA_NodeStore *ns, const UA_BrowseDescription
         return UA_NULL;
     }
 is_relevant: ;
+#ifdef UA_EXTERNAL_NAMESPACES
+	const UA_Node *node = NULL;
+	UA_Boolean isExternal = UA_FALSE;
+	size_t nsIndex;
+	for(nsIndex = 0; nsIndex < server->externalNamespacesSize; nsIndex++) {
+		if(reference->targetId.nodeId.namespaceIndex != server->externalNamespaces[nsIndex].index)
+			continue;
+		else{
+			isExternal = UA_TRUE;
+			break;
+		}
+	}
+	if(isExternal == UA_FALSE){
+		node = UA_NodeStore_get(ns, &reference->targetId.nodeId);
+	} else {
+		/*	prepare a read request in the external nodestore	*/
+		UA_ExternalNodeStore *ens = &server->externalNamespaces[nsIndex].externalNodeStore;
+		UA_ReadValueId *readValueIds = UA_Array_new(&UA_TYPES[UA_TYPES_READVALUEID], 6);
+		UA_UInt32 *indices = UA_Array_new(&UA_TYPES[UA_TYPES_UINT32], 6);
+		UA_UInt32 indicesSize = 6;
+		UA_DataValue *readNodesResults = UA_Array_new(&UA_TYPES[UA_TYPES_DATAVALUE], 6);
+		UA_DiagnosticInfo *diagnosticInfos = UA_Array_new(&UA_TYPES[UA_TYPES_DIAGNOSTICINFO], 6);
+		for(UA_UInt32 i = 0; i<6; i++){
+			readValueIds[i].nodeId = reference->targetId.nodeId;
+			UA_String_init(&(readValueIds[i].indexRange));
+		    UA_QualifiedName_init(&(readValueIds[i].dataEncoding));
+		    indices[i] = i;
+		    UA_DataValue_init(&(readNodesResults[i]));
+		    UA_DiagnosticInfo_init(&(diagnosticInfos[i]));
+		}
+		readValueIds[0].attributeId = UA_ATTRIBUTEID_NODECLASS;
+		readValueIds[1].attributeId = UA_ATTRIBUTEID_BROWSENAME;
+		readValueIds[2].attributeId = UA_ATTRIBUTEID_DISPLAYNAME;
+		readValueIds[3].attributeId = UA_ATTRIBUTEID_DESCRIPTION;
+		readValueIds[4].attributeId = UA_ATTRIBUTEID_WRITEMASK;
+		readValueIds[5].attributeId = UA_ATTRIBUTEID_USERWRITEMASK;
+
+		ens->readNodes(ens->ensHandle, NULL, readValueIds,
+				indices, indicesSize, readNodesResults, UA_FALSE, diagnosticInfos);
+		/*	create and fill a dummy nodeStructure	*/
+		UA_Node *tempNode = (UA_Node*) UA_ObjectNode_new();
+		UA_NodeId_copy(&(reference->targetId.nodeId), &(tempNode->nodeId));
+		if(readNodesResults[0].status == UA_STATUSCODE_GOOD){
+			UA_NodeClass_copy((UA_NodeClass*)readNodesResults[0].value.data, &(tempNode->nodeClass));
+		}
+		if(readNodesResults[1].status == UA_STATUSCODE_GOOD){
+			UA_QualifiedName_copy((UA_QualifiedName*)readNodesResults[1].value.data, &(tempNode->browseName));
+		}
+		if(readNodesResults[2].status == UA_STATUSCODE_GOOD){
+			UA_LocalizedText_copy((UA_LocalizedText*)readNodesResults[2].value.data, &(tempNode->displayName));
+		}
+		if(readNodesResults[3].status == UA_STATUSCODE_GOOD){
+			UA_LocalizedText_copy((UA_LocalizedText*)readNodesResults[3].value.data, &(tempNode->description));
+		}
+		if(readNodesResults[4].status == UA_STATUSCODE_GOOD){
+			UA_UInt32_copy((UA_UInt32*)readNodesResults[4].value.data, &(tempNode->writeMask));
+		}
+		if(readNodesResults[5].status == UA_STATUSCODE_GOOD){
+			UA_UInt32_copy((UA_UInt32*)readNodesResults[5].value.data, &(tempNode->userWriteMask));
+		}
+		UA_Array_delete(readValueIds, &UA_TYPES[UA_TYPES_READVALUEID], 6);
+		UA_Array_delete(indices, &UA_TYPES[UA_TYPES_UINT32], 6);
+		UA_Array_delete(readNodesResults, &UA_TYPES[UA_TYPES_DATAVALUE], 6);
+		UA_Array_delete(diagnosticInfos, &UA_TYPES[UA_TYPES_DIAGNOSTICINFO], 6);
+		node = tempNode;
+	}
+#else
     const UA_Node *node = UA_NodeStore_get(ns, &reference->targetId.nodeId);
+#endif
     if(node && descr->nodeClassMask != 0 && (node->nodeClass & descr->nodeClassMask) == 0) {
-        UA_NodeStore_release(node);
+#ifdef UA_EXTERNAL_NAMESPACES
+    	if(isExternal == UA_TRUE){
+    		;
+    	} else
+#endif
+    	UA_NodeStore_release(node);
         return UA_NULL;
     }
     return node;
@@ -154,7 +227,7 @@ static void removeCp(struct ContinuationPointEntry *cp, UA_Session* session){
  * @param maxrefs The maximum number of references the client has requested
  * @param result The entry in the request
  */
-static void browse(UA_Session *session, UA_NodeStore *ns, struct ContinuationPointEntry *cp,
+static void browse(UA_Server *server, UA_Session *session, UA_NodeStore *ns, struct ContinuationPointEntry *cp,
                    const UA_BrowseDescription *descr, UA_UInt32 maxrefs, UA_BrowseResult *result) {
     UA_UInt32 continuationIndex = 0;
     size_t referencesCount = 0;
@@ -231,7 +304,7 @@ static void browse(UA_Session *session, UA_NodeStore *ns, struct ContinuationPoi
     /* loop over the node's references */
     size_t skipped = 0;
     for(; referencesIndex < node->referencesSize && referencesCount < real_maxrefs; referencesIndex++) {
-        const UA_Node *current = relevant_node(ns, descr, all_refs, &node->references[referencesIndex],
+        const UA_Node *current = relevant_node(server, ns, descr, all_refs, &node->references[referencesIndex],
                                                relevant_refs, relevant_refs_size);
         if(!current)
             continue;
@@ -319,9 +392,14 @@ void Service_Browse(UA_Server *server, UA_Session *session, const UA_BrowseReque
     response->resultsSize = size;
     
 #ifdef UA_EXTERNAL_NAMESPACES
+#ifdef NO_ALLOCA
+    UA_Boolean isExternal[size];
+    UA_UInt32 indices[size];
+#else
     UA_Boolean *isExternal = UA_alloca(sizeof(UA_Boolean) * size);
-    UA_memset(isExternal, UA_FALSE, sizeof(UA_Boolean) * size);
     UA_UInt32 *indices = UA_alloca(sizeof(UA_UInt32) * size);
+#endif /*NO_ALLOCA */
+    UA_memset(isExternal, UA_FALSE, sizeof(UA_Boolean) * size);
     for(size_t j = 0; j < server->externalNamespacesSize; j++) {
         size_t indexSize = 0;
         for(size_t i = 0; i < size; i++) {
@@ -343,7 +421,7 @@ void Service_Browse(UA_Server *server, UA_Session *session, const UA_BrowseReque
 #ifdef UA_EXTERNAL_NAMESPACES
         if(!isExternal[i])
 #endif
-            browse(session, server->nodestore, UA_NULL, &request->nodesToBrowse[i],
+            browse(server, session, server->nodestore, UA_NULL, &request->nodesToBrowse[i],
                    request->requestedMaxReferencesPerNode, &response->results[i]);
     }
 }
@@ -367,7 +445,7 @@ void Service_BrowseNext(UA_Server *server, UA_Session *session, const UA_BrowseN
            struct ContinuationPointEntry *cp;
            LIST_FOREACH(cp, &session->continuationPoints, pointers) {
                if(UA_ByteString_equal(&cp->identifier, &request->continuationPoints[i])) {
-                   browse(session, server->nodestore, cp, UA_NULL, 0, &response->results[i]);
+                   browse(server, session, server->nodestore, cp, UA_NULL, 0, &response->results[i]);
                    break;
                }
            }
@@ -530,9 +608,14 @@ void Service_TranslateBrowsePathsToNodeIds(UA_Server *server, UA_Session *sessio
     }
 
 #ifdef UA_EXTERNAL_NAMESPACES
+#ifdef NO_ALLOCA
+    UA_Boolean isExternal[size];
+    UA_UInt32 indices[size];
+#else
     UA_Boolean *isExternal = UA_alloca(sizeof(UA_Boolean) * size);
-    UA_memset(isExternal, UA_FALSE, sizeof(UA_Boolean) * size);
     UA_UInt32 *indices = UA_alloca(sizeof(UA_UInt32) * size);
+#endif /*NO_ALLOCA */
+    UA_memset(isExternal, UA_FALSE, sizeof(UA_Boolean) * size);
     for(size_t j = 0; j < server->externalNamespacesSize; j++) {
     	size_t indexSize = 0;
     	for(size_t i = 0;i < size;i++) {

+ 37 - 37
src/ua_types_encoding_binary.c

@@ -24,7 +24,7 @@ static size_t UA_Array_calcSizeBinary(const void *p, UA_Int32 noElements, const
 }
 
 static UA_StatusCode UA_Array_encodeBinary(const void *src, UA_Int32 noElements, const UA_DataType *dataType,
-                                           UA_ByteString *dst, size_t *restrict offset) {
+                                           UA_ByteString *dst, size_t *UA_RESTRICT offset) {
     if(noElements <= -1)
         noElements = -1;
     UA_Int32_encodeBinary(&noElements, dst, offset);
@@ -37,7 +37,7 @@ static UA_StatusCode UA_Array_encodeBinary(const void *src, UA_Int32 noElements,
     return retval;
 }
 
-static UA_StatusCode UA_Array_decodeBinary(const UA_ByteString *src, size_t *restrict offset,
+static UA_StatusCode UA_Array_decodeBinary(const UA_ByteString *src, size_t *UA_RESTRICT offset,
                                            UA_Int32 noElements, void **dst, const UA_DataType *dataType) {
     if(noElements <= 0) {
         *dst = UA_NULL;
@@ -80,16 +80,16 @@ static UA_StatusCode UA_Array_decodeBinary(const UA_ByteString *src, size_t *res
     size_t TYPE##_calcSizeBinary(TYPE const *p) {                       \
         return TYPE_AS##_calcSizeBinary((const TYPE_AS *)p);            \
     }                                                                   \
-    UA_StatusCode TYPE##_encodeBinary(TYPE const *src, UA_ByteString *dst, size_t *restrict offset) { \
+    UA_StatusCode TYPE##_encodeBinary(TYPE const *src, UA_ByteString *dst, size_t *UA_RESTRICT offset) { \
         return TYPE_AS##_encodeBinary((const TYPE_AS *)src, dst, offset); \
     }                                                                   \
-    UA_StatusCode TYPE##_decodeBinary(UA_ByteString const *src, size_t *restrict offset, TYPE *dst) { \
+    UA_StatusCode TYPE##_decodeBinary(UA_ByteString const *src, size_t *UA_RESTRICT offset, TYPE *dst) { \
         return TYPE_AS##_decodeBinary(src, offset, (TYPE_AS *)dst);     \
     }
 
 /* Boolean */
 UA_TYPE_CALCSIZEBINARY_MEMSIZE(UA_Boolean)
-UA_StatusCode UA_Boolean_encodeBinary(const UA_Boolean *src, UA_ByteString *dst, size_t *restrict offset) {
+UA_StatusCode UA_Boolean_encodeBinary(const UA_Boolean *src, UA_ByteString *dst, size_t *UA_RESTRICT offset) {
     if((UA_Int32)(*offset + sizeof(UA_Boolean)) > dst->length)
         return UA_STATUSCODE_BADENCODINGERROR;
     dst->data[*offset] = (UA_Byte)*src;
@@ -97,7 +97,7 @@ UA_StatusCode UA_Boolean_encodeBinary(const UA_Boolean *src, UA_ByteString *dst,
     return UA_STATUSCODE_GOOD;
 }
 
-UA_StatusCode UA_Boolean_decodeBinary(UA_ByteString const *src, size_t *restrict offset, UA_Boolean *dst) {
+UA_StatusCode UA_Boolean_decodeBinary(UA_ByteString const *src, size_t *UA_RESTRICT offset, UA_Boolean *dst) {
     if((UA_Int32)(*offset + sizeof(UA_Boolean)) > src->length )
         return UA_STATUSCODE_BADDECODINGERROR;
     *dst = (src->data[*offset] > 0) ? UA_TRUE : UA_FALSE;
@@ -110,7 +110,7 @@ UA_TYPE_BINARY_ENCODING_AS(UA_SByte, UA_Byte)
 
 /* Byte */
 UA_TYPE_CALCSIZEBINARY_MEMSIZE(UA_Byte)
-UA_StatusCode UA_Byte_encodeBinary(const UA_Byte *src, UA_ByteString *dst, size_t *restrict offset) {
+UA_StatusCode UA_Byte_encodeBinary(const UA_Byte *src, UA_ByteString *dst, size_t *UA_RESTRICT offset) {
     if((UA_Int32)(*offset + sizeof(UA_Byte)) > dst->length)
         return UA_STATUSCODE_BADENCODINGERROR;
     dst->data[*offset] = (UA_Byte)*src;
@@ -118,7 +118,7 @@ UA_StatusCode UA_Byte_encodeBinary(const UA_Byte *src, UA_ByteString *dst, size_
     return UA_STATUSCODE_GOOD;
 }
 
-UA_StatusCode UA_Byte_decodeBinary(UA_ByteString const *src, size_t *restrict offset, UA_Byte *dst) {
+UA_StatusCode UA_Byte_decodeBinary(UA_ByteString const *src, size_t *UA_RESTRICT offset, UA_Byte *dst) {
     if((UA_Int32)(*offset + sizeof(UA_Byte)) > src->length )
         return UA_STATUSCODE_BADDECODINGERROR;
     *dst = src->data[*offset];
@@ -131,7 +131,7 @@ UA_TYPE_BINARY_ENCODING_AS(UA_Int16, UA_UInt16)
 
 /* UInt16 */
 UA_TYPE_CALCSIZEBINARY_MEMSIZE(UA_UInt16)
-UA_StatusCode UA_UInt16_encodeBinary(UA_UInt16 const *src, UA_ByteString *dst, size_t *restrict offset) {
+UA_StatusCode UA_UInt16_encodeBinary(UA_UInt16 const *src, UA_ByteString *dst, size_t *UA_RESTRICT offset) {
     if((UA_Int32)(*offset + sizeof(UA_UInt16)) > dst->length )
         return UA_STATUSCODE_BADENCODINGERROR;
     UA_UInt16 *dst_ptr = (UA_UInt16*)&dst->data[*offset];
@@ -144,7 +144,7 @@ UA_StatusCode UA_UInt16_encodeBinary(UA_UInt16 const *src, UA_ByteString *dst, s
     return UA_STATUSCODE_GOOD;
 }
 
-UA_StatusCode UA_UInt16_decodeBinary(UA_ByteString const *src, size_t *restrict offset, UA_UInt16 *dst) {
+UA_StatusCode UA_UInt16_decodeBinary(UA_ByteString const *src, size_t *UA_RESTRICT offset, UA_UInt16 *dst) {
     if((UA_Int32)(*offset + sizeof(UA_UInt16)) > src->length)
         return UA_STATUSCODE_BADDECODINGERROR;
     UA_UInt16 value = *((UA_UInt16*)&src->data[*offset]);
@@ -161,7 +161,7 @@ UA_TYPE_BINARY_ENCODING_AS(UA_Int32, UA_UInt32)
 
 /* UInt32 */
 UA_TYPE_CALCSIZEBINARY_MEMSIZE(UA_UInt32)
-UA_StatusCode UA_UInt32_encodeBinary(UA_UInt32 const *src, UA_ByteString * dst, size_t *restrict offset) {
+UA_StatusCode UA_UInt32_encodeBinary(UA_UInt32 const *src, UA_ByteString * dst, size_t *UA_RESTRICT offset) {
     if((UA_Int32)(*offset + sizeof(UA_UInt32)) > dst->length )
         return UA_STATUSCODE_BADENCODINGERROR;
     UA_UInt32 *dst_ptr = (UA_UInt32*)&dst->data[*offset];
@@ -174,7 +174,7 @@ UA_StatusCode UA_UInt32_encodeBinary(UA_UInt32 const *src, UA_ByteString * dst,
     return UA_STATUSCODE_GOOD;
 }
 
-UA_StatusCode UA_UInt32_decodeBinary(UA_ByteString const *src, size_t *restrict offset, UA_UInt32 * dst) {
+UA_StatusCode UA_UInt32_decodeBinary(UA_ByteString const *src, size_t *UA_RESTRICT offset, UA_UInt32 * dst) {
     if((UA_Int32)(*offset + sizeof(UA_UInt32)) > src->length)
         return UA_STATUSCODE_BADDECODINGERROR;
     UA_UInt32 value = *((UA_UInt32*)&src->data[*offset]);
@@ -191,7 +191,7 @@ UA_TYPE_BINARY_ENCODING_AS(UA_Int64, UA_UInt64)
 
 /* UInt64 */
 UA_TYPE_CALCSIZEBINARY_MEMSIZE(UA_UInt64)
-UA_StatusCode UA_UInt64_encodeBinary(UA_UInt64 const *src, UA_ByteString *dst, size_t *restrict offset) {
+UA_StatusCode UA_UInt64_encodeBinary(UA_UInt64 const *src, UA_ByteString *dst, size_t *UA_RESTRICT offset) {
     if((UA_Int32)(*offset + sizeof(UA_UInt64)) > dst->length )
         return UA_STATUSCODE_BADENCODINGERROR;
     UA_UInt64 *dst_ptr = (UA_UInt64*)&dst->data[*offset];
@@ -204,7 +204,7 @@ UA_StatusCode UA_UInt64_encodeBinary(UA_UInt64 const *src, UA_ByteString *dst, s
     return UA_STATUSCODE_GOOD;
 }
 
-UA_StatusCode UA_UInt64_decodeBinary(UA_ByteString const *src, size_t *restrict offset, UA_UInt64 * dst) {
+UA_StatusCode UA_UInt64_decodeBinary(UA_ByteString const *src, size_t *UA_RESTRICT offset, UA_UInt64 * dst) {
     if((UA_Int32)(*offset + sizeof(UA_UInt64)) > src->length)
         return UA_STATUSCODE_BADDECODINGERROR;
     UA_UInt64 value = *((UA_UInt64*)&src->data[*offset]);
@@ -271,7 +271,7 @@ size_t UA_String_calcSizeBinary(UA_String const *string) {
         return sizeof(UA_Int32);
 }
 
-UA_StatusCode UA_String_encodeBinary(UA_String const *src, UA_ByteString *dst, size_t *restrict offset) {
+UA_StatusCode UA_String_encodeBinary(UA_String const *src, UA_ByteString *dst, size_t *UA_RESTRICT offset) {
     if((UA_Int32)(*offset + UA_String_calcSizeBinary(src)) > dst->length)
         return UA_STATUSCODE_BADENCODINGERROR;
 
@@ -283,7 +283,7 @@ UA_StatusCode UA_String_encodeBinary(UA_String const *src, UA_ByteString *dst, s
     return retval;
 }
 
-UA_StatusCode UA_String_decodeBinary(UA_ByteString const *src, size_t *restrict offset, UA_String *dst) {
+UA_StatusCode UA_String_decodeBinary(UA_ByteString const *src, size_t *UA_RESTRICT offset, UA_String *dst) {
     UA_String_init(dst);
     UA_Int32 length;
     if(UA_Int32_decodeBinary(src, offset, &length))
@@ -316,7 +316,7 @@ size_t UA_Guid_calcSizeBinary(UA_Guid const *p) {
     return 16;
 }
 
-UA_StatusCode UA_Guid_encodeBinary(UA_Guid const *src, UA_ByteString * dst, size_t *restrict offset) {
+UA_StatusCode UA_Guid_encodeBinary(UA_Guid const *src, UA_ByteString * dst, size_t *UA_RESTRICT offset) {
     UA_StatusCode retval = UA_UInt32_encodeBinary(&src->data1, dst, offset);
     retval |= UA_UInt16_encodeBinary(&src->data2, dst, offset);
     retval |= UA_UInt16_encodeBinary(&src->data3, dst, offset);
@@ -325,7 +325,7 @@ UA_StatusCode UA_Guid_encodeBinary(UA_Guid const *src, UA_ByteString * dst, size
     return retval;
 }
 
-UA_StatusCode UA_Guid_decodeBinary(UA_ByteString const *src, size_t *restrict offset, UA_Guid * dst) {
+UA_StatusCode UA_Guid_decodeBinary(UA_ByteString const *src, size_t *UA_RESTRICT offset, UA_Guid * dst) {
     // This could be done with a single memcpy (if the compiler does no fancy realigning of structs)
     UA_StatusCode retval = UA_UInt32_decodeBinary(src, offset, &dst->data1);
     retval |= UA_UInt16_decodeBinary(src, offset, &dst->data2);
@@ -377,7 +377,7 @@ size_t UA_NodeId_calcSizeBinary(UA_NodeId const *p) {
 }
 
 static UA_StatusCode UA_NodeId_encodeBinary_nodeid_offset(UA_NodeId const *src, UA_ByteString * dst,
-                                                          size_t *restrict offset, UA_Int32 nodeid_offset) {
+                                                          size_t *UA_RESTRICT offset, UA_Int32 nodeid_offset) {
     UA_StatusCode retval = UA_STATUSCODE_GOOD;
     // temporary variables for endian-save code
     UA_Byte srcByte;
@@ -429,11 +429,11 @@ static UA_StatusCode UA_NodeId_encodeBinary_nodeid_offset(UA_NodeId const *src,
     return retval;
 }
 
-UA_StatusCode UA_NodeId_encodeBinary(UA_NodeId const *src, UA_ByteString * dst, size_t *restrict offset) {
+UA_StatusCode UA_NodeId_encodeBinary(UA_NodeId const *src, UA_ByteString * dst, size_t *UA_RESTRICT offset) {
 	return UA_NodeId_encodeBinary_nodeid_offset(src, dst, offset, 0);
 }
 
-UA_StatusCode UA_NodeId_decodeBinary(UA_ByteString const *src, size_t *restrict offset, UA_NodeId *dst) {
+UA_StatusCode UA_NodeId_decodeBinary(UA_ByteString const *src, size_t *UA_RESTRICT offset, UA_NodeId *dst) {
     // temporary variables to overcome decoder's non-endian-saveness for datatypes with different length
     UA_Byte   dstByte = 0;
     UA_UInt16 dstUInt16 = 0;
@@ -503,7 +503,7 @@ size_t UA_ExpandedNodeId_calcSizeBinary(UA_ExpandedNodeId const *p) {
 #define UA_EXPANDEDNODEID_SERVERINDEX_FLAG 0x40
 
 UA_StatusCode UA_ExpandedNodeId_encodeBinary(UA_ExpandedNodeId const *src, UA_ByteString *dst,
-                                             size_t *restrict offset) {
+                                             size_t *UA_RESTRICT offset) {
     UA_Byte flags = 0;
     UA_UInt32 start = *offset;
     UA_StatusCode retval = UA_NodeId_encodeBinary(&src->nodeId, dst, offset);
@@ -521,7 +521,7 @@ UA_StatusCode UA_ExpandedNodeId_encodeBinary(UA_ExpandedNodeId const *src, UA_By
     return retval;
 }
 
-UA_StatusCode UA_ExpandedNodeId_decodeBinary(UA_ByteString const *src, size_t *restrict offset,
+UA_StatusCode UA_ExpandedNodeId_decodeBinary(UA_ByteString const *src, size_t *UA_RESTRICT offset,
                                              UA_ExpandedNodeId *dst) {
     UA_ExpandedNodeId_init(dst);
     // get encodingflags and leave a "clean" nodeidtype
@@ -553,7 +553,7 @@ size_t UA_QualifiedName_calcSizeBinary(UA_QualifiedName const *p) {
     return length;
 }
 
-UA_StatusCode UA_QualifiedName_decodeBinary(UA_ByteString const *src, size_t *restrict offset,
+UA_StatusCode UA_QualifiedName_decodeBinary(UA_ByteString const *src, size_t *UA_RESTRICT offset,
                                             UA_QualifiedName *dst) {
     UA_QualifiedName_init(dst);
     UA_StatusCode retval = UA_UInt16_decodeBinary(src, offset, &dst->namespaceIndex);
@@ -564,7 +564,7 @@ UA_StatusCode UA_QualifiedName_decodeBinary(UA_ByteString const *src, size_t *re
 }
 
 UA_StatusCode UA_QualifiedName_encodeBinary(UA_QualifiedName const *src, UA_ByteString* dst,
-                                            size_t *restrict offset) {
+                                            size_t *UA_RESTRICT offset) {
     UA_StatusCode retval = UA_UInt16_encodeBinary(&src->namespaceIndex, dst, offset);
     retval |= UA_String_encodeBinary(&src->name, dst, offset);
     return retval;
@@ -584,7 +584,7 @@ size_t UA_LocalizedText_calcSizeBinary(UA_LocalizedText const *p) {
 }
 
 UA_StatusCode UA_LocalizedText_encodeBinary(UA_LocalizedText const *src, UA_ByteString * dst,
-                                            size_t *restrict offset) {
+                                            size_t *UA_RESTRICT offset) {
     UA_Byte encodingMask = 0;
     if(src->locale.data != UA_NULL)
         encodingMask |= UA_LOCALIZEDTEXT_ENCODINGMASKTYPE_LOCALE;
@@ -598,7 +598,7 @@ UA_StatusCode UA_LocalizedText_encodeBinary(UA_LocalizedText const *src, UA_Byte
     return retval;
 }
 
-UA_StatusCode UA_LocalizedText_decodeBinary(UA_ByteString const *src, size_t *restrict offset,
+UA_StatusCode UA_LocalizedText_decodeBinary(UA_ByteString const *src, size_t *UA_RESTRICT offset,
                                             UA_LocalizedText *dst) {
     UA_LocalizedText_init(dst);
     UA_Byte encodingMask = 0;
@@ -632,7 +632,7 @@ size_t UA_ExtensionObject_calcSizeBinary(UA_ExtensionObject const *p) {
 }
 
 UA_StatusCode UA_ExtensionObject_encodeBinary(UA_ExtensionObject const *src, UA_ByteString * dst,
-                                              size_t *restrict offset) {
+                                              size_t *UA_RESTRICT offset) {
     UA_StatusCode retval = UA_NodeId_encodeBinary(&src->typeId, dst, offset);
     retval |= UA_Byte_encodeBinary((const UA_Byte*)&src->encoding, dst, offset);
     switch(src->encoding) {
@@ -650,7 +650,7 @@ UA_StatusCode UA_ExtensionObject_encodeBinary(UA_ExtensionObject const *src, UA_
     return retval;
 }
 
-UA_StatusCode UA_ExtensionObject_decodeBinary(UA_ByteString const *src, size_t *restrict offset,
+UA_StatusCode UA_ExtensionObject_decodeBinary(UA_ByteString const *src, size_t *UA_RESTRICT offset,
                                               UA_ExtensionObject *dst) {
     UA_ExtensionObject_init(dst);
     UA_Byte encoding = 0;
@@ -696,7 +696,7 @@ size_t UA_DataValue_calcSizeBinary(UA_DataValue const *p) {
     return length;
 }
 
-UA_StatusCode UA_DataValue_encodeBinary(UA_DataValue const *src, UA_ByteString *dst, size_t *restrict offset) {
+UA_StatusCode UA_DataValue_encodeBinary(UA_DataValue const *src, UA_ByteString *dst, size_t *UA_RESTRICT offset) {
     UA_StatusCode retval = UA_Byte_encodeBinary((const UA_Byte*)src, dst, offset);
     if(src->hasValue)
         retval |= UA_Variant_encodeBinary(&src->value, dst, offset);
@@ -714,7 +714,7 @@ UA_StatusCode UA_DataValue_encodeBinary(UA_DataValue const *src, UA_ByteString *
 }
 
 #define MAX_PICO_SECONDS 1000
-UA_StatusCode UA_DataValue_decodeBinary(UA_ByteString const *src, size_t *restrict offset, UA_DataValue *dst) {
+UA_StatusCode UA_DataValue_decodeBinary(UA_ByteString const *src, size_t *UA_RESTRICT offset, UA_DataValue *dst) {
     UA_DataValue_init(dst);
     UA_StatusCode retval = UA_Byte_decodeBinary(src, offset, (UA_Byte*)dst);
     if(retval != UA_STATUSCODE_GOOD)
@@ -781,7 +781,7 @@ size_t UA_Variant_calcSizeBinary(UA_Variant const *p) {
     return length;
 }
 
-UA_StatusCode UA_Variant_encodeBinary(UA_Variant const *src, UA_ByteString *dst, size_t *restrict offset) {
+UA_StatusCode UA_Variant_encodeBinary(UA_Variant const *src, UA_ByteString *dst, size_t *UA_RESTRICT offset) {
     UA_Byte encodingByte = 0;
     UA_Boolean isArray = src->arrayLength != -1 || !src->data;  // a single element is not an array
     UA_Boolean hasDimensions = isArray && src->arrayDimensions != UA_NULL;
@@ -830,7 +830,7 @@ UA_StatusCode UA_Variant_encodeBinary(UA_Variant const *src, UA_ByteString *dst,
 
 /* The resulting variant always has the storagetype UA_VARIANT_DATA. Currently,
    we only support ns0 types (todo: attach typedescriptions to datatypenodes) */
-UA_StatusCode UA_Variant_decodeBinary(UA_ByteString const *src, size_t *restrict offset, UA_Variant *dst) {
+UA_StatusCode UA_Variant_decodeBinary(UA_ByteString const *src, size_t *UA_RESTRICT offset, UA_Variant *dst) {
     UA_Variant_init(dst);
     UA_Byte encodingByte;
     UA_StatusCode retval = UA_Byte_decodeBinary(src, offset, &encodingByte);
@@ -924,7 +924,7 @@ size_t UA_DiagnosticInfo_calcSizeBinary(UA_DiagnosticInfo const *ptr) {
 }
 
 UA_StatusCode UA_DiagnosticInfo_encodeBinary(const UA_DiagnosticInfo *src, UA_ByteString * dst,
-                                             size_t *restrict offset) {
+                                             size_t *UA_RESTRICT offset) {
     UA_StatusCode retval = UA_Byte_encodeBinary((const UA_Byte *)src, dst, offset);
     if(src->hasSymbolicId)
         retval |= UA_Int32_encodeBinary(&src->symbolicId, dst, offset);
@@ -943,7 +943,7 @@ UA_StatusCode UA_DiagnosticInfo_encodeBinary(const UA_DiagnosticInfo *src, UA_By
     return retval;
 }
 
-UA_StatusCode UA_DiagnosticInfo_decodeBinary(UA_ByteString const *src, size_t *restrict offset,
+UA_StatusCode UA_DiagnosticInfo_decodeBinary(UA_ByteString const *src, size_t *UA_RESTRICT offset,
                                              UA_DiagnosticInfo *dst) {
     UA_DiagnosticInfo_init(dst);
     UA_StatusCode retval = UA_Byte_decodeBinary(src, offset, (UA_Byte*)dst);
@@ -1066,7 +1066,7 @@ size_t UA_calcSizeBinary(const void *p, const UA_DataType *dataType) {
 }
 
 UA_StatusCode UA_encodeBinary(const void *src, const UA_DataType *dataType, UA_ByteString *dst,
-                              size_t *restrict offset) {
+                              size_t *UA_RESTRICT offset) {
     uintptr_t ptr = (uintptr_t)src;
     UA_StatusCode retval = UA_STATUSCODE_GOOD;
     UA_Byte membersSize = dataType->membersSize;
@@ -1149,7 +1149,7 @@ UA_StatusCode UA_encodeBinary(const void *src, const UA_DataType *dataType, UA_B
     return retval;
 }
 
-UA_StatusCode UA_decodeBinary(const UA_ByteString *src, size_t *restrict offset, void *dst,
+UA_StatusCode UA_decodeBinary(const UA_ByteString *src, size_t *UA_RESTRICT offset, void *dst,
                               const UA_DataType *dataType) {
     UA_init(dst, dataType);
     uintptr_t ptr = (uintptr_t)dst;

+ 5 - 4
src/ua_types_encoding_binary.h

@@ -2,6 +2,7 @@
 #define UA_TYPES_ENCODING_BINARY_H_
 
 #include "ua_types.h"
+#include "ua_util.h"
 
 /**
  * @ingroup types
@@ -32,8 +33,8 @@
 
 #define UA_TYPE_BINARY_ENCODING(TYPE)                                   \
     size_t TYPE##_calcSizeBinary(TYPE const *p);                        \
-    UA_StatusCode TYPE##_encodeBinary(TYPE const *src, UA_ByteString *dst, size_t *restrict offset); \
-    UA_StatusCode TYPE##_decodeBinary(UA_ByteString const *src, size_t *restrict offset, TYPE *dst);
+    UA_StatusCode TYPE##_encodeBinary(TYPE const *src, UA_ByteString *dst, size_t *UA_RESTRICT offset); \
+    UA_StatusCode TYPE##_decodeBinary(UA_ByteString const *src, size_t *UA_RESTRICT offset, TYPE *dst);
 
 UA_TYPE_BINARY_ENCODING(UA_Boolean)
 UA_TYPE_BINARY_ENCODING(UA_SByte)
@@ -63,8 +64,8 @@ UA_TYPE_BINARY_ENCODING(UA_DiagnosticInfo)
 
 size_t UA_calcSizeBinary(const void *p, const UA_DataType *dataType);
 UA_StatusCode UA_encodeBinary(const void *src, const UA_DataType *dataType, UA_ByteString *dst,
-                              size_t *restrict offset);
-UA_StatusCode UA_decodeBinary(const UA_ByteString *src, size_t *restrict offset, void *dst,
+                              size_t *UA_RESTRICT offset);
+UA_StatusCode UA_decodeBinary(const UA_ByteString *src, size_t *UA_RESTRICT offset, void *dst,
                               const UA_DataType *dataType);
 /// @} /* end of group */
 

+ 11 - 1
src/ua_util.h

@@ -33,6 +33,13 @@
 # include <malloc.h>
 #endif
 
+/* Visual Studio needs __restrict */
+#ifdef _MSC_VER
+    #define UA_RESTRICT __restrict
+#else
+    #define UA_RESTRICT restrict
+#endif
+
 #define UA_NULL ((void *)0)
 
 // subtract from nodeids to get from the encoding to the content
@@ -49,6 +56,8 @@
 #define UA_memcpy(dst, src, size) memcpy(dst, src, size)
 #define UA_memset(ptr, value, size) memset(ptr, value, size)
 
+#ifdef NO_ALLOCA
+#else
 #ifdef _WIN32
     # define UA_alloca(SIZE) _alloca(SIZE)
 #else
@@ -59,7 +68,7 @@
     # define UA_alloca(SIZE) alloca(SIZE)
  #endif
 #endif
-
+#endif /* NO_ALLOCA */
 /********************/
 /* System Libraries */
 /********************/
@@ -109,6 +118,7 @@
 # include <urcu/compiler.h> // for caa_container_of
 # include <urcu/uatomic.h>
 # include <urcu/rculfhash.h>
+# include <urcu/lfstack.h>
 #endif
 
 #endif /* UA_UTIL_H_ */