Quellcode durchsuchen

concurrent namespace (enabled for multithreading)

Julius Pfrommer vor 10 Jahren
Ursprung
Commit
083ef8ad36

+ 4 - 3
Makefile.am

@@ -19,7 +19,7 @@ AM_LDFLAGS += --coverage
 endif
 
 if MULTITHREADING
-AM_LDADD += -lpthread
+AM_LDADD += -lpthread -lurcu-cds -lurcu
 endif
 
 UA_ENCODING_AMOUNT = 1
@@ -33,12 +33,13 @@ export GLOBAL_AM_CFLAGS = $(AM_CFLAGS)
 export GLOBAL_AM_LDADD = $(AM_LDADD)
 
 SUBDIRS=src
+
 if HAVE_CHECK
-	SUBDIRS+= tests
+	SUBDIRS += tests
 endif
 
 if ENABLE_DOXYGEN
-	SUBDIRS+= doc
+	SUBDIRS += doc
 endif
 
 if TARGET_WIN

+ 2 - 4
examples/src/networklayer.c

@@ -62,9 +62,9 @@ void NL_checkFdSet(void* payload) {
 	  c->reader((void*)c);
   }
 }
-UA_Int32 NL_msgLoop(NL_data* nl, struct timeval *tv, UA_Int32(*worker)(void*), void *arg)  {
+UA_Int32 NL_msgLoop(NL_data* nl, struct timeval *tv, UA_Int32(*worker)(void*), void *arg, UA_Boolean *running)  {
 	UA_Int32 result;
-	while (UA_TRUE) {
+	while (*running) {
 		// determine the largest handle
 		nl->maxReaderHandle = 0;
 		UA_list_iteratePayload(&(nl->connections),NL_setFdSet);
@@ -100,9 +100,7 @@ UA_Int32 NL_msgLoop(NL_data* nl, struct timeval *tv, UA_Int32(*worker)(void*), v
 
 		}
 		worker(arg);
-
 	}
-
 	return UA_SUCCESS;
 }
 #endif

+ 1 - 1
examples/src/networklayer.h

@@ -53,7 +53,7 @@ typedef struct NL_Connection {
 } NL_Connection;
 
 NL_data* NL_init(NL_Description* tlDesc, UA_Int32 port);
-UA_Int32 NL_msgLoop(NL_data* nl, struct timeval* tv,UA_Int32 (*timeoutCallBack)(void*),void *arg);
+UA_Int32 NL_msgLoop(NL_data* nl, struct timeval* tv,UA_Int32 (*timeoutCallBack)(void*),void *arg, UA_Boolean *running);
 UA_Int32 NL_TCP_writer(struct TL_Connection const * c, UA_ByteString const * const * gather_buf, UA_UInt32 gather_len);
 
 #endif /* NETWORKLAYER_H_ */

+ 17 - 5
examples/src/opcuaServer.c

@@ -11,6 +11,13 @@
 #include <time.h>
 #include <fcntl.h>
 
+#include <signal.h>
+
+UA_Boolean running = UA_TRUE;
+
+void stopHandler(int sign) {
+	running = UA_FALSE;
+}
 
 UA_Int32 serverCallback(void * arg) {
 	char *name = (char *) arg;
@@ -18,9 +25,9 @@ UA_Int32 serverCallback(void * arg) {
 
 	Namespace* ns0 = (Namespace*)UA_indexedList_find(appMockup.namespaces, 0)->payload;
 	UA_Int32 retval;
-	UA_Node const * node;
+	const UA_Node * node;
 	UA_ExpandedNodeId serverStatusNodeId = NS0EXPANDEDNODEID(2256);
-	retval = Namespace_get(ns0, &(serverStatusNodeId.nodeId),&node, UA_NULL);
+	retval = Namespace_get(ns0, &serverStatusNodeId.nodeId, &node);
 	if(retval == UA_SUCCESS){
 		((UA_ServerStatusDataType*)(((UA_VariableNode*)node)->value.data))->currentTime = UA_DateTime_now();
 	}
@@ -28,12 +35,17 @@ UA_Int32 serverCallback(void * arg) {
 	return UA_SUCCESS;
 }
 
-
 int main(int argc, char** argv) {
+
+	/* gets called at ctrl-c */
+	signal(SIGINT, stopHandler);
+	
 	appMockup_init();
-	NL_data* nl = NL_init(&NL_Description_TcpBinary,16664);
+	NL_data* nl = NL_init(&NL_Description_TcpBinary, 16664);
 
 	struct timeval tv = {1, 0}; // 1 second
-  	NL_msgLoop(nl, &tv, serverCallback, argv[0]);
+  	NL_msgLoop(nl, &tv, serverCallback, argv[0], &running);
 
+	printf("Shutting down after Ctrl-C.\n");
+	exit(0);
 }

+ 45 - 17
examples/src/opcuaServerMT.c

@@ -1,23 +1,51 @@
-/*
- ============================================================================
- Name        : opcuaServer.c
- Author      :
- Version     :
- Copyright   : Your copyright notice
- Description :
- ============================================================================
- */
-
 #include <stdio.h>
 #include <stdlib.h>
-#include <unistd.h>
+
 #include "networklayer.h"
+#include "ua_application.h"
 
-int main(int argc, char** argv) {
-	NL_init(&NL_Description_TcpBinary,16664);
-	while (UA_TRUE) {
-		printf("%s does whatever servers do\n",argv[0]);
-		sleep(2);
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <time.h>
+#include <fcntl.h>
+
+#include <signal.h>
+
+UA_Boolean running = UA_TRUE;
+
+void stopHandler(int sign) {
+	running = UA_FALSE;
+}
+
+UA_Int32 serverCallback(void * arg) {
+	char *name = (char *) arg;
+	printf("%s does whatever servers do\n",name);
+
+	Namespace* ns0 = (Namespace*)UA_indexedList_find(appMockup.namespaces, 0)->payload;
+	UA_Int32 retval;
+	const UA_Node * node;
+	UA_ExpandedNodeId serverStatusNodeId = NS0EXPANDEDNODEID(2256);
+	retval = Namespace_get(ns0, &serverStatusNodeId.nodeId, &node);
+	if(retval == UA_SUCCESS){
+		((UA_ServerStatusDataType*)(((UA_VariableNode*)node)->value.data))->currentTime = UA_DateTime_now();
 	}
-	return EXIT_SUCCESS;
+
+	return UA_SUCCESS;
+}
+
+int main(int argc, char** argv) {
+
+	/* gets called at ctrl-c */
+	signal(SIGINT, stopHandler);
+	
+	appMockup_init();
+	NL_data* nl = NL_init(&NL_Description_TcpBinary, 16664);
+
+	struct timeval tv = {1, 0}; // 1 second
+  	NL_msgLoop(nl, &tv, serverCallback, argv[0], &running);
+
+	printf("Shutting down after Ctrl-C.\n");
+	exit(0);
 }

+ 7 - 2
src/Makefile.am

@@ -5,9 +5,9 @@ lib_LTLIBRARIES = libopen62541.la
 lib_LTLIBRARIES: generate_types generate_namespace_0
 libopen62541_la_LDFLAGS = -avoid-version -no-undefined
 libopen62541_la_SOURCES = ua_types.c \
+						  ua_namespace_0.c \
 						  ua_types_encoding_binary.c \
 						  ua_types_generated.c \
-						  ua_namespace_0.c \
 						  util/ua_util.c \
 						  util/ua_list.c \
 						  util/ua_indexedList.c \
@@ -15,7 +15,6 @@ libopen62541_la_SOURCES = ua_types.c \
 						  ua_transport.c \
 						  ua_transport_binary.c \
 						  ua_transport_binary_secure.c \
-						  ua_namespace.c \
 						  ua_services_attribute.c \
 						  ua_services_session.c \
 						  ua_services_discovery.c \
@@ -26,6 +25,12 @@ libopen62541_la_SOURCES = ua_types.c \
 						  ua_services_monitoreditems.c\
 						  ua_application.c
 
+if MULTITHREADING
+libopen62541_la_SOURCES += ua_namespace_concurrent.c
+else
+libopen62541_la_SOURCES += ua_namespace.c
+endif
+
 WITH_XML =
 if UA_ENCODING_XML
 libopen62541_la_SOURCES += ua_types_encoding_xml.c \

+ 51 - 48
src/ua_application.c

@@ -34,10 +34,10 @@ void appMockup_init() {
 	// create namespaces
 	// TODO: A table that maps the namespaceUris to Ids
 	Namespace* ns0;
-	Namespace_new(&ns0, 100, 0); //C2UA_STRING("http://opcfoundation.org/UA/"));
+	Namespace_new(&ns0, 0); //C2UA_STRING("http://opcfoundation.org/UA/"));
 
 	Namespace* local;
-	Namespace_new(&local, 100, 1); //C2UA_STRING("http://localhost:16664/open62541/"));
+	Namespace_new(&local, 1); //C2UA_STRING("http://localhost:16664/open62541/"));
 
 	// add to list of namespaces
 	UA_indexedList_init(appMockup.namespaces);
@@ -82,7 +82,7 @@ void appMockup_init() {
 	references->description = UA_LOCALIZEDTEXT_STATIC("References");
 	references->isAbstract = UA_TRUE;
 	references->symmetric = UA_TRUE;
-	Namespace_insert(ns0,(UA_Node*)references);
+	Namespace_insert(ns0,(const UA_Node**)&references, NAMESPACE_INSERT_UNIQUE);
 
 	UA_ReferenceTypeNode *hierarchicalreferences;
 	UA_ReferenceTypeNode_new(&hierarchicalreferences);
@@ -95,7 +95,7 @@ void appMockup_init() {
 	hierarchicalreferences->symmetric = UA_FALSE;
 	AddReference((UA_Node*)hierarchicalreferences, &(UA_ReferenceNode){RefTypeId_HasSubtype, UA_TRUE,
 		(UA_ExpandedNodeId){RefTypeId_References, UA_STRING_NULL, 0}}, ns0);
-	Namespace_insert(ns0,(UA_Node*)hierarchicalreferences);
+	Namespace_insert(ns0,(const UA_Node**)&hierarchicalreferences, NAMESPACE_INSERT_UNIQUE);
 
 	UA_ReferenceTypeNode *nonhierarchicalreferences;
 	UA_ReferenceTypeNode_new(&nonhierarchicalreferences);
@@ -108,7 +108,7 @@ void appMockup_init() {
 	nonhierarchicalreferences->symmetric = UA_FALSE;
 	AddReference((UA_Node*)nonhierarchicalreferences, &(UA_ReferenceNode){RefTypeId_HasSubtype, UA_TRUE,
 		(UA_ExpandedNodeId){RefTypeId_References, UA_STRING_NULL, 0}}, ns0);
-	Namespace_insert(ns0,(UA_Node*)nonhierarchicalreferences);
+	Namespace_insert(ns0,(const UA_Node**)&nonhierarchicalreferences, NAMESPACE_INSERT_UNIQUE);
 
 	UA_ReferenceTypeNode *haschild;
 	UA_ReferenceTypeNode_new(&haschild);
@@ -121,7 +121,7 @@ void appMockup_init() {
 	haschild->symmetric = UA_FALSE;
 	AddReference((UA_Node*)haschild, &(UA_ReferenceNode){RefTypeId_HasSubtype, UA_TRUE,
 		(UA_ExpandedNodeId){RefTypeId_HierarchicalReferences, UA_STRING_NULL, 0}}, ns0);
-	Namespace_insert(ns0,(UA_Node*)haschild);
+	Namespace_insert(ns0,(const UA_Node**)&haschild, NAMESPACE_INSERT_UNIQUE);
 
 	UA_ReferenceTypeNode *organizes;
 	UA_ReferenceTypeNode_new(&organizes);
@@ -135,7 +135,7 @@ void appMockup_init() {
 	organizes->inverseName = UA_LOCALIZEDTEXT_STATIC("OrganizedBy");
 	AddReference((UA_Node*)organizes, &(UA_ReferenceNode){RefTypeId_HasSubtype, UA_TRUE,
 		(UA_ExpandedNodeId){RefTypeId_HierarchicalReferences, UA_STRING_NULL, 0}}, ns0);
-	Namespace_insert(ns0,(UA_Node*)organizes);
+	Namespace_insert(ns0,(const UA_Node**)&organizes, NAMESPACE_INSERT_UNIQUE);
 
 	UA_ReferenceTypeNode *haseventsource;
 	UA_ReferenceTypeNode_new(&haseventsource);
@@ -149,7 +149,7 @@ void appMockup_init() {
 	haseventsource->inverseName = UA_LOCALIZEDTEXT_STATIC("EventSourceOf");
 	AddReference((UA_Node*)haseventsource, &(UA_ReferenceNode){RefTypeId_HasSubtype, UA_TRUE,
 		(UA_ExpandedNodeId){RefTypeId_HierarchicalReferences, UA_STRING_NULL, 0}}, ns0);
-	Namespace_insert(ns0,(UA_Node*)haseventsource);
+	Namespace_insert(ns0,(const UA_Node**)&haseventsource, NAMESPACE_INSERT_UNIQUE);
 
 	UA_ReferenceTypeNode *hasmodellingrule;
 	UA_ReferenceTypeNode_new(&hasmodellingrule);
@@ -163,7 +163,7 @@ void appMockup_init() {
 	hasmodellingrule->inverseName = UA_LOCALIZEDTEXT_STATIC("ModellingRuleOf");
 	AddReference((UA_Node*)hasmodellingrule, &(UA_ReferenceNode){RefTypeId_HasSubtype, UA_TRUE,
 		(UA_ExpandedNodeId){RefTypeId_NonHierarchicalReferences, UA_STRING_NULL, 0}}, ns0);
-	Namespace_insert(ns0,(UA_Node*)hasmodellingrule);
+	Namespace_insert(ns0,(const UA_Node**)&hasmodellingrule, NAMESPACE_INSERT_UNIQUE);
 
 	UA_ReferenceTypeNode *hasencoding;
 	UA_ReferenceTypeNode_new(&hasencoding);
@@ -177,7 +177,7 @@ void appMockup_init() {
 	hasencoding->inverseName = UA_LOCALIZEDTEXT_STATIC("EncodingOf");
 	AddReference((UA_Node*)hasencoding, &(UA_ReferenceNode){RefTypeId_HasSubtype, UA_TRUE,
 		(UA_ExpandedNodeId){RefTypeId_NonHierarchicalReferences, UA_STRING_NULL, 0}}, ns0);
-	Namespace_insert(ns0,(UA_Node*)hasencoding);
+	Namespace_insert(ns0,(const UA_Node**)&hasencoding, NAMESPACE_INSERT_UNIQUE);
 
 	UA_ReferenceTypeNode *hasdescription;
 	UA_ReferenceTypeNode_new(&hasdescription);
@@ -191,7 +191,7 @@ void appMockup_init() {
 	hasdescription->inverseName = UA_LOCALIZEDTEXT_STATIC("DescriptionOf");
 	AddReference((UA_Node*)hasdescription, &(UA_ReferenceNode){RefTypeId_HasSubtype, UA_TRUE,
 		(UA_ExpandedNodeId){RefTypeId_NonHierarchicalReferences, UA_STRING_NULL, 0}}, ns0);
-	Namespace_insert(ns0,(UA_Node*)hasdescription);
+	Namespace_insert(ns0,(const UA_Node**)&hasdescription, NAMESPACE_INSERT_UNIQUE);
 
 	UA_ReferenceTypeNode *hastypedefinition;
 	UA_ReferenceTypeNode_new(&hastypedefinition);
@@ -205,7 +205,7 @@ void appMockup_init() {
 	hastypedefinition->inverseName = UA_LOCALIZEDTEXT_STATIC("TypeDefinitionOf");
 	AddReference((UA_Node*)hastypedefinition, &(UA_ReferenceNode){RefTypeId_HasSubtype, UA_TRUE,
 		(UA_ExpandedNodeId){RefTypeId_NonHierarchicalReferences, UA_STRING_NULL, 0}}, ns0);
-	Namespace_insert(ns0,(UA_Node*)hastypedefinition);
+	Namespace_insert(ns0,(const UA_Node**)&hastypedefinition, NAMESPACE_INSERT_UNIQUE);
 
 	UA_ReferenceTypeNode *generatesevent;
 	UA_ReferenceTypeNode_new(&generatesevent);
@@ -219,7 +219,7 @@ void appMockup_init() {
 	generatesevent->inverseName = UA_LOCALIZEDTEXT_STATIC("GeneratedBy");
 	AddReference((UA_Node*)generatesevent, &(UA_ReferenceNode){RefTypeId_HasSubtype, UA_TRUE,
 		(UA_ExpandedNodeId){RefTypeId_NonHierarchicalReferences, UA_STRING_NULL, 0}}, ns0);
-	Namespace_insert(ns0,(UA_Node*)generatesevent);
+	Namespace_insert(ns0,(const UA_Node**)&generatesevent, NAMESPACE_INSERT_UNIQUE);
 
 	UA_ReferenceTypeNode *aggregates;
 	UA_ReferenceTypeNode_new(&aggregates);
@@ -232,7 +232,7 @@ void appMockup_init() {
 	aggregates->symmetric = UA_FALSE;
 	AddReference((UA_Node*)aggregates, &(UA_ReferenceNode){RefTypeId_HasSubtype, UA_TRUE,
 		(UA_ExpandedNodeId){RefTypeId_HasChild, UA_STRING_NULL, 0}}, ns0);
-	Namespace_insert(ns0,(UA_Node*)aggregates);
+	Namespace_insert(ns0,(const UA_Node**)&aggregates, NAMESPACE_INSERT_UNIQUE);
 
 	UA_ReferenceTypeNode *hassubtype;
 	UA_ReferenceTypeNode_new(&hassubtype);
@@ -243,10 +243,10 @@ void appMockup_init() {
 	hassubtype->description = UA_LOCALIZEDTEXT_STATIC("HasSubtype");
 	hassubtype->isAbstract = UA_FALSE;
 	hassubtype->symmetric = UA_FALSE;
-	generatesevent->inverseName = UA_LOCALIZEDTEXT_STATIC("SubtypeOf");
+	hassubtype->inverseName = UA_LOCALIZEDTEXT_STATIC("SubtypeOf");
 	AddReference((UA_Node*)hassubtype, &(UA_ReferenceNode){RefTypeId_HasSubtype, UA_TRUE,
 		(UA_ExpandedNodeId){RefTypeId_HasChild, UA_STRING_NULL, 0}}, ns0);
-	Namespace_insert(ns0,(UA_Node*)hassubtype);
+	Namespace_insert(ns0,(const UA_Node**)&hassubtype, NAMESPACE_INSERT_UNIQUE);
 
 	UA_ReferenceTypeNode *hasproperty;
 	UA_ReferenceTypeNode_new(&hasproperty);
@@ -257,10 +257,10 @@ void appMockup_init() {
 	hasproperty->description = UA_LOCALIZEDTEXT_STATIC("HasProperty");
 	hasproperty->isAbstract = UA_FALSE;
 	hasproperty->symmetric = UA_FALSE;
-	generatesevent->inverseName = UA_LOCALIZEDTEXT_STATIC("PropertyOf");
+	hasproperty->inverseName = UA_LOCALIZEDTEXT_STATIC("PropertyOf");
 	AddReference((UA_Node*)hasproperty, &(UA_ReferenceNode){RefTypeId_HasSubtype, UA_TRUE,
 		(UA_ExpandedNodeId){RefTypeId_Aggregates, UA_STRING_NULL, 0}}, ns0);
-	Namespace_insert(ns0,(UA_Node*)hasproperty);
+	Namespace_insert(ns0,(const UA_Node**)&hasproperty, NAMESPACE_INSERT_UNIQUE);
 
 	UA_ReferenceTypeNode *hascomponent;
 	UA_ReferenceTypeNode_new(&hascomponent);
@@ -271,10 +271,10 @@ void appMockup_init() {
 	hascomponent->description = UA_LOCALIZEDTEXT_STATIC("HasComponent");
 	hascomponent->isAbstract = UA_FALSE;
 	hascomponent->symmetric = UA_FALSE;
-	generatesevent->inverseName = UA_LOCALIZEDTEXT_STATIC("ComponentOf");
+	hascomponent->inverseName = UA_LOCALIZEDTEXT_STATIC("ComponentOf");
 	AddReference((UA_Node*)hascomponent, &(UA_ReferenceNode){RefTypeId_HasSubtype, UA_TRUE,
 		(UA_ExpandedNodeId){RefTypeId_Aggregates, UA_STRING_NULL, 0}}, ns0);
-	Namespace_insert(ns0,(UA_Node*)hascomponent);
+	Namespace_insert(ns0,(const UA_Node**)&hascomponent, NAMESPACE_INSERT_UNIQUE);
 
 	UA_ReferenceTypeNode *hasnotifier;
 	UA_ReferenceTypeNode_new(&hasnotifier);
@@ -285,10 +285,10 @@ void appMockup_init() {
 	hasnotifier->description = UA_LOCALIZEDTEXT_STATIC("HasNotifier");
 	hasnotifier->isAbstract = UA_FALSE;
 	hasnotifier->symmetric = UA_FALSE;
-	generatesevent->inverseName = UA_LOCALIZEDTEXT_STATIC("NotifierOf");
+	hasnotifier->inverseName = UA_LOCALIZEDTEXT_STATIC("NotifierOf");
 	AddReference((UA_Node*)hasnotifier, &(UA_ReferenceNode){RefTypeId_HasSubtype, UA_TRUE,
 		(UA_ExpandedNodeId){RefTypeId_HasEventSource, UA_STRING_NULL, 0}}, ns0);
-	Namespace_insert(ns0,(UA_Node*)hasnotifier);
+	Namespace_insert(ns0,(const UA_Node**)&hasnotifier, NAMESPACE_INSERT_UNIQUE);
 
 	UA_ReferenceTypeNode *hasorderedcomponent;
 	UA_ReferenceTypeNode_new(&hasorderedcomponent);
@@ -299,10 +299,10 @@ void appMockup_init() {
 	hasorderedcomponent->description = UA_LOCALIZEDTEXT_STATIC("HasOrderedComponent");
 	hasorderedcomponent->isAbstract = UA_FALSE;
 	hasorderedcomponent->symmetric = UA_FALSE;
-	generatesevent->inverseName = UA_LOCALIZEDTEXT_STATIC("OrderedComponentOf");
+	hasorderedcomponent->inverseName = UA_LOCALIZEDTEXT_STATIC("OrderedComponentOf");
 	AddReference((UA_Node*)hasorderedcomponent, &(UA_ReferenceNode){RefTypeId_HasSubtype, UA_TRUE,
 		(UA_ExpandedNodeId){RefTypeId_HasComponent, UA_STRING_NULL, 0}}, ns0);
-	Namespace_insert(ns0,(UA_Node*)hasorderedcomponent);
+	Namespace_insert(ns0,(const UA_Node**)&hasorderedcomponent, NAMESPACE_INSERT_UNIQUE);
 
 	UA_ReferenceTypeNode *hasmodelparent;
 	UA_ReferenceTypeNode_new(&hasmodelparent);
@@ -313,10 +313,10 @@ void appMockup_init() {
 	hasmodelparent->description = UA_LOCALIZEDTEXT_STATIC("HasModelParent");
 	hasmodelparent->isAbstract = UA_FALSE;
 	hasmodelparent->symmetric = UA_FALSE;
-	generatesevent->inverseName = UA_LOCALIZEDTEXT_STATIC("ModelParentOf");
+	hasmodelparent->inverseName = UA_LOCALIZEDTEXT_STATIC("ModelParentOf");
 	AddReference((UA_Node*)hasmodelparent, &(UA_ReferenceNode){RefTypeId_HasSubtype, UA_TRUE,
 		(UA_ExpandedNodeId){RefTypeId_NonHierarchicalReferences, UA_STRING_NULL, 0}}, ns0);
-	Namespace_insert(ns0,(UA_Node*)hasmodelparent);
+	Namespace_insert(ns0,(const UA_Node**)&hasmodelparent, NAMESPACE_INSERT_UNIQUE);
 
 	UA_ReferenceTypeNode *fromstate;
 	UA_ReferenceTypeNode_new(&fromstate);
@@ -327,10 +327,10 @@ void appMockup_init() {
 	fromstate->description = UA_LOCALIZEDTEXT_STATIC("FromState");
 	fromstate->isAbstract = UA_FALSE;
 	fromstate->symmetric = UA_FALSE;
-	generatesevent->inverseName = UA_LOCALIZEDTEXT_STATIC("ToTransition");
+	fromstate->inverseName = UA_LOCALIZEDTEXT_STATIC("ToTransition");
 	AddReference((UA_Node*)fromstate, &(UA_ReferenceNode){RefTypeId_HasSubtype, UA_TRUE,
 		(UA_ExpandedNodeId){RefTypeId_NonHierarchicalReferences, UA_STRING_NULL, 0}}, ns0);
-	Namespace_insert(ns0,(UA_Node*)fromstate);
+	Namespace_insert(ns0,(const UA_Node**)&fromstate, NAMESPACE_INSERT_UNIQUE);
 
 	UA_ReferenceTypeNode *tostate;
 	UA_ReferenceTypeNode_new(&tostate);
@@ -341,10 +341,10 @@ void appMockup_init() {
 	tostate->description = UA_LOCALIZEDTEXT_STATIC("ToState");
 	tostate->isAbstract = UA_FALSE;
 	tostate->symmetric = UA_FALSE;
-	generatesevent->inverseName = UA_LOCALIZEDTEXT_STATIC("FromTransition");
+	tostate->inverseName = UA_LOCALIZEDTEXT_STATIC("FromTransition");
 	AddReference((UA_Node*)tostate, &(UA_ReferenceNode){RefTypeId_HasSubtype, UA_TRUE,
 		(UA_ExpandedNodeId){RefTypeId_NonHierarchicalReferences, UA_STRING_NULL, 0}}, ns0);
-	Namespace_insert(ns0,(UA_Node*)tostate);
+	Namespace_insert(ns0,(const UA_Node**)&tostate, NAMESPACE_INSERT_UNIQUE);
 
 	UA_ReferenceTypeNode *hascause;
 	UA_ReferenceTypeNode_new(&hascause);
@@ -355,10 +355,10 @@ void appMockup_init() {
 	hascause->description = UA_LOCALIZEDTEXT_STATIC("HasCause");
 	hascause->isAbstract = UA_FALSE;
 	hascause->symmetric = UA_FALSE;
-	generatesevent->inverseName = UA_LOCALIZEDTEXT_STATIC("MayBeCausedBy");
+	hascause->inverseName = UA_LOCALIZEDTEXT_STATIC("MayBeCausedBy");
 	AddReference((UA_Node*)hascause, &(UA_ReferenceNode){RefTypeId_HasSubtype, UA_TRUE,
 		(UA_ExpandedNodeId){RefTypeId_NonHierarchicalReferences, UA_STRING_NULL, 0}}, ns0);
-	Namespace_insert(ns0,(UA_Node*)hascause);
+	Namespace_insert(ns0,(const UA_Node**)&hascause, NAMESPACE_INSERT_UNIQUE);
 
 	UA_ReferenceTypeNode *haseffect;
 	UA_ReferenceTypeNode_new(&haseffect);
@@ -369,10 +369,10 @@ void appMockup_init() {
 	haseffect->description = UA_LOCALIZEDTEXT_STATIC("HasEffect");
 	haseffect->isAbstract = UA_FALSE;
 	haseffect->symmetric = UA_FALSE;
-	generatesevent->inverseName = UA_LOCALIZEDTEXT_STATIC("MayBeEffectedBy");
+	haseffect->inverseName = UA_LOCALIZEDTEXT_STATIC("MayBeEffectedBy");
 	AddReference((UA_Node*)haseffect, &(UA_ReferenceNode){RefTypeId_HasSubtype, UA_TRUE,
 		(UA_ExpandedNodeId){RefTypeId_NonHierarchicalReferences, UA_STRING_NULL, 0}}, ns0);
-	Namespace_insert(ns0,(UA_Node*)haseffect);
+	Namespace_insert(ns0,(const UA_Node**)&haseffect, NAMESPACE_INSERT_UNIQUE);
 
 	UA_ReferenceTypeNode *hashistoricalconfiguration;
 	UA_ReferenceTypeNode_new(&hashistoricalconfiguration);
@@ -383,10 +383,10 @@ void appMockup_init() {
 	hashistoricalconfiguration->description = UA_LOCALIZEDTEXT_STATIC("HasHistoricalConfiguration");
 	hashistoricalconfiguration->isAbstract = UA_FALSE;
 	hashistoricalconfiguration->symmetric = UA_FALSE;
-	generatesevent->inverseName = UA_LOCALIZEDTEXT_STATIC("HistoricalConfigurationOf");
+	hashistoricalconfiguration->inverseName = UA_LOCALIZEDTEXT_STATIC("HistoricalConfigurationOf");
 	AddReference((UA_Node*)hashistoricalconfiguration, &(UA_ReferenceNode){RefTypeId_HasSubtype,
 		UA_TRUE, (UA_ExpandedNodeId){RefTypeId_Aggregates, UA_STRING_NULL, 0}}, ns0);
-	Namespace_insert(ns0,(UA_Node*)hashistoricalconfiguration);
+	Namespace_insert(ns0,(const UA_Node**)&hashistoricalconfiguration, NAMESPACE_INSERT_UNIQUE);
 
 
 	// ObjectTypes (Ids only)
@@ -411,7 +411,7 @@ void appMockup_init() {
 	folderType->browseName = UA_QUALIFIEDNAME_STATIC("FolderType");
 	folderType->displayName = UA_LOCALIZEDTEXT_STATIC("FolderType");
 	folderType->description = UA_LOCALIZEDTEXT_STATIC("FolderType");
-	Namespace_insert(ns0,(UA_Node*)folderType);
+	Namespace_insert(ns0,(const UA_Node**)&folderType, NAMESPACE_INSERT_UNIQUE);
 
 	// Root
 	UA_ObjectNode *root;
@@ -425,7 +425,8 @@ void appMockup_init() {
 	AddReference((UA_Node*)root, &(UA_ReferenceNode){RefTypeId_Organizes, UA_FALSE, ObjId_ObjectsFolder}, ns0);
 	AddReference((UA_Node*)root, &(UA_ReferenceNode){RefTypeId_Organizes, UA_FALSE, ObjId_TypesFolder}, ns0);
 	AddReference((UA_Node*)root, &(UA_ReferenceNode){RefTypeId_Organizes, UA_FALSE, ObjId_ViewsFolder}, ns0);
-	Namespace_insert(ns0,(UA_Node*)root);
+	/* root becomes a managed node. we need to release it at the end.*/
+	Namespace_insert(ns0,(const UA_Node**)&root, NAMESPACE_INSERT_UNIQUE | NAMESPACE_INSERT_GETMANAGED);
 
 	// Objects
 	UA_ObjectNode *objects;
@@ -437,7 +438,7 @@ void appMockup_init() {
 	objects->description = UA_LOCALIZEDTEXT_STATIC("Objects");
 	AddReference((UA_Node*)objects, &(UA_ReferenceNode){RefTypeId_HasTypeDefinition, UA_FALSE, ObjTypeId_FolderType}, ns0);
 	AddReference((UA_Node*)objects, &(UA_ReferenceNode){RefTypeId_Organizes, UA_FALSE, ObjId_Server}, ns0);
-	Namespace_insert(ns0,(UA_Node*)objects);
+	Namespace_insert(ns0,(const UA_Node**)&objects, NAMESPACE_INSERT_UNIQUE);
 
 	// Types
 	UA_ObjectNode *types;
@@ -448,7 +449,7 @@ void appMockup_init() {
 	types->displayName = UA_LOCALIZEDTEXT_STATIC("Types");
 	types->description = UA_LOCALIZEDTEXT_STATIC("Types");
 	AddReference((UA_Node*)types, &(UA_ReferenceNode){RefTypeId_HasTypeDefinition, UA_FALSE, ObjTypeId_FolderType}, ns0);
-	Namespace_insert(ns0,(UA_Node*)types);
+	Namespace_insert(ns0,(const UA_Node**)&types, NAMESPACE_INSERT_UNIQUE);
 
 	// Views
 	UA_ObjectNode *views;
@@ -459,7 +460,7 @@ void appMockup_init() {
 	views->displayName = UA_LOCALIZEDTEXT_STATIC("Views");
 	views->description = UA_LOCALIZEDTEXT_STATIC("Views");
 	AddReference((UA_Node*)views, &(UA_ReferenceNode){RefTypeId_HasTypeDefinition, UA_FALSE, ObjTypeId_FolderType}, ns0);
-	Namespace_insert(ns0,(UA_Node*)views);
+	Namespace_insert(ns0,(const UA_Node**)&views, NAMESPACE_INSERT_UNIQUE);
 
 	// Server
 	UA_ObjectNode *server;
@@ -473,7 +474,7 @@ void appMockup_init() {
 	AddReference((UA_Node*)server, &(UA_ReferenceNode){RefTypeId_HasComponent, UA_FALSE, ObjId_NamespaceArray}, ns0);
 	AddReference((UA_Node*)server, &(UA_ReferenceNode){RefTypeId_HasProperty, UA_FALSE, ObjId_ServerStatus}, ns0);
 	AddReference((UA_Node*)server, &(UA_ReferenceNode){RefTypeId_HasProperty, UA_FALSE, ObjId_ServerArray}, ns0);
-	Namespace_insert(ns0,(UA_Node*)server);
+	Namespace_insert(ns0,(const UA_Node**)&server, NAMESPACE_INSERT_UNIQUE);
 
 	// NamespaceArray
 	UA_VariableNode *namespaceArray;
@@ -497,7 +498,7 @@ void appMockup_init() {
 	namespaceArray->valueRank = 1;
 	namespaceArray->minimumSamplingInterval = 1.0;
 	namespaceArray->historizing = UA_FALSE;
-	Namespace_insert(ns0,(UA_Node*)namespaceArray);
+	Namespace_insert(ns0,(const UA_Node**)&namespaceArray, NAMESPACE_INSERT_UNIQUE);
 
 	// ServerStatus
 	UA_VariableNode *serverstatus;
@@ -524,7 +525,7 @@ void appMockup_init() {
 	serverstatus->value.vt = &UA_.types[UA_SERVERSTATUSDATATYPE]; // gets encoded as an extensionobject
 	serverstatus->value.arrayLength = 1;
 	serverstatus->value.data = status;
-	Namespace_insert(ns0,(UA_Node*)serverstatus);
+	Namespace_insert(ns0,(const UA_Node**)&serverstatus, NAMESPACE_INSERT_UNIQUE);
 
 	// State (Component of ServerStatus)
 	UA_VariableNode *state;
@@ -537,7 +538,7 @@ void appMockup_init() {
 	state->value.vt = &UA_borrowed_.types[UA_SERVERSTATE];
 	state->value.arrayLength = 1;
 	state->value.data = &status->state; // points into the other object.
-	Namespace_insert(ns0,(UA_Node*)state);
+	Namespace_insert(ns0,(const UA_Node**)&state, NAMESPACE_INSERT_UNIQUE);
 
 	//TODO: free(namespaceArray->value.data) later or forget it
 
@@ -583,7 +584,7 @@ void appMockup_init() {
 	tmpNodeValue->vt = &UA_.types[UA_FLOAT];
 
 	AddReference((UA_Node*)root, &(UA_ReferenceNode){RefTypeId_Organizes, UA_FALSE, ObjId_temperature1}, ns0);
-	Namespace_insert(ns0,(UA_Node*)temperature1);
+	Namespace_insert(ns0,(const UA_Node**)&temperature1, NAMESPACE_INSERT_UNIQUE);
 
 
 	UA_ExpandedNodeId ObjId_redLED = (UA_ExpandedNodeId){.nodeId = (UA_NodeId){.encodingByte = UA_NODEIDTYPE_TWOBYTE, .namespace = 0, .identifier.numeric = 109}, .namespaceUri = {-1, ((void *)0)}, .serverIndex = 0};
@@ -608,7 +609,7 @@ void appMockup_init() {
 	tmpNodeValue1->vt = &UA_.types[UA_BOOLEAN];
 
 	AddReference((UA_Node*)root, &(UA_ReferenceNode){RefTypeId_Organizes, UA_FALSE, ObjId_redLED}, ns0);
-	Namespace_insert(ns0,(UA_Node*)redLED);
+	Namespace_insert(ns0,(const UA_Node**)&redLED, NAMESPACE_INSERT_UNIQUE);
 
 
 	UA_ExpandedNodeId ObjId_yellowLED = (UA_ExpandedNodeId){.nodeId = (UA_NodeId){.encodingByte = UA_NODEIDTYPE_TWOBYTE, .namespace = 0, .identifier.numeric = 110}, .namespaceUri = {-1, ((void *)0)}, .serverIndex = 0};
@@ -633,7 +634,9 @@ void appMockup_init() {
 	tmpNodeValue2->vt = &UA_.types[UA_BOOLEAN];
 
 	AddReference((UA_Node*)root, &(UA_ReferenceNode){RefTypeId_Organizes, UA_FALSE, ObjId_yellowLED}, ns0);
-	Namespace_insert(ns0,(UA_Node*)yellowLED);
+	Namespace_insert(ns0,(const UA_Node**)&yellowLED, NAMESPACE_INSERT_UNIQUE);
+	
+	Namespace_releaseManagedNode((const UA_Node*)root);
 
 #if defined(DEBUG) && defined(VERBOSE)
 	uint32_t i;

+ 149 - 267
src/ua_namespace.c

@@ -2,247 +2,164 @@
 #include <string.h>
 #include <stdio.h>
 
-/*****************************************/
-/* Internal (not exported) functionality */
-/*****************************************/
-
-struct Namespace_Entry_Lock {
-	Namespace_Entry *entry;
-};
-
-/* The tombstone (entry.node == 0x01) indicates that an entry was deleted at the position in the
-   hash-map. This is information is used to decide whether the entire table shall be rehashed so
-   that entries are found faster. */
-#define ENTRY_EMPTY UA_NULL
-#define ENTRY_TOMBSTONE 0x01
-
-/* The central data structure is a hash-map of UA_Node objects. Entry lookup via Algorithm D from
-   Knuth's TAOCP (no linked lists here). Table of primes and mod-functions are from libiberty
-   (licensed under LGPL) */ typedef UA_UInt32 hash_t;
-struct prime_ent {
-	hash_t prime;
-	hash_t inv;
-	hash_t inv_m2;	/* inverse of prime-2 */
-	hash_t shift;
+struct Namespace {
+	UA_UInt32 namespaceId;
+	const UA_Node ** entries;
+	UA_UInt32 size;
+	UA_UInt32 count;
+	UA_UInt32 sizePrimeIndex;
 };
 
-static struct prime_ent const prime_tab[] = {
-	{7, 0x24924925, 0x9999999b, 2},
-	{13, 0x3b13b13c, 0x745d1747, 3},
-	{31, 0x08421085, 0x1a7b9612, 4},
-	{61, 0x0c9714fc, 0x15b1e5f8, 5},
-	{127, 0x02040811, 0x0624dd30, 6},
-	{251, 0x05197f7e, 0x073260a5, 7},
-	{509, 0x01824366, 0x02864fc8, 8},
-	{1021, 0x00c0906d, 0x014191f7, 9},
-	{2039, 0x0121456f, 0x0161e69e, 10},
-	{4093, 0x00300902, 0x00501908, 11},
-	{8191, 0x00080041, 0x00180241, 12},
-	{16381, 0x000c0091, 0x00140191, 13},
-	{32749, 0x002605a5, 0x002a06e6, 14},
-	{65521, 0x000f00e2, 0x00110122, 15},
-	{131071, 0x00008001, 0x00018003, 16},
-	{262139, 0x00014002, 0x0001c004, 17},
-	{524287, 0x00002001, 0x00006001, 18},
-	{1048573, 0x00003001, 0x00005001, 19},
-	{2097143, 0x00004801, 0x00005801, 20},
-	{4194301, 0x00000c01, 0x00001401, 21},
-	{8388593, 0x00001e01, 0x00002201, 22},
-	{16777213, 0x00000301, 0x00000501, 23},
-	{33554393, 0x00001381, 0x00001481, 24},
-	{67108859, 0x00000141, 0x000001c1, 25},
-	{134217689, 0x000004e1, 0x00000521, 26},
-	{268435399, 0x00000391, 0x000003b1, 27},
-	{536870909, 0x00000019, 0x00000029, 28},
-	{1073741789, 0x0000008d, 0x00000095, 29},
-	{2147483647, 0x00000003, 0x00000007, 30},
-	/* Avoid "decimal constant so large it is unsigned" for 4294967291.  */
-	{0xfffffffb, 0x00000006, 0x00000008, 31}
+typedef UA_UInt32 hash_t;
+/* The size of the hash-map is always a prime number. They are chosen to be
+   close to the next power of 2. So the size ca. doubles with each prime. */
+static hash_t const primes[] = {
+	7,         13,        31,        61,         127,        251,
+	509,       1021,      2039,      4093,       8191,       16381,
+	32749,     65521,     131071,    262139,     524287,     1048573,
+	2097143,   4194301,   8388593,   16777213,   33554393,   67108859,
+	134217689, 268435399, 536870909, 1073741789, 2147483647, 4294967291
 };
 
-/* Hashing inspired by code from from http://www.azillionmonkeys.com/qed/hash.html, licensed under
-   the LGPL 2.1 */
-#undef get16bits
-#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) || \
-	defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
-#define get16bits(d) (*((const uint16_t *) (d)))
-#endif
+static inline hash_t mod(hash_t h, hash_t size) { return h % size; }
+static inline hash_t mod2(hash_t h, hash_t size) { return 1 + (h % (size - 2)); }
 
-#if !defined (get16bits)
-#define get16bits(d) ((((UA_UInt32)(((const uint8_t *)(d))[1])) << 8) + \
-					  (UA_UInt32)(((const uint8_t *)(d))[0]) )
-#endif
+static inline UA_Int16 higher_prime_index(hash_t n) {
+	UA_UInt16 low  = 0;
+	UA_UInt16 high = sizeof(primes) / sizeof(hash_t);
+	while(low != high) {
+		UA_UInt16 mid = low + (high - low) / 2;
+		if(n > primes[mid])
+			low = mid + 1;
+		else
+			high = mid;
+	}
+	return low;
+}
 
-static inline hash_t hash_string(const UA_Byte * data, UA_Int32 len) {
+/* Based on Murmur-Hash 3 by Austin Appleby (public domain, freely usable) */
+static inline hash_t hash_array(const UA_Byte *data, UA_UInt32 len) {
+	static const uint32_t c1 = 0xcc9e2d51;
+	static const uint32_t c2 = 0x1b873593;
+	static const uint32_t r1 = 15;
+	static const uint32_t r2 = 13;
+	static const uint32_t m  = 5;
+	static const uint32_t n  = 0xe6546b64;
 	hash_t hash = len;
-	hash_t tmp;
-	int rem;
-
-	if(len <= 0 || data == UA_NULL)
-		return 0;
 
-	rem = len & 3;
-	len >>= 2;
-
-	/* Main loop */
-	for(; len > 0; len--) {
-		hash += get16bits(data);
-		tmp = (get16bits(data + 2) << 11) ^ hash;
-		hash = (hash << 16) ^ tmp;
-		data += 2 * sizeof(uint16_t);
-		hash += hash >> 11;
+	if(data == UA_NULL) return 0;
+
+	const int32_t   nblocks = len / 4;
+	const uint32_t *blocks  = (const uint32_t *)data;
+	for(int32_t i = 0;i < nblocks;i++) {
+		uint32_t k = blocks[i];
+		k    *= c1;
+		k     = (k << r1) | (k >> (32 - r1));
+		k    *= c2;
+		hash ^= k;
+		hash  = ((hash << r2) | (hash >> (32 - r2))) * m + n;
 	}
 
-	/* Handle end cases */
-	switch (rem) {
+	const uint8_t *tail = (const uint8_t *)(data + nblocks * 4);
+	uint32_t       k1   = 0;
+
+	switch(len & 3) {
 	case 3:
-		hash += get16bits(data);
-		hash ^= hash << 16;
-		hash ^= ((signed char)data[sizeof(uint16_t)]) << 18;
-		hash += hash >> 11;
-		break;
+		k1 ^= tail[2] << 16;
 	case 2:
-		hash += get16bits(data);
-		hash ^= hash << 11;
-		hash += hash >> 17;
-		break;
+		k1 ^= tail[1] << 8;
 	case 1:
-		hash += (signed char)*data;
-		hash ^= hash << 10;
-		hash += hash >> 1;
-		break;
+		k1   ^= tail[0];
+		k1   *= c1;
+		k1    = (k1 << r1) | (k1 >> (32 - r1));
+		k1   *= c2;
+		hash ^= k1;
 	}
 
-	/* Force "avalanching" of final 127 bits */
-	hash ^= hash << 3;
-	hash += hash >> 5;
-	hash ^= hash << 4;
-	hash += hash >> 17;
-	hash ^= hash << 25;
-	hash += hash >> 6;
+	hash ^= len;
+	hash ^= (hash >> 16);
+	hash *= 0x85ebca6b;
+	hash ^= (hash >> 13);
+	hash *= 0xc2b2ae35;
+	hash ^= (hash >> 16);
 
 	return hash;
 }
 
-static inline hash_t hash(const UA_NodeId * n) {
-	switch (n->encodingByte) {
+static inline hash_t hash(const UA_NodeId *n) {
+	switch(n->encodingByte) {
 	case UA_NODEIDTYPE_TWOBYTE:
 	case UA_NODEIDTYPE_FOURBYTE:
 	case UA_NODEIDTYPE_NUMERIC:
-		return (n->identifier.numeric * 2654435761) % 2 ^ 32;	// Knuth's multiplicative hashing
+		/*  Knuth's multiplicative hashing */
+		return n->identifier.numeric * 2654435761;   // mod(2^32) is implicit
 	case UA_NODEIDTYPE_STRING:
-		return hash_string(n->identifier.string.data, n->identifier.string.length);
+		return hash_array(n->identifier.string.data, n->identifier.string.length);
 	case UA_NODEIDTYPE_GUID:
-		return hash_string((UA_Byte *) & (n->identifier.guid), sizeof(UA_Guid));
+		return hash_array((UA_Byte *)&(n->identifier.guid), sizeof(UA_Guid));
 	case UA_NODEIDTYPE_BYTESTRING:
-		return hash_string((UA_Byte *) n->identifier.byteString.data, n->identifier.byteString.length);
+		return hash_array((UA_Byte *)n->identifier.byteString.data, n->identifier.byteString.length);
 	default:
 		return 0;
 	}
 }
 
-/* The following function returns an index into the above table of the nearest prime number which
-   is greater than N, and near a power of two. */
-static inline unsigned int higher_prime_index(unsigned long n) {
-	unsigned int low = 0;
-	unsigned int high = sizeof(prime_tab) / sizeof(prime_tab[0]);
-
-	while(low != high) {
-		unsigned int mid = low + (high - low) / 2;
-		if(n > prime_tab[mid].prime)
-			low = mid + 1;
-		else
-			high = mid;
-	}
-
-	return low;
-}
-
-static inline hash_t mod_1(hash_t x, hash_t y, hash_t inv, int shift) {
-	/* The multiplicative inverses computed above are for 32-bit types, and requires that we are
-	   able to compute a highpart multiply.  */
-#ifdef UNSIGNED_64BIT_TYPE
-	__extension__ typedef UNSIGNED_64BIT_TYPE ull;
-	if(sizeof(hash_t) * CHAR_BIT <= 32) {
-		hash_t t1, t2, t3, t4, q, r;
-		t1 = ((ull) x * inv) >> 32;
-		t2 = x - t1;
-		t3 = t2 >> 1;
-		t4 = t1 + t3;
-		q = t4 >> shift;
-		r = x - (q * y);
-		return r;
-	}
-#endif
-	return x % y;
-}
-
-static inline hash_t mod(hash_t h, const Namespace * ns) {
-	const struct prime_ent *p = &prime_tab[ns->sizePrimeIndex];
-	return mod_1(h, p->prime, p->inv, p->shift);
-}
-
-static inline hash_t mod_m2(hash_t h, const Namespace * ns) {
-	const struct prime_ent *p = &prime_tab[ns->sizePrimeIndex];	// eventually return the namespace object that has been allocated in UA_NodeSet_init
-
-	return 1 + mod_1(h, p->prime - 2, p->inv_m2, p->shift);
-}
-
-static inline void clear_entry(Namespace * ns, Namespace_Entry * entry) {
-	if(entry->node == UA_NULL)
+static inline void clear_entry(Namespace * ns, const UA_Node ** entry) {
+	if(entry == UA_NULL || *entry == UA_NULL)
 		return;
 
-	switch (entry->node->nodeClass) {
+	const UA_Node *node = *entry;
+	switch (node->nodeClass) {
 	case UA_NODECLASS_OBJECT:
-		UA_ObjectNode_delete((UA_ObjectNode *) entry->node);
+		UA_ObjectNode_delete((UA_ObjectNode *) node);
 		break;
 	case UA_NODECLASS_VARIABLE:
-		UA_VariableNode_delete((UA_VariableNode *) entry->node);
+		UA_VariableNode_delete((UA_VariableNode *) node);
 		break;
 	case UA_NODECLASS_METHOD:
-		UA_MethodNode_delete((UA_MethodNode *) entry->node);
+		UA_MethodNode_delete((UA_MethodNode *) node);
 		break;
 	case UA_NODECLASS_OBJECTTYPE:
-		UA_ObjectTypeNode_delete((UA_ObjectTypeNode *) entry->node);
+		UA_ObjectTypeNode_delete((UA_ObjectTypeNode *) node);
 		break;
 	case UA_NODECLASS_VARIABLETYPE:
-		UA_VariableTypeNode_delete((UA_VariableTypeNode *) entry->node);
+		UA_VariableTypeNode_delete((UA_VariableTypeNode *) node);
 		break;
 	case UA_NODECLASS_REFERENCETYPE:
-		UA_ReferenceTypeNode_delete((UA_ReferenceTypeNode *) entry->node);
+		UA_ReferenceTypeNode_delete((UA_ReferenceTypeNode *) node);
 		break;
 	case UA_NODECLASS_DATATYPE:
-		UA_DataTypeNode_delete((UA_DataTypeNode *) entry->node);
+		UA_DataTypeNode_delete((UA_DataTypeNode *) node);
 		break;
 	case UA_NODECLASS_VIEW:
-		UA_ViewNode_delete((UA_ViewNode *) entry->node);
+		UA_ViewNode_delete((UA_ViewNode *) node);
 		break;
 	default:
 		break;
 	}
-	entry->node = UA_NULL;
+	*entry = UA_NULL;
 	ns->count--;
 }
 
 /* Returns UA_SUCCESS if an entry was found. Otherwise, UA_ERROR is returned and the "entry"
    argument points to the first free entry under the NodeId. */
-static inline UA_Int32 find_entry(const Namespace * ns, const UA_NodeId * nodeid, Namespace_Entry ** entry) {
+static inline UA_Int32 find_entry(const Namespace * ns, const UA_NodeId * nodeid, const UA_Node *** entry) {
 	hash_t h = hash(nodeid);
-	hash_t index = mod(h, ns);
 	UA_UInt32 size = ns->size;
-	Namespace_Entry *e = &ns->entries[index];
+	hash_t index = mod(h, size);
+	const UA_Node **e = &ns->entries[index];
 
-	if(e->node == UA_NULL) {
+	if(*e == UA_NULL) {
 		*entry = e;
 		return UA_ERROR;
 	}
 
-	if(UA_NodeId_equal(&e->node->nodeId, nodeid) == UA_EQUAL) {
+	if(UA_NodeId_equal(&(*e)->nodeId, nodeid) == UA_EQUAL) {
 		*entry = e;
 		return UA_SUCCESS;
 	}
 
-	hash_t hash2 = mod_m2(h, ns);
+	hash_t hash2 = mod2(h, size);
 	for(;;) {
 		index += hash2;
 		if(index >= size)
@@ -250,12 +167,12 @@ static inline UA_Int32 find_entry(const Namespace * ns, const UA_NodeId * nodeid
 
 		e = &ns->entries[index];
 
-		if(e->node == UA_NULL) {
+		if(*e == UA_NULL) {
 			*entry = e;
 			return UA_ERROR;
 		}
 
-		if(UA_NodeId_equal(&e->node->nodeId, nodeid) == UA_EQUAL) {
+		if(UA_NodeId_equal(&(*e)->nodeId, nodeid) == UA_EQUAL) {
 			*entry = e;
 			return UA_SUCCESS;
 		}
@@ -265,19 +182,18 @@ static inline UA_Int32 find_entry(const Namespace * ns, const UA_NodeId * nodeid
 	return UA_SUCCESS;
 }
 
-/* The following function changes size of memory allocated for the entries and repeatedly inserts
-   the table elements. The occupancy of the table after the call will be about 50%. Naturally the
-   hash table must already exist. Remember also that the place of the table entries is changed. If
-   memory allocation failures are allowed, this function will return zero, indicating that the
-   table could not be expanded. If all goes well, it will return a non-zero value. */
+/* The following function changes size of memory allocated for the entries and
+   repeatedly inserts the table elements. The occupancy of the table after the
+   call will be about 50%. If memory allocation failures occur, this function
+   will return UA_ERROR. */
 static UA_Int32 expand(Namespace * ns) {
-	Namespace_Entry *nentries;
+	const UA_Node **nentries;
 	int32_t nsize;
 	UA_UInt32 nindex;
 
-	Namespace_Entry *oentries = ns->entries;
+	const UA_Node **oentries = ns->entries;
 	int32_t osize = ns->size;
-	Namespace_Entry *olimit = &oentries[osize];
+	const UA_Node **olimit = &oentries[osize];
 	int32_t count = ns->count;
 
 	/* Resize only when table after removal of unused elements is either too full or too empty.  */
@@ -286,21 +202,21 @@ static UA_Int32 expand(Namespace * ns) {
 	}
 
 	nindex = higher_prime_index(count * 2);
-	nsize = prime_tab[nindex].prime;
+	nsize = primes[nindex];
 
-	if(UA_alloc((void **)&nentries, sizeof(Namespace_Entry) * nsize) != UA_SUCCESS)
+	if(UA_alloc((void **)&nentries, sizeof(UA_Node *) * nsize) != UA_SUCCESS)
 		return UA_ERR_NO_MEMORY;
 
-	memset(nentries, 0, nsize * sizeof(Namespace_Entry));
+	memset(nentries, 0, nsize * sizeof(UA_Node *));
 	ns->entries = nentries;
 	ns->size = nsize;
 	ns->sizePrimeIndex = nindex;
 
-	Namespace_Entry *p = oentries;
+	const UA_Node **p = oentries;
 	do {
-		if(p->node != UA_NULL) {
-			Namespace_Entry *e;
-			find_entry(ns, &p->node->nodeId, &e);	/* We know this returns an empty entry here */
+		if(*p != UA_NULL) {
+			const UA_Node **e;
+			find_entry(ns, &(*p)->nodeId, &e);	/* We know this returns an empty entry here */
 			*e = *p;
 		}
 		p++;
@@ -314,122 +230,86 @@ static UA_Int32 expand(Namespace * ns) {
 /* Exported functions */
 /**********************/
 
-UA_Int32 Namespace_new(Namespace ** result, UA_UInt32 size, UA_UInt32 namespaceId) {
+UA_Int32 Namespace_new(Namespace ** result, UA_UInt32 namespaceId) {
 	Namespace *ns;
 	if(UA_alloc((void **)&ns, sizeof(Namespace)) != UA_SUCCESS)
 		return UA_ERR_NO_MEMORY;
 
-	UA_UInt32 sizePrimeIndex = higher_prime_index(size);
-	size = prime_tab[sizePrimeIndex].prime;
-	if(UA_alloc((void **)&ns->entries, sizeof(Namespace_Entry) * size) != UA_SUCCESS) {
+	UA_UInt32 sizePrimeIndex = higher_prime_index(32);
+	UA_UInt32 size = primes[sizePrimeIndex];
+	if(UA_alloc((void **)&ns->entries, sizeof(UA_Node *) * size) != UA_SUCCESS) {
 		UA_free(ns);
 		return UA_ERR_NO_MEMORY;
 	}
 
 	/* set entries to zero */
-	memset(ns->entries, 0, size * sizeof(Namespace_Entry));
+	memset(ns->entries, 0, size * sizeof(UA_Node *));
 
 	*ns = (Namespace) {namespaceId, ns->entries, size, 0, sizePrimeIndex};
 	*result = ns;
 	return UA_SUCCESS;
 }
 
-static void Namespace_clear(Namespace * ns) {
+
+UA_Int32 Namespace_delete(Namespace * ns) {
 	UA_UInt32 size = ns->size;
-	Namespace_Entry *entries = ns->entries;
+	const UA_Node **entries = ns->entries;
+
 	for(UA_UInt32 i = 0; i < size; i++)
 		clear_entry(ns, &entries[i]);
-	ns->count = 0;
-}
-
-void Namespace_empty(Namespace * ns) {
-	Namespace_clear(ns);
-
-	/* Downsize the table.  */
-	if(ns->size > 1024 * 1024 / sizeof(Namespace_Entry)) {
-		int nindex = higher_prime_index(1024 / sizeof(Namespace_Entry));
-		int nsize = prime_tab[nindex].prime;
-		UA_free(ns->entries);
-		UA_alloc((void **)&ns->entries, sizeof(Namespace_Entry) * nsize);	// FIXME: Check return
-		// value
-		ns->size = nsize;
-		ns->sizePrimeIndex = nindex;
-	}
-}
-
-void Namespace_delete(Namespace * ns) {
-	Namespace_clear(ns);
+	
 	UA_free(ns->entries);
 	UA_free(ns);
-}
-
-UA_Int32 Namespace_insert(Namespace * ns, const UA_Node * node) {
-	if(ns->size * 3 <= ns->count * 4) {
-		if(expand(ns) != UA_SUCCESS)
-			return UA_ERROR;
-	}
-
-	Namespace_Entry *entry;
-	UA_Int32 found = find_entry(ns, &node->nodeId, &entry);
-
-	if(found == UA_SUCCESS)
-		return UA_ERROR;	/* There is already an entry for that nodeid */
-
-	entry->node = node;
-	ns->count++;
 	return UA_SUCCESS;
 }
 
-UA_Int32 Namespace_insertUnique(Namespace * ns, UA_Node * node) {
+UA_Int32 Namespace_insert(Namespace *ns, const UA_Node **node, UA_Byte flags) {
+	if(ns == UA_NULL || node == UA_NULL || *node == UA_NULL)
+		return UA_ERROR;
+	
 	if(ns->size * 3 <= ns->count * 4) {
 		if(expand(ns) != UA_SUCCESS)
 			return UA_ERROR;
 	}
-	// find unoccupied numeric nodeid
-	node->nodeId.namespace = ns->namespaceId;
-	node->nodeId.encodingByte = UA_NODEIDTYPE_NUMERIC;
-	node->nodeId.identifier.numeric = ns->count;
 
-	hash_t h = hash(&node->nodeId);
-	hash_t hash2 = mod_m2(h, ns);
-	UA_UInt32 size = ns->size;
+	const UA_Node ** entry;
+	UA_Int32 found = find_entry(ns, &(*node)->nodeId, &entry);
 
-	// advance integer (hash) until a free entry is found
-	Namespace_Entry *entry = UA_NULL;
-	while(1) {
-		if(find_entry(ns, &node->nodeId, &entry) != UA_SUCCESS)
-			break;
-		node->nodeId.identifier.numeric += hash2;
-		if(node->nodeId.identifier.numeric >= size)
-			node->nodeId.identifier.numeric -= size;
+	if(flags & NAMESPACE_INSERT_UNIQUE) {
+		if(found == UA_SUCCESS)
+			return UA_ERROR;	/* There is already an entry for that nodeid */
+		else
+			*entry = *node;
+	} else {
+		if(found == UA_SUCCESS)
+			clear_entry(ns, entry);
+		*entry = *node;
 	}
 
-	entry->node = node;
+	if(!(flags & NAMESPACE_INSERT_GETMANAGED))
+		*node = UA_NULL;
+	
 	ns->count++;
 	return UA_SUCCESS;
 }
 
-UA_Int32 Namespace_contains(const Namespace * ns, const UA_NodeId * nodeid) {
-	Namespace_Entry *entry;
-	return (find_entry(ns, nodeid, &entry) == UA_SUCCESS ? UA_TRUE : UA_FALSE);
-}
-
-UA_Int32 Namespace_get(Namespace const *ns, const UA_NodeId * nodeid, const UA_Node **result,
-					   Namespace_Entry_Lock ** lock) {
-	Namespace_Entry *entry;
+UA_Int32 Namespace_get(const Namespace *ns, const UA_NodeId *nodeid, const UA_Node **managedNode) {
+	if(ns == UA_NULL || nodeid == UA_NULL || managedNode == UA_NULL)
+		return UA_ERROR;
+	const UA_Node **entry;
 	if(find_entry(ns, nodeid, &entry) != UA_SUCCESS)
 		return UA_ERROR;
 
-	*result = entry->node;
+	*managedNode = *entry;
 	return UA_SUCCESS;
 }
 
 UA_Int32 Namespace_remove(Namespace * ns, const UA_NodeId * nodeid) {
-	Namespace_Entry *entry;
+	const UA_Node **entry;
 	if(find_entry(ns, nodeid, &entry) != UA_SUCCESS)
 		return UA_ERROR;
 
-	// TODO: Check if deleting the node makes the Namespace inconsistent.
+	// Check before if deleting the node makes the Namespace inconsistent.
 	clear_entry(ns, entry);
 
 	/* Downsize the hashmap if it is very empty */
@@ -440,15 +320,17 @@ UA_Int32 Namespace_remove(Namespace * ns, const UA_NodeId * nodeid) {
 }
 
 UA_Int32 Namespace_iterate(const Namespace * ns, Namespace_nodeVisitor visitor) {
-	if(visitor == UA_NULL) return UA_SUCCESS;
+	if(ns == UA_NULL || visitor == UA_NULL)
+		return UA_ERROR;
+	
 	for(UA_UInt32 i = 0; i < ns->size; i++) {
-		Namespace_Entry *entry = &ns->entries[i];
-		if(entry->node != UA_NULL)
-			visitor(entry->node);
+		const UA_Node *node = ns->entries[i];
+		if(node != UA_NULL)
+			visitor(node);
 	}
 	return UA_SUCCESS;
 }
 
-void Namespace_Entry_Lock_release(Namespace_Entry_Lock * lock) {
+void Namespace_releaseManagedNode(const UA_Node *managed) {
 	;
 }

+ 44 - 55
src/ua_namespace.h

@@ -5,72 +5,61 @@
 #include "ua_types_generated.h"
 #include "util/ua_list.h"
 
-#ifdef MULTITHREADING
-#define _XOPEN_SOURCE 500
-#define __USE_UNIX98
-#include <pthread.h>
-#endif
-
-/** @brief Namespace entries point to an UA_Node. But the actual data structure
-	is opaque outside of ua_namespace.c */
-
-typedef struct Namespace_Entry {
-	UA_UInt64 status;	/* 2 bits status | 14 bits checkout count | 48 bits timestamp */
-	const UA_Node *node;	/* Nodes are immutable. It is not recommended to change nodes in place */
-} Namespace_Entry;
-
-/** @brief Namespace datastructure. It mainly serves as a hashmap to UA_Nodes. */
-typedef struct Namespace {
-	UA_UInt32 namespaceId;
-	Namespace_Entry *entries;
-	UA_UInt32 size;
-	UA_UInt32 count;
-	UA_UInt32 sizePrimeIndex;	/* Current size, as an index into the table of primes.  */
-} Namespace;
-
-/** Namespace locks indicate that a thread currently operates on an entry. */
-struct Namespace_Entry_Lock;
-typedef struct Namespace_Entry_Lock Namespace_Entry_Lock;
-
-/** @brief Release a lock on a namespace entry. */
-void Namespace_Entry_Lock_release(Namespace_Entry_Lock * lock);
+/**
+   @defgroup namespace Namespace
 
-/** @brief Create a new namespace */
-UA_Int32 Namespace_new(Namespace ** result, UA_UInt32 size, UA_UInt32 namespaceId);
+   @brief The namespace is the central storage for nodes in the UA address
+   space. Internally, the namespace is realised as hash-map where nodes are
+   stored and retrieved with their nodeid.
+
+   The nodes in the namespace are immutable. To change the content of a node, it
+   needs to be replaced as a whole. When a node is inserted into the namespace,
+   it gets replaced with a pointer to a managed node. Managed nodes shall never
+   be freed by the user. This is done by the namespace when the node is removed
+   and no readers (in other threads) access the node.
+
+   @{
+ */
 
-/** @brief Delete all nodes in the namespace */
-void Namespace_empty(Namespace * ns);
+/** @brief Namespace datastructure. Mainly a hashmap to UA_Nodes */
+struct Namespace;
+typedef struct Namespace Namespace;
+
+/** @brief Create a new namespace */
+UA_Int32 Namespace_new(Namespace **result, UA_UInt32 namespaceId);
 
 /** @brief Delete the namespace and all nodes in it */
-void Namespace_delete(Namespace * ns);
+UA_Int32 Namespace_delete(Namespace *ns);
 
-/** @brief Insert a new node into the namespace. Abort an entry with the same
-	NodeId is already present */
-UA_Int32 Namespace_insert(Namespace * ns, const UA_Node * node);
+#define NAMESPACE_INSERT_UNIQUE 1
+#define NAMESPACE_INSERT_GETMANAGED 2
+/** @brief Insert a new node into the namespace
 
-/** @brief Insert a new node or replace an existing node if an entry has the same NodeId. */
-// UA_Int32 Namespace_insertOrReplace(Namespace * ns, const UA_Node * node);
+    With the UNIQUE flag, the node is only inserted if the nodeid does not
+    already exist. With the GETMANAGED flag, the node pointer is replaced with
+    the managed pointer. Otherwise, it is set to UA_NULL. */
+UA_Int32 Namespace_insert(Namespace *ns, const UA_Node **node, UA_Byte flags);
 
-/** @brief Find an unused (numeric) NodeId in the namespace and insert the node.
-	The node is modified to contain the new nodeid after insertion. */
-UA_Int32 Namespace_insertUnique(Namespace * ns, UA_Node * node);
+/** @brief Remove a node from the namespace. Always succeeds, even if the node
+	was not found. */
+UA_Int32 Namespace_remove(Namespace *ns, const UA_NodeId *nodeid);
 
-/** @brief Remove a node from the namespace */
-UA_Int32 Namespace_remove(Namespace * ns, const UA_NodeId * nodeid);
+/** @brief Retrieve a node (read-only) from the namespace. Nodes are immutable.
+    They can only be replaced. After the Node is no longer used, the locked
+    entry needs to be released. */
+UA_Int32 Namespace_get(const Namespace *ns, const UA_NodeId *nodeid, const UA_Node **managedNode);
 
-/** @brief Tests whether the namespace contains an entry for a given NodeId */
-UA_Int32 Namespace_contains(const Namespace * ns, const UA_NodeId * nodeid);
+/** @brief Release a managed node. Do never insert a node that isn't stored in a
+	namespace. */
+void Namespace_releaseManagedNode(const UA_Node *managed);
 
-/** @brief Retrieve a node (read-only) from the namespace. Nodes are identified
-	by their NodeId. After the Node is no longer used, the lock needs to be
-	released. */
-UA_Int32 Namespace_get(const Namespace *ns, const UA_NodeId * nodeid, const UA_Node **result,
-					   Namespace_Entry_Lock ** lock);
+/** @brief A function that can be evaluated on all entries in a namespace via
+	Namespace_iterate. Note that the visitor is read-only on the nodes. */
+typedef void (*Namespace_nodeVisitor)(const UA_Node *node);
 
-/** @brief A function that can be evaluated on all entries in a namespace via Namespace_iterate */
-typedef void (*Namespace_nodeVisitor) (const UA_Node *node);
+/** @brief Iterate over all nodes in a namespace. */
+UA_Int32 Namespace_iterate(const Namespace *ns, Namespace_nodeVisitor visitor);
 
-/** @brief Iterate over all nodes in a namespace */
-UA_Int32 Namespace_iterate(const Namespace * ns, Namespace_nodeVisitor visitor);
+/// @} /* end of group */
 
 #endif /* __NAMESPACE_H */

+ 366 - 0
src/ua_namespace_concurrent.c

@@ -0,0 +1,366 @@
+#include "ua_namespace.h"
+
+#include <urcu.h>
+#include <urcu/compiler.h> // for caa_container_of
+#include <urcu/uatomic.h>
+#include <urcu/rculfhash.h>
+
+#define ALIVE_BIT (1 << 15) /* Alive bit in the readcount */
+typedef struct Namespace_Entry {
+	struct cds_lfht_node htn; /* contains next-ptr for urcu-hashmap */
+	UA_UInt16 readcount;      /* Counts the amount of readers on it [alive-bit, 15 counter-bits] */
+	UA_Node   node;           /* Might be cast from any _bigger_ UA_Node* type. Allocate enough memory! */
+} Namespace_Entry;
+
+struct Namespace {
+	UA_UInt32       namespaceId;
+	struct cds_lfht *ht; /* Hash table */
+};
+
+/********/
+/* Hash */
+/********/
+
+typedef UA_UInt32 hash_t;
+
+/* Based on Murmur-Hash 3 by Austin Appleby (public domain, freely usable) */
+static inline hash_t hash_array(const UA_Byte *data, UA_UInt32 len) {
+	static const uint32_t c1 = 0xcc9e2d51;
+	static const uint32_t c2 = 0x1b873593;
+	static const uint32_t r1 = 15;
+	static const uint32_t r2 = 13;
+	static const uint32_t m  = 5;
+	static const uint32_t n  = 0xe6546b64;
+	hash_t hash = len;
+
+	if(data == UA_NULL) return 0;
+
+	const int32_t   nblocks = len / 4;
+	const uint32_t *blocks  = (const uint32_t *)data;
+	for(int32_t i = 0;i < nblocks;i++) {
+		uint32_t k = blocks[i];
+		k    *= c1;
+		k     = (k << r1) | (k >> (32 - r1));
+		k    *= c2;
+		hash ^= k;
+		hash  = ((hash << r2) | (hash >> (32 - r2))) * m + n;
+	}
+
+	const uint8_t *tail = (const uint8_t *)(data + nblocks * 4);
+	uint32_t       k1   = 0;
+
+	switch(len & 3) {
+	case 3:
+		k1 ^= tail[2] << 16;
+
+	case 2:
+		k1 ^= tail[1] << 8;
+
+	case 1:
+		k1   ^= tail[0];
+		k1   *= c1;
+		k1    = (k1 << r1) | (k1 >> (32 - r1));
+		k1   *= c2;
+		hash ^= k1;
+	}
+
+	hash ^= len;
+	hash ^= (hash >> 16);
+	hash *= 0x85ebca6b;
+	hash ^= (hash >> 13);
+	hash *= 0xc2b2ae35;
+	hash ^= (hash >> 16);
+
+	return hash;
+}
+
+static inline hash_t hash(const UA_NodeId *n) {
+	switch(n->encodingByte) {
+	case UA_NODEIDTYPE_TWOBYTE:
+	case UA_NODEIDTYPE_FOURBYTE:
+	case UA_NODEIDTYPE_NUMERIC:
+		/*  Knuth's multiplicative hashing */
+		return n->identifier.numeric * 2654435761;   // mod(2^32) is implicit
+
+	case UA_NODEIDTYPE_STRING:
+		return hash_array(n->identifier.string.data, n->identifier.string.length);
+
+	case UA_NODEIDTYPE_GUID:
+		return hash_array((UA_Byte *)&(n->identifier.guid), sizeof(UA_Guid));
+
+	case UA_NODEIDTYPE_BYTESTRING:
+		return hash_array((UA_Byte *)n->identifier.byteString.data, n->identifier.byteString.length);
+
+	default:
+		return 0;
+	}
+}
+
+/*************/
+/* Namespace */
+/*************/
+
+static inline void node_deleteMembers(const UA_Node *node) {
+	switch(node->nodeClass) {
+	case UA_NODECLASS_OBJECT:
+		UA_ObjectNode_deleteMembers((UA_ObjectNode *)node);
+		break;
+
+	case UA_NODECLASS_VARIABLE:
+		UA_VariableNode_deleteMembers((UA_VariableNode *)node);
+		break;
+
+	case UA_NODECLASS_METHOD:
+		UA_MethodNode_deleteMembers((UA_MethodNode *)node);
+		break;
+
+	case UA_NODECLASS_OBJECTTYPE:
+		UA_ObjectTypeNode_deleteMembers((UA_ObjectTypeNode *)node);
+		break;
+
+	case UA_NODECLASS_VARIABLETYPE:
+		UA_VariableTypeNode_deleteMembers((UA_VariableTypeNode *)node);
+		break;
+
+	case UA_NODECLASS_REFERENCETYPE:
+		UA_ReferenceTypeNode_deleteMembers((UA_ReferenceTypeNode *)node);
+		break;
+
+	case UA_NODECLASS_DATATYPE:
+		UA_DataTypeNode_deleteMembers((UA_DataTypeNode *)node);
+		break;
+
+	case UA_NODECLASS_VIEW:
+		UA_ViewNode_deleteMembers((UA_ViewNode *)node);
+		break;
+
+	default:
+		break;
+	}
+}
+
+/* We are in a rcu_read lock. So the node will not be freed under our feet. */
+static int compare(struct cds_lfht_node *htn, const void *orig) {
+	UA_NodeId *origid = (UA_NodeId*)orig;
+	UA_NodeId   *newid  = &((Namespace_Entry *)htn)->node.nodeId; /* The htn is first in the entry structure. */
+
+	return UA_NodeId_equal(newid, origid) == UA_EQUAL;
+}
+
+/* The entry was removed from the hashtable. No more readers can get it. Since
+   all readers using the node for a longer time (outside the rcu critical
+   section) increased the readcount, we only need to wait for the readcount
+   to reach zero. */
+static void markDead(Namespace_Entry *entry) {
+	if(uatomic_sub_return(&entry->readcount, ALIVE_BIT) > 0)
+		return;
+
+	node_deleteMembers(&entry->node);
+	UA_free(entry);
+	return;
+}
+
+/* Free the entry if it is dead and nobody uses it anymore */
+void Namespace_releaseManagedNode(const UA_Node *managed) {
+	if(managed == UA_NULL)
+		return;
+	
+	Namespace_Entry *entry = caa_container_of(managed, Namespace_Entry, node); // recover the entry from its node member
+	if(uatomic_sub_return(&entry->readcount, 1) > 0)
+		return;
+
+	node_deleteMembers(managed);
+	UA_free(entry);
+	return;
+}
+
+UA_Int32 Namespace_new(Namespace **result, UA_UInt32 namespaceId) {
+	Namespace *ns;
+	if(UA_alloc((void **)&ns, sizeof(Namespace)) != UA_SUCCESS)
+		return UA_ERR_NO_MEMORY;
+
+	/* 32 is the minimum size for the hashtable. */
+	ns->ht = cds_lfht_new(32, 32, 0, CDS_LFHT_AUTO_RESIZE, NULL);
+	if(!ns->ht) {
+		UA_free(ns);
+		return UA_ERR_NO_MEMORY;
+	}
+
+	ns->namespaceId = namespaceId;
+	*result = ns;
+	return UA_SUCCESS;
+}
+
+UA_Int32 Namespace_delete(Namespace *ns) {
+	if(ns == UA_NULL)
+		return UA_ERROR;
+
+	struct cds_lfht *ht = ns->ht;
+	struct cds_lfht_iter iter;
+	struct cds_lfht_node *found_htn;
+
+	rcu_read_lock();
+	cds_lfht_first(ht, &iter);
+	while(iter.node != UA_NULL) {
+		found_htn = cds_lfht_iter_get_node(&iter);
+		if(!cds_lfht_del(ht, found_htn))
+			call_rcu(found_htn, markDead);
+		cds_lfht_next(ht, &iter);
+	}
+	rcu_read_unlock();
+
+	if(!cds_lfht_destroy(ht, UA_NULL)) {
+		UA_free(ns);
+		return UA_SUCCESS;
+	}
+	else
+		return UA_ERROR;
+}
+
+UA_Int32 Namespace_insert(Namespace *ns, const UA_Node **node, UA_Byte flags) {
+	if(ns == UA_NULL || node == UA_NULL || *node == UA_NULL || (*node)->nodeId.namespace != ns->namespaceId)
+		return UA_ERROR;
+
+	UA_UInt32 nodesize;
+	/* Copy the node into the entry. Then reset the original node. It shall no longer be used. */
+	switch((*node)->nodeClass) {
+	case UA_NODECLASS_OBJECT:
+		nodesize = sizeof(UA_ObjectNode);
+		break;
+
+	case UA_NODECLASS_VARIABLE:
+		nodesize = sizeof(UA_VariableNode);
+		break;
+
+	case UA_NODECLASS_METHOD:
+		nodesize = sizeof(UA_MethodNode);
+		break;
+
+	case UA_NODECLASS_OBJECTTYPE:
+		nodesize = sizeof(UA_ObjectTypeNode);
+		break;
+
+	case UA_NODECLASS_VARIABLETYPE:
+		nodesize = sizeof(UA_VariableTypeNode);
+		break;
+
+	case UA_NODECLASS_REFERENCETYPE:
+		nodesize = sizeof(UA_ReferenceTypeNode);
+		break;
+
+	case UA_NODECLASS_DATATYPE:
+		nodesize = sizeof(UA_DataTypeNode);
+		break;
+
+	case UA_NODECLASS_VIEW:
+		nodesize = sizeof(UA_ViewNode);
+		break;
+
+	default:
+		return UA_ERROR;
+	}
+
+	Namespace_Entry *entry;
+	if(UA_alloc((void **)&entry, sizeof(Namespace_Entry) - sizeof(UA_Node) + nodesize) != UA_SUCCESS)
+		return UA_ERR_NO_MEMORY;
+	memcpy(&entry->node, *node, nodesize);
+
+	cds_lfht_node_init(&entry->htn);
+	entry->readcount = ALIVE_BIT;
+	if(flags & NAMESPACE_INSERT_GETMANAGED)
+		entry->readcount++;
+
+	hash_t nhash = hash(&(*node)->nodeId);
+	struct cds_lfht_node *result;
+	if(flags & NAMESPACE_INSERT_UNIQUE) {
+		rcu_read_lock();
+		result = cds_lfht_add_unique(ns->ht, nhash, compare, &entry->node.nodeId, &entry->htn);
+		rcu_read_unlock();
+
+		/* If the nodeid exists already */
+		if(result != &entry->htn) {
+			UA_free(entry);
+			return UA_ERROR;     // TODO: define a UA_EXISTS_ALREADY
+		}
+	} else {
+		rcu_read_lock();
+		result = cds_lfht_add_replace(ns->ht, nhash, compare, &(*node)->nodeId, &entry->htn);
+		/* If an entry got replaced, mark it as dead. */
+		if(result)
+			call_rcu(result, markDead);      /* Queue this for the next time when no readers are on the entry.*/
+		rcu_read_unlock();
+	}
+
+	UA_free((UA_Node*)*node);     /* The old node is replaced by a managed node. */
+	if(flags & NAMESPACE_INSERT_GETMANAGED)
+		*node = &entry->node;
+	else
+		*node = UA_NULL;
+
+	return UA_SUCCESS;
+}
+
+UA_Int32 Namespace_remove(Namespace *ns, const UA_NodeId *nodeid) {
+	hash_t nhash = hash(nodeid);
+	struct cds_lfht_iter iter;
+
+	rcu_read_lock();
+	cds_lfht_lookup(ns->ht, nhash, compare, nodeid, &iter);
+	struct cds_lfht_node *found_node = cds_lfht_iter_get_node(&iter);
+
+	/* If this fails, then the node has already been removed. */
+	if(!found_node || cds_lfht_del(ns->ht, found_node) != 0) {
+		rcu_read_unlock();
+		return UA_ERROR;
+	}
+	
+	call_rcu(found_node, markDead);
+	rcu_read_unlock();
+
+	return UA_SUCCESS;
+}
+
+UA_Int32 Namespace_get(const Namespace *ns, const UA_NodeId *nodeid, const UA_Node **managedNode) {
+	hash_t nhash     = hash(nodeid);
+	struct cds_lfht_iter iter;
+
+	rcu_read_lock();
+	cds_lfht_lookup(ns->ht, nhash, compare, nodeid, &iter);
+	Namespace_Entry *found_entry = (Namespace_Entry *)cds_lfht_iter_get_node(&iter);
+
+	if(!found_entry) {
+		rcu_read_unlock();
+		return UA_ERROR;  // TODO: UA_NOTFOUND
+	}
+
+	/* This is done within a read-lock. The node will not be marked dead within a read-lock. */
+	uatomic_inc(&found_entry->readcount);
+	rcu_read_unlock();
+
+	*managedNode = &found_entry->node;
+	return UA_SUCCESS;
+}
+
+UA_Int32 Namespace_iterate(const Namespace *ns, Namespace_nodeVisitor visitor) {
+	if(ns == UA_NULL || visitor == UA_NULL)
+		return UA_ERROR;
+	
+	struct cds_lfht *ht = ns->ht;
+	struct cds_lfht_iter iter;
+
+	rcu_read_lock();
+	cds_lfht_first(ht, &iter);
+	while(iter.node != UA_NULL) {
+		Namespace_Entry *found_entry = (Namespace_Entry *)cds_lfht_iter_get_node(&iter);
+		uatomic_inc(&found_entry->readcount);
+		const UA_Node *node = &found_entry->node;
+		rcu_read_unlock();
+		visitor(node);
+		Namespace_releaseManagedNode(node);
+		rcu_read_lock();
+		cds_lfht_next(ht, &iter);
+	}
+	rcu_read_unlock();
+
+	return UA_SUCCESS;
+}

+ 5 - 10
src/ua_services_attribute.c

@@ -50,10 +50,8 @@ static UA_DataValue service_read_node(Application *app, const UA_ReadValueId *id
 	}
 
 	UA_Node const *node = UA_NULL;
-	Namespace_Entry_Lock *lock = UA_NULL;
-
 	DBG_VERBOSE(UA_NodeId_printf("service_read_node - search for ", &(id->nodeId)));
-	UA_Int32 result = Namespace_get(ns, &(id->nodeId), &node, &lock);
+	UA_Int32 result = Namespace_get(ns, &(id->nodeId), &node);
 	if(result != UA_SUCCESS || node == UA_NULL) {
 		v.encodingMask = UA_DATAVALUE_ENCODINGMASK_STATUSCODE;
 		v.status       = UA_STATUSCODE_BADNODEIDUNKNOWN;
@@ -216,7 +214,7 @@ static UA_DataValue service_read_node(Application *app, const UA_ReadValueId *id
 		break;
 	}
 
-	Namespace_Entry_Lock_release(lock);
+	Namespace_releaseManagedNode(node);
 
 	if(retval != UA_SUCCESS) {
 		v.encodingMask = UA_DATAVALUE_ENCODINGMASK_STATUSCODE;
@@ -261,15 +259,12 @@ UA_Int32 Service_Write_writeNode(Application *app, UA_WriteValue *writeValue, UA
 		*result = UA_STATUSCODE_BADNODEIDINVALID;
 		return UA_ERROR;
 	}
-	Namespace_Entry_Lock *lock;
-	const UA_Node *node;
 
-	if(Namespace_get(ns, &writeValue->nodeId,&node, &lock) != UA_SUCCESS){
+	const UA_Node *node;
+	if(Namespace_get(ns, &writeValue->nodeId, &node) != UA_SUCCESS){
 		return UA_ERROR;
 	}
 
-
-
 	switch(writeValue->attributeId) {
 	case UA_ATTRIBUTEID_NODEID:
 		if(writeValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){
@@ -439,7 +434,7 @@ UA_Int32 Service_Write_writeNode(Application *app, UA_WriteValue *writeValue, UA
 		break;
 	}
 
-	Namespace_Entry_Lock_release(lock);
+	Namespace_releaseManagedNode(node);
 	return retval;
 
 }

+ 8 - 12
src/ua_services_nodemanagement.c

@@ -34,15 +34,13 @@ static UA_AddNodesResult addSingleNode(Application *app, UA_AddNodesItem *item)
 
 	UA_Int32 status = UA_SUCCESS;
 	const UA_Node *parent;
-	Namespace_Entry_Lock *parent_lock = UA_NULL;
-
-	CHECKED_ACTION(Namespace_get(parent_ns, &item->parentNodeId.nodeId, &parent, &parent_lock),
+	CHECKED_ACTION(Namespace_get(parent_ns, &item->parentNodeId.nodeId, &parent),
 				   result.statusCode = UA_STATUSCODE_BADPARENTNODEIDINVALID, ret);
 
-	if(!nodeid_isnull && Namespace_contains(ns, &item->requestedNewNodeId.nodeId)) {
-		result.statusCode = UA_STATUSCODE_BADNODEIDEXISTS;
-		goto ret;
-	}
+	/* if(!nodeid_isnull && Namespace_contains(ns, &item->requestedNewNodeId.nodeId)) { */
+	/* 	result.statusCode = UA_STATUSCODE_BADNODEIDEXISTS; */
+	/* 	goto ret; */
+	/* } */
 
 	/**
 	   TODO:
@@ -63,7 +61,7 @@ static UA_AddNodesResult addSingleNode(Application *app, UA_AddNodesItem *item)
 	 */
 
  ret:
-	Namespace_Entry_Lock_release(parent_lock);
+	Namespace_releaseManagedNode(parent);
 	return result;
 }
 
@@ -121,10 +119,8 @@ UA_Int32 AddReference(UA_Node *node, UA_ReferenceNode *reference, Namespace *tar
 		return retval;
 
 	UA_Node *targetnode;
-	Namespace_Entry_Lock *lock;
-	// TODO: Nodes in the namespace are immutable (for lockless multithreading).
 	// Do a copy every time?
-	if(Namespace_get(targetns, &reference->targetId.nodeId, (const UA_Node**)&targetnode, &lock) != UA_SUCCESS)
+	if(Namespace_get(targetns, &reference->targetId.nodeId, (const UA_Node**)&targetnode) != UA_SUCCESS)
 		return UA_ERROR;
 
 	UA_ReferenceNode inversereference;
@@ -132,7 +128,7 @@ UA_Int32 AddReference(UA_Node *node, UA_ReferenceNode *reference, Namespace *tar
 	inversereference.isInverse = !reference->isInverse;
 	inversereference.targetId = (UA_ExpandedNodeId){node->nodeId, UA_STRING_NULL, 0};	
 	retval = AddSingleReference(targetnode, &inversereference);
-	Namespace_Entry_Lock_release(lock);
+	Namespace_releaseManagedNode(targetnode);
 
 	return retval;
 }

+ 10 - 16
src/ua_services_view.c

@@ -4,9 +4,7 @@
 UA_Int32 Service_Browse_getReferenceDescription(Namespace *ns, UA_ReferenceNode* reference, UA_UInt32 nodeClassMask,
 												UA_UInt32 resultMask, UA_ReferenceDescription* referenceDescription) {
 	const UA_Node* foundNode;
-	Namespace_Entry_Lock *lock;
-
-	if(Namespace_get(ns,&reference->targetId.nodeId,&foundNode, &lock) != UA_SUCCESS)
+	if(Namespace_get(ns,&reference->targetId.nodeId,&foundNode) != UA_SUCCESS)
 		return UA_ERROR;
 
 	UA_NodeId_copy(&foundNode->nodeId, &referenceDescription->nodeId.nodeId);
@@ -48,7 +46,7 @@ UA_Int32 Service_Browse_getReferenceDescription(Namespace *ns, UA_ReferenceNode*
 		}
 	}
 	
-	Namespace_Entry_Lock_release(lock);
+	Namespace_releaseManagedNode(foundNode);
 	return UA_SUCCESS;
 }
 
@@ -59,7 +57,7 @@ struct SubRefTypeId {
 };
 UA_SLIST_HEAD(SubRefTypeIdList, SubRefTypeId);
 
-UA_UInt32 walkReferenceTree(Namespace *ns, UA_ReferenceTypeNode *current, struct SubRefTypeIdList *list) {
+UA_UInt32 walkReferenceTree(Namespace *ns, const UA_ReferenceTypeNode *current, struct SubRefTypeIdList *list) {
 	// insert the current referencetype
 	struct SubRefTypeId *element;
 	UA_alloc((void**)&element, sizeof(struct SubRefTypeId));
@@ -73,11 +71,10 @@ UA_UInt32 walkReferenceTree(Namespace *ns, UA_ReferenceTypeNode *current, struct
 		if(current->references[i].referenceTypeId.identifier.numeric == 45 /* HasSubtype */ &&
 		   current->references[i].isInverse == UA_FALSE) {
 			const UA_Node *node;
-			Namespace_Entry_Lock *lock;
-			if(Namespace_get(ns, &current->references[i].targetId.nodeId, &node, &lock) == UA_SUCCESS &&
+			if(Namespace_get(ns, &current->references[i].targetId.nodeId, &node) == UA_SUCCESS &&
 			   node->nodeClass == UA_NODECLASS_REFERENCETYPE) {
 				count += walkReferenceTree(ns,(UA_ReferenceTypeNode*)node, list);
-				Namespace_Entry_Lock_release(lock);
+				Namespace_releaseManagedNode(node);
 			}
 		}
 	}
@@ -90,13 +87,12 @@ static UA_Int32 findSubReferenceTypes(Namespace *ns, UA_NodeId *rootReferenceTyp
 	UA_SLIST_INIT(&list);
 
 	// walk the tree
-	UA_ReferenceTypeNode *root;
-	Namespace_Entry_Lock *lock;
-	if(Namespace_get(ns, rootReferenceType, (const UA_Node**)&root, &lock) != UA_SUCCESS ||
+	const UA_ReferenceTypeNode *root;
+	if(Namespace_get(ns, rootReferenceType, (const UA_Node**)&root) != UA_SUCCESS ||
 	   root->nodeClass != UA_NODECLASS_REFERENCETYPE)
 		return UA_ERROR;
 	UA_UInt32 count = walkReferenceTree(ns, root, &list);
-	Namespace_Entry_Lock_release(lock);
+	Namespace_releaseManagedNode((const UA_Node*) root);
 
 	// copy results into an array
 	UA_alloc((void**) ids, sizeof(UA_NodeId)*count);
@@ -129,9 +125,7 @@ static inline UA_Boolean Service_Browse_returnReference(UA_BrowseDescription *br
 static void Service_Browse_getBrowseResult(Namespace *ns, UA_BrowseDescription *browseDescription,
 										   UA_UInt32 maxReferences, UA_BrowseResult *browseResult) {
 	const UA_Node* node;
-	Namespace_Entry_Lock *lock;
-
-	if(Namespace_get(ns, &browseDescription->nodeId, &node, &lock) != UA_SUCCESS) {
+	if(Namespace_get(ns, &browseDescription->nodeId, &node) != UA_SUCCESS) {
 		browseResult->statusCode = UA_STATUSCODE_BADNODEIDUNKNOWN;
 		return;
 	}
@@ -185,7 +179,7 @@ static void Service_Browse_getBrowseResult(Namespace *ns, UA_BrowseDescription *
 		// Todo. Set the Statuscode and the continuation point.
 	}
 	
-	Namespace_Entry_Lock_release(lock);
+	Namespace_releaseManagedNode(node);
 	UA_Array_delete(relevantReferenceTypes, relevantReferenceTypesCount, &UA_.types[UA_NODEID]);
 }
 

+ 1 - 1
src/ua_types.c

@@ -136,7 +136,7 @@ UA_Int32 UA_String_init(UA_String *p) {
 UA_TYPE_DELETE_DEFAULT(UA_String)
 UA_Int32 UA_String_deleteMembers(UA_String *p) {
 	UA_Int32 retval = UA_SUCCESS;
-	if(p!= UA_NULL && p->data != UA_NULL) {
+	if(p != UA_NULL && p->length > 0 && p->data != UA_NULL) {
 		retval   |= UA_free(p->data);
 		retval   |= UA_String_init(p);
 	}

+ 3 - 3
tests/Makefile.am

@@ -23,15 +23,15 @@ endif
 
 check_memory-mem: check_memory
 	$(MAKE) $(AM_MAKEFLAGS) check TESTS="check_memory" TESTS_ENVIRONMENT=$(TESTS_ENVIRONMENT_MEM)
-	
+
 clean-local:
 	rm -rf coverage || true;
 	rm -rf *.gcda *.gcdo *.gcno *.o *.gcov *.log *.trs
 	lcov --directory . --zerocounters || true;
-	
+
 # --- no changes beyond this line needed ---
 INCLUDE = @CHECK_CFLAGS@ -I$(top_builddir)/src -I$(top_builddir)/include
-LDADD = $(top_builddir)/lib/libopen62541.a @CHECK_LIBS@
+LDADD = $(top_builddir)/lib/libopen62541.a @CHECK_LIBS@ $(GLOBAL_AM_LDADD)
 
 AM_LDFLAGS = $(LDADD)
 if COVERAGE

+ 111 - 92
tests/check_namespace.c

@@ -5,6 +5,10 @@
 #include "ua_namespace.h"
 #include "check.h"
 
+#ifdef MULTITHREADING
+#include <urcu.h>
+#endif
+
 int zeroCnt = 0;
 int visitCnt = 0;
 void checkZeroVisitor(const UA_Node* node) {
@@ -12,129 +16,126 @@ void checkZeroVisitor(const UA_Node* node) {
 	if (node == UA_NULL) zeroCnt++;
 }
 
+void printVisitor(const UA_Node* node) {
+	printf("%d\n", node->nodeId.identifier.numeric);
+}
+
 START_TEST(test_Namespace) {
 	Namespace *ns = UA_NULL;
-	Namespace_new(&ns, 512, 0);
+	Namespace_new(&ns, 0);
 	Namespace_delete(ns);
 }
 END_TEST
 
-UA_Int32 createNode(UA_Node** p, UA_Int16 nsid, UA_Int32 id) {
-	UA_Node_new(p);
-	(*p)->nodeId.encodingByte = UA_NODEIDTYPE_FOURBYTE;
-	(*p)->nodeId.namespace = nsid;
-	(*p)->nodeId.identifier.numeric = id;
+UA_Int32 createNode(const UA_Node** p, UA_Int16 nsid, UA_Int32 id) {
+	UA_VariableNode * p2;
+	UA_VariableNode_new(&p2);
+	p2->nodeId.encodingByte = UA_NODEIDTYPE_FOURBYTE;
+	p2->nodeId.namespace = nsid;
+	p2->nodeId.identifier.numeric = id;
+	p2->nodeClass = UA_NODECLASS_VARIABLE;
+	*p = (const UA_Node *)p2;
 	return UA_SUCCESS;
 }
 
-START_TEST(confirmExistenceInNamespaceWithSingleEntry) {
-	// given
-	Namespace *ns;
-	Namespace_new(&ns, 512, 0);
-	UA_Node* n1; createNode(&n1,0,2253); Namespace_insert(ns,n1);
-	UA_Int32 retval;
-	// when
-	retval = Namespace_contains(ns,&(n1->nodeId));
-	// then
-	ck_assert_int_eq(retval, UA_TRUE);
-	// finally
-	Namespace_delete(ns);
-}
-END_TEST
-
 START_TEST(findNodeInNamespaceWithSingleEntry) {
+#ifdef MULTITHREADING
+   	rcu_register_thread();
+#endif
 	// given
 	Namespace *ns;
-	Namespace_new(&ns, 512, 0);
-	UA_Node* n1; createNode(&n1,0,2253); Namespace_insert(ns,n1);
+	Namespace_new(&ns, 0);
+	const UA_Node* n1; createNode(&n1,0,2253);
+	Namespace_insert(ns, &n1, NAMESPACE_INSERT_UNIQUE | NAMESPACE_INSERT_GETMANAGED);
 	const UA_Node* nr = UA_NULL;
-	Namespace_Entry_Lock* nl = UA_NULL;
 	UA_Int32 retval;
 	// when
-	retval = Namespace_get(ns,&(n1->nodeId),&nr,&nl);
+	retval = Namespace_get(ns,&(n1->nodeId),&nr);
 	// then
 	ck_assert_int_eq(retval, UA_SUCCESS);
 	ck_assert_ptr_eq(nr,n1);
 	// finally
+	Namespace_releaseManagedNode(n1);
+	Namespace_releaseManagedNode(nr);
 	Namespace_delete(ns);
-}
-END_TEST
-
-START_TEST(findNodeInNamespaceWithTwoEntries) {
-	// given
-	Namespace *ns;
-	Namespace_new(&ns, 512, 0);
-	UA_Node* n1; createNode(&n1,0,2253); Namespace_insert(ns,n1);
-	UA_Node* n2; createNode(&n2,0,2255); Namespace_insert(ns,n2);
-
-	const UA_Node* nr = UA_NULL;
-	Namespace_Entry_Lock* nl = UA_NULL;
-	UA_Int32 retval;
-	// when
-	retval = Namespace_get(ns,&(n2->nodeId),&nr,&nl);
-	// then
-	ck_assert_int_eq(retval, UA_SUCCESS);
-	ck_assert_ptr_eq(nr,n2);
-	// finally
-	Namespace_delete(ns);
+#ifdef MULTITHREADING
+	rcu_unregister_thread();
+#endif
 }
 END_TEST
 
 START_TEST(failToFindNodeInOtherNamespace) {
+#ifdef MULTITHREADING
+   	rcu_register_thread();
+#endif
 	// given
 	Namespace *ns;
-	Namespace_new(&ns, 512, 0);
-	UA_Node* n1; createNode(&n1,0,2253); Namespace_insert(ns,n1);
-	UA_Node* n2; createNode(&n2,0,2255); Namespace_insert(ns,n2);
+	Namespace_new(&ns, 0);
+
+	const UA_Node* n1; createNode(&n1,0,2253); Namespace_insert(ns, &n1, 0);
+	const UA_Node* n2; createNode(&n2,0,2255); Namespace_insert(ns, &n2, 0);
 
 	const UA_Node* nr = UA_NULL;
-	Namespace_Entry_Lock* nl = UA_NULL;
 	UA_Int32 retval;
 	// when
-	UA_Node* n; createNode(&n,1,2255);
-	retval = Namespace_get(ns,&(n->nodeId),&nr,&nl);
+	const UA_Node* n; createNode(&n,1,2255);
+	retval = Namespace_get(ns,&(n->nodeId), &nr);
 	// then
 	ck_assert_int_ne(retval, UA_SUCCESS);
 	// finally
-	UA_free(n);
+	UA_free((void *)n);
+	Namespace_releaseManagedNode(nr);
 	Namespace_delete(ns);
+#ifdef MULTITHREADING
+	rcu_unregister_thread();
+#endif
 }
 END_TEST
 
 START_TEST(findNodeInNamespaceWithSeveralEntries) {
+#ifdef MULTITHREADING
+   	rcu_register_thread();
+#endif
 	// given
 	Namespace *ns;
-	Namespace_new(&ns, 512, 0);
-	UA_Node* n1; createNode(&n1,0,2253); Namespace_insert(ns,n1);
-	UA_Node* n2; createNode(&n2,0,2255); Namespace_insert(ns,n2);
-	UA_Node* n3; createNode(&n3,0,2257); Namespace_insert(ns,n3);
-	UA_Node* n4; createNode(&n4,0,2200); Namespace_insert(ns,n4);
-	UA_Node* n5; createNode(&n5,0,1); Namespace_insert(ns,n5);
-	UA_Node* n6; createNode(&n6,0,12); Namespace_insert(ns,n6);
+	Namespace_new(&ns, 0);
+	const UA_Node* n1; createNode(&n1,0,2253); Namespace_insert(ns, &n1, 0);
+	const UA_Node* n2; createNode(&n2,0,2255); Namespace_insert(ns, &n2, 0);
+	const UA_Node* n3; createNode(&n3,0,2257); Namespace_insert(ns, &n3, NAMESPACE_INSERT_GETMANAGED);
+	const UA_Node* n4; createNode(&n4,0,2200); Namespace_insert(ns, &n4, 0);
+	const UA_Node* n5; createNode(&n5,0,1); Namespace_insert(ns, &n5, 0);
+	const UA_Node* n6; createNode(&n6,0,12); Namespace_insert(ns, &n6, 0);
 
 	const UA_Node* nr = UA_NULL;
-	Namespace_Entry_Lock* nl = UA_NULL;
 	UA_Int32 retval;
 	// when
-	retval = Namespace_get(ns,&(n3->nodeId),&nr,&nl);
+	retval = Namespace_get(ns,&(n3->nodeId),&nr);
 	// then
 	ck_assert_int_eq(retval, UA_SUCCESS);
 	ck_assert_ptr_eq(nr,n3);
 	// finally
+	Namespace_releaseManagedNode(n3);
+	Namespace_releaseManagedNode(nr);
 	Namespace_delete(ns);
+#ifdef MULTITHREADING
+	rcu_unregister_thread();
+#endif
 }
 END_TEST
 
 START_TEST(iterateOverNamespaceShallNotVisitEmptyNodes) {
+#ifdef MULTITHREADING
+   	rcu_register_thread();
+#endif
 	// given
 	Namespace *ns;
-	Namespace_new(&ns, 512, 0);
-	UA_Node* n1; createNode(&n1,0,2253); Namespace_insert(ns,n1);
-	UA_Node* n2; createNode(&n2,0,2255); Namespace_insert(ns,n2);
-	UA_Node* n3; createNode(&n3,0,2257); Namespace_insert(ns,n3);
-	UA_Node* n4; createNode(&n4,0,2200); Namespace_insert(ns,n4);
-	UA_Node* n5; createNode(&n5,0,1); Namespace_insert(ns,n5);
-	UA_Node* n6; createNode(&n6,0,12); Namespace_insert(ns,n6);
+	Namespace_new(&ns, 0);
+	const UA_Node* n1; createNode(&n1,0,2253); Namespace_insert(ns, &n1, 0);
+	const UA_Node* n2; createNode(&n2,0,2255); Namespace_insert(ns, &n2, 0);
+	const UA_Node* n3; createNode(&n3,0,2257); Namespace_insert(ns, &n3, 0);
+	const UA_Node* n4; createNode(&n4,0,2200); Namespace_insert(ns, &n4, 0);
+	const UA_Node* n5; createNode(&n5,0,1); Namespace_insert(ns, &n5, 0);
+	const UA_Node* n6; createNode(&n6,0,12); Namespace_insert(ns, &n6, 0);
 
 	UA_Int32 retval;
 	// when
@@ -147,41 +148,53 @@ START_TEST(iterateOverNamespaceShallNotVisitEmptyNodes) {
 	ck_assert_int_eq(visitCnt, 6);
 	// finally
 	Namespace_delete(ns);
+#ifdef MULTITHREADING
+	rcu_unregister_thread();
+#endif
 }
 END_TEST
 
 START_TEST(findNodeInExpandedNamespace) {
+#ifdef MULTITHREADING
+   	rcu_register_thread();
+#endif
 	// given
 	Namespace *ns;
-	Namespace_new(&ns, 10, 0);
-	UA_Node* n;
+	Namespace_new(&ns, 0);
+	const UA_Node* n;
 	UA_Int32 i=0;
 	for (; i<200; i++) {
-		createNode(&n,0,i); Namespace_insert(ns,n);
+		createNode(&n,0,i); Namespace_insert(ns, &n, 0);
 	}
 	const UA_Node* nr = UA_NULL;
-	Namespace_Entry_Lock* nl = UA_NULL;
 	UA_Int32 retval;
 	// when
 	createNode(&n,0,25);
-	retval = Namespace_get(ns,&(n->nodeId),&nr,&nl);
+	retval = Namespace_get(ns,&(n->nodeId),&nr);
 	// then
 	ck_assert_int_eq(retval, UA_SUCCESS);
 	ck_assert_int_eq(nr->nodeId.identifier.numeric,n->nodeId.identifier.numeric);
 	// finally
-	UA_free(n);
+	UA_free((void*)n);
+	Namespace_releaseManagedNode(nr);
 	Namespace_delete(ns);
+#ifdef MULTITHREADING
+	rcu_unregister_thread();
+#endif
 }
 END_TEST
 
 START_TEST(iterateOverExpandedNamespaceShallNotVisitEmptyNodes) {
+#ifdef MULTITHREADING
+   	rcu_register_thread();
+#endif
 	// given
 	Namespace *ns;
-	Namespace_new(&ns, 10, 0);
-	UA_Node* n;
+	Namespace_new(&ns, 0);
+	const UA_Node* n;
 	UA_Int32 i=0;
 	for (; i<200; i++) {
-		createNode(&n,0,i); Namespace_insert(ns,n);
+		createNode(&n,0,i); Namespace_insert(ns, &n, 0);
 	}
 	// when
 	UA_Int32 retval;
@@ -194,30 +207,38 @@ START_TEST(iterateOverExpandedNamespaceShallNotVisitEmptyNodes) {
 	ck_assert_int_eq(visitCnt, 200);
 	// finally
 	Namespace_delete(ns);
+#ifdef MULTITHREADING
+	rcu_unregister_thread();
+#endif
 }
 END_TEST
 
 START_TEST(failToFindNonExistantNodeInNamespaceWithSeveralEntries) {
+#ifdef MULTITHREADING
+   	rcu_register_thread();
+#endif
 	// given
 	Namespace *ns;
-	Namespace_new(&ns, 512, 0);
-	UA_Node* n1; createNode(&n1,0,2253); Namespace_insert(ns,n1);
-	UA_Node* n2; createNode(&n2,0,2255); Namespace_insert(ns,n2);
-	UA_Node* n3; createNode(&n3,0,2257); Namespace_insert(ns,n3);
-	UA_Node* n4; createNode(&n4,0,2200); Namespace_insert(ns,n4);
-	UA_Node* n5; createNode(&n5,0,1); Namespace_insert(ns,n5);
-	UA_Node* n6; createNode(&n6,0,12);
+	Namespace_new(&ns, 0);
+	const UA_Node* n1; createNode(&n1,0,2253); Namespace_insert(ns, &n1, 0);
+	const UA_Node* n2; createNode(&n2,0,2255); Namespace_insert(ns, &n2, 0);
+	const UA_Node* n3; createNode(&n3,0,2257); Namespace_insert(ns, &n3, 0);
+	const UA_Node* n4; createNode(&n4,0,2200); Namespace_insert(ns, &n4, 0);
+	const UA_Node* n5; createNode(&n5,0,1); Namespace_insert(ns, &n5, 0);
+	const UA_Node* n6; createNode(&n6,0,12); 
 
 	const UA_Node* nr = UA_NULL;
-	Namespace_Entry_Lock* nl = UA_NULL;
 	UA_Int32 retval;
 	// when
-	retval = Namespace_get(ns,&(n6->nodeId),&nr,&nl);
+	retval = Namespace_get(ns, &(n6->nodeId), &nr);
 	// then
 	ck_assert_int_ne(retval, UA_SUCCESS);
 	// finally
-	UA_free(n6);
+	UA_free((void *)n6);
 	Namespace_delete(ns);
+#ifdef MULTITHREADING
+	rcu_unregister_thread();
+#endif
 }
 END_TEST
 
@@ -229,9 +250,7 @@ Suite * namespace_suite (void) {
 	suite_add_tcase (s, tc_cd);
 
 	TCase* tc_find = tcase_create ("Find");
-	tcase_add_test (tc_find, confirmExistenceInNamespaceWithSingleEntry);
 	tcase_add_test (tc_find, findNodeInNamespaceWithSingleEntry);
-	tcase_add_test (tc_find, findNodeInNamespaceWithTwoEntries);
 	tcase_add_test (tc_find, findNodeInNamespaceWithSeveralEntries);
 	tcase_add_test (tc_find, findNodeInExpandedNamespace);
 	tcase_add_test (tc_find, failToFindNonExistantNodeInNamespaceWithSeveralEntries);
@@ -239,8 +258,8 @@ Suite * namespace_suite (void) {
 	suite_add_tcase (s, tc_find);
 
 	TCase* tc_iterate = tcase_create ("Iterate");
-	tcase_add_test (tc_find, iterateOverNamespaceShallNotVisitEmptyNodes);
-	tcase_add_test (tc_find, iterateOverExpandedNamespaceShallNotVisitEmptyNodes);
+	tcase_add_test (tc_iterate, iterateOverNamespaceShallNotVisitEmptyNodes);
+	tcase_add_test (tc_iterate, iterateOverExpandedNamespaceShallNotVisitEmptyNodes);
 	suite_add_tcase (s, tc_iterate);
 
 	return s;
@@ -251,7 +270,7 @@ int main (void) {
 	int number_failed =0;
 	Suite *s = namespace_suite ();
 	SRunner *sr = srunner_create (s);
-	// srunner_set_fork_status(sr,CK_NOFORK);
+	//srunner_set_fork_status(sr,CK_NOFORK);
 	srunner_run_all (sr, CK_NORMAL);
 	number_failed += srunner_ntests_failed (sr);
 	srunner_free (sr);