ua_nodestore_concurrent.c

#include "ua_util.h"
#include "ua_nodestore.h"

#define ALIVE_BIT (1 << 15) /* Alive bit in the refcount */

struct nodeEntry {
    struct cds_lfht_node htn; /* contains next-ptr for urcu-hashmap */
    struct rcu_head rcu_head; /* For call-rcu */
    UA_UInt16 refcount;       /* Counts the amount of readers on it [alive-bit, 15 counter-bits] */
    UA_Node node;             /* Might be cast from any _bigger_ UA_Node* type. Allocate enough memory! */
};

struct UA_NodeStore {
    struct cds_lfht *ht; /* Hash table */
};

#include "ua_nodestore_hash.inc"
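/* Layout note: the hash-table node, the rcu head and the payload node share a
   single allocation. Because the UA_Node is the last member, an entry can be
   allocated with just enough room for the concrete node type, and a pointer to
   the embedded node (or rcu_head) can be mapped back to its entry with
   offsetof arithmetic, as done throughout this file. */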
static void node_deleteMembers(UA_Node *node) {
    switch(node->nodeClass) {
    case UA_NODECLASS_OBJECT:
        UA_ObjectNode_deleteMembers((UA_ObjectNode *)node);
        break;
    case UA_NODECLASS_VARIABLE:
        UA_VariableNode_deleteMembers((UA_VariableNode *)node);
        break;
    case UA_NODECLASS_METHOD:
        UA_MethodNode_deleteMembers((UA_MethodNode *)node);
        break;
    case UA_NODECLASS_OBJECTTYPE:
        UA_ObjectTypeNode_deleteMembers((UA_ObjectTypeNode *)node);
        break;
    case UA_NODECLASS_VARIABLETYPE:
        UA_VariableTypeNode_deleteMembers((UA_VariableTypeNode *)node);
        break;
    case UA_NODECLASS_REFERENCETYPE:
        UA_ReferenceTypeNode_deleteMembers((UA_ReferenceTypeNode *)node);
        break;
    case UA_NODECLASS_DATATYPE:
        UA_DataTypeNode_deleteMembers((UA_DataTypeNode *)node);
        break;
    case UA_NODECLASS_VIEW:
        UA_ViewNode_deleteMembers((UA_ViewNode *)node);
        break;
    default:
        UA_assert(UA_FALSE);
        break;
    }
}
/* We are in a rcu_read lock. So the node will not be freed under our feet. */
static int compare(struct cds_lfht_node *htn, const void *orig) {
    const UA_NodeId *origid = (const UA_NodeId *)orig;
    const UA_NodeId *newid  = &((struct nodeEntry *)htn)->node.nodeId; /* The htn is first in the entry structure. */
    return UA_NodeId_equal(newid, origid);
}

/* The entry was removed from the hashtable. No more readers can get it. Since
   all readers using the node for a longer time (outside the rcu critical
   section) increased the refcount, we only need to wait for the refcount
   to reach zero. */
static void markDead(struct rcu_head *head) {
    struct nodeEntry *entry = (struct nodeEntry*) ((uintptr_t)head - offsetof(struct nodeEntry, rcu_head));
    uatomic_and(&entry->refcount, ~ALIVE_BIT); // set the alive bit to zero
    if(uatomic_read(&entry->refcount) > 0)
        return;
    node_deleteMembers(&entry->node);
    UA_free(entry);
}
/* Free the entry if it is dead and nobody uses it anymore */
void UA_NodeStore_release(const UA_Node *managed) {
    struct nodeEntry *entry = (struct nodeEntry*) ((uintptr_t)managed - offsetof(struct nodeEntry, node));
    if(uatomic_add_return(&entry->refcount, -1) == 0) {
        node_deleteMembers(&entry->node);
        UA_free(entry);
    }
}
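/* An entry is freed on one of two paths: in markDead, when the rcu grace
   period has ended and no reader holds a refcount, or here in _release, when
   the last reader of an already-removed entry (alive bit cleared) drops the
   refcount to zero. */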
UA_NodeStore * UA_NodeStore_new() {
    UA_NodeStore *ns;
    if(!(ns = UA_malloc(sizeof(UA_NodeStore))))
        return UA_NULL;
    /* 32 is the minimum size for the hashtable. */
    ns->ht = cds_lfht_new(32, 32, 0, CDS_LFHT_AUTO_RESIZE, NULL);
    if(!ns->ht) {
        UA_free(ns);
        return UA_NULL;
    }
    return ns;
}
void UA_NodeStore_delete(UA_NodeStore *ns) {
    struct cds_lfht *ht = ns->ht;
    struct cds_lfht_iter iter;
    rcu_read_lock();
    cds_lfht_first(ht, &iter);
    while(iter.node) {
        if(!cds_lfht_del(ht, iter.node)) {
            struct nodeEntry *entry = (struct nodeEntry*) ((uintptr_t)iter.node - offsetof(struct nodeEntry, htn));
            call_rcu(&entry->rcu_head, markDead);
        }
        cds_lfht_next(ht, &iter);
    }
    rcu_read_unlock();
    cds_lfht_destroy(ht, UA_NULL);
    UA_free(ns);
}
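/* A minimal lifecycle sketch (illustration only, not part of the nodestore):
   it assumes the liburcu flavour in use requires explicit thread registration
   via rcu_register_thread(); the function name below is made up. */
#if 0
static void example_lifecycle(void) {
    rcu_register_thread();                 /* register this thread with liburcu */
    UA_NodeStore *ns = UA_NodeStore_new(); /* may return UA_NULL on malloc failure */
    if(ns)
        UA_NodeStore_delete(ns);           /* unlinks and frees all remaining entries */
    rcu_unregister_thread();
}
#endif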
UA_StatusCode UA_NodeStore_insert(UA_NodeStore *ns, UA_Node *node, const UA_Node **inserted) {
    size_t nodesize;
    /* Copy the node into the entry. The original node (shallow container) is
       freed afterwards and shall no longer be used. */
    switch(node->nodeClass) {
    case UA_NODECLASS_OBJECT:
        nodesize = sizeof(UA_ObjectNode);
        break;
    case UA_NODECLASS_VARIABLE:
        nodesize = sizeof(UA_VariableNode);
        break;
    case UA_NODECLASS_METHOD:
        nodesize = sizeof(UA_MethodNode);
        break;
    case UA_NODECLASS_OBJECTTYPE:
        nodesize = sizeof(UA_ObjectTypeNode);
        break;
    case UA_NODECLASS_VARIABLETYPE:
        nodesize = sizeof(UA_VariableTypeNode);
        break;
    case UA_NODECLASS_REFERENCETYPE:
        nodesize = sizeof(UA_ReferenceTypeNode);
        break;
    case UA_NODECLASS_DATATYPE:
        nodesize = sizeof(UA_DataTypeNode);
        break;
    case UA_NODECLASS_VIEW:
        nodesize = sizeof(UA_ViewNode);
        break;
    default:
        return UA_STATUSCODE_BADINTERNALERROR;
    }

    struct nodeEntry *entry;
    if(!(entry = UA_malloc(sizeof(struct nodeEntry) - sizeof(UA_Node) + nodesize)))
        return UA_STATUSCODE_BADOUTOFMEMORY;
    UA_memcpy((void*)&entry->node, node, nodesize);
    cds_lfht_node_init(&entry->htn);
    entry->refcount = ALIVE_BIT;
    if(inserted) // increase the counter before adding the node
        entry->refcount++;

    struct cds_lfht_node *result;
    if(!UA_NodeId_isNull(&node->nodeId)) {
        hash_t h = hash(&node->nodeId);
        rcu_read_lock();
        result = cds_lfht_add_unique(ns->ht, h, compare, &entry->node.nodeId, &entry->htn);
        rcu_read_unlock();
        /* The nodeid exists already */
        if(result != &entry->htn) {
            UA_free(entry);
            return UA_STATUSCODE_BADNODEIDEXISTS;
        }
    } else {
        /* Create a unique nodeid */
        ((UA_Node *)&entry->node)->nodeId.identifierType = UA_NODEIDTYPE_NUMERIC;
        ((UA_Node *)&entry->node)->nodeId.namespaceIndex = 1; // namespace 1 is always in the local nodestore
        if(((UA_Node *)&entry->node)->nodeClass == UA_NODECLASS_VARIABLE) {
            // set the namespaceIndex in the browseName as well if the id is generated
            ((UA_VariableNode*)&entry->node)->browseName.namespaceIndex =
                ((UA_Node *)&entry->node)->nodeId.namespaceIndex;
        }
        unsigned long identifier;
        long before, after;
        rcu_read_lock();
        cds_lfht_count_nodes(ns->ht, &before, &identifier, &after); // current amount of nodes stored
        identifier++;
        ((UA_Node *)&entry->node)->nodeId.identifier.numeric = identifier;
        while(UA_TRUE) {
            hash_t nhash = hash(&entry->node.nodeId);
            result = cds_lfht_add_unique(ns->ht, nhash, compare, &entry->node.nodeId, &entry->htn);
            if(result == &entry->htn)
                break;
            ((UA_Node *)&entry->node)->nodeId.identifier.numeric += (identifier * 2654435761);
        }
        rcu_read_unlock();
    }
    UA_free(node);
    if(inserted)
        *inserted = &entry->node;
    return UA_STATUSCODE_GOOD;
}
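/* A usage sketch (illustration only): it assumes a UA_ObjectNode_new()
   allocator that returns a zero-initialized UA_ObjectNode; the helper name and
   the numeric identifier are made up. Note that _insert takes ownership of the
   passed node container and frees it. */
#if 0
static UA_StatusCode example_insertObject(UA_NodeStore *ns) {
    UA_ObjectNode *n = UA_ObjectNode_new();
    if(!n)
        return UA_STATUSCODE_BADOUTOFMEMORY;
    n->nodeClass = UA_NODECLASS_OBJECT;
    n->nodeId.namespaceIndex = 1;
    n->nodeId.identifierType = UA_NODEIDTYPE_NUMERIC;
    n->nodeId.identifier.numeric = 50000; /* or leave the nodeId null to auto-generate one */
    const UA_Node *managed = UA_NULL;
    UA_StatusCode retval = UA_NodeStore_insert(ns, (UA_Node*)n, &managed);
    if(retval == UA_STATUSCODE_GOOD)
        UA_NodeStore_release(managed); /* drop the extra refcount taken for &managed */
    return retval;
}
#endif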
UA_StatusCode UA_NodeStore_replace(UA_NodeStore *ns, const UA_Node *oldNode, UA_Node *node,
                                   const UA_Node **inserted) {
    size_t nodesize;
    /* Copy the node into the entry. The original node (shallow container) is
       freed afterwards and shall no longer be used. */
    switch(node->nodeClass) {
    case UA_NODECLASS_OBJECT:
        nodesize = sizeof(UA_ObjectNode);
        break;
    case UA_NODECLASS_VARIABLE:
        nodesize = sizeof(UA_VariableNode);
        break;
    case UA_NODECLASS_METHOD:
        nodesize = sizeof(UA_MethodNode);
        break;
    case UA_NODECLASS_OBJECTTYPE:
        nodesize = sizeof(UA_ObjectTypeNode);
        break;
    case UA_NODECLASS_VARIABLETYPE:
        nodesize = sizeof(UA_VariableTypeNode);
        break;
    case UA_NODECLASS_REFERENCETYPE:
        nodesize = sizeof(UA_ReferenceTypeNode);
        break;
    case UA_NODECLASS_DATATYPE:
        nodesize = sizeof(UA_DataTypeNode);
        break;
    case UA_NODECLASS_VIEW:
        nodesize = sizeof(UA_ViewNode);
        break;
    default:
        return UA_STATUSCODE_BADINTERNALERROR;
    }

    struct nodeEntry *newEntry;
    if(!(newEntry = UA_malloc(sizeof(struct nodeEntry) - sizeof(UA_Node) + nodesize)))
        return UA_STATUSCODE_BADOUTOFMEMORY;
    UA_memcpy((void*)&newEntry->node, node, nodesize);
    cds_lfht_node_init(&newEntry->htn);
    newEntry->refcount = ALIVE_BIT;
    if(inserted) // increase the counter before adding the node
        newEntry->refcount++;

    hash_t h = hash(&node->nodeId);
    struct cds_lfht_iter iter;
    rcu_read_lock();
    cds_lfht_lookup(ns->ht, h, compare, &node->nodeId, &iter);
    /* No node found that can be replaced */
    if(!iter.node) {
        rcu_read_unlock();
        UA_free(newEntry);
        return UA_STATUSCODE_BADNODEIDUNKNOWN;
    }
    struct nodeEntry *oldEntry = (struct nodeEntry*) ((uintptr_t)iter.node - offsetof(struct nodeEntry, htn));
    /* The node we found is obsolete */
    if(&oldEntry->node != oldNode) {
        rcu_read_unlock();
        UA_free(newEntry);
        return UA_STATUSCODE_BADINTERNALERROR;
    }
    /* The old node is replaced by a managed node. */
    if(cds_lfht_replace(ns->ht, &iter, h, compare, &node->nodeId, &newEntry->htn) != 0) {
        /* Replacing failed. Maybe the node got replaced just before this thread tried to. */
        rcu_read_unlock();
        UA_free(newEntry);
        return UA_STATUSCODE_BADINTERNALERROR;
    }
    /* The entry got replaced; mark the old one as dead. */
    call_rcu(&oldEntry->rcu_head, markDead);
    rcu_read_unlock();
    UA_free(node);
    if(inserted)
        *inserted = &newEntry->node;
    return UA_STATUSCODE_GOOD;
}
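/* The replace follows an optimistic concurrency scheme: the caller passes the
   oldNode pointer it obtained earlier (e.g. via _get), and the replacement is
   rejected with UA_STATUSCODE_BADINTERNALERROR if another thread has swapped
   or removed the entry in the meantime, so the caller can re-read and retry. */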
UA_StatusCode UA_NodeStore_remove(UA_NodeStore *ns, const UA_NodeId *nodeid) {
    hash_t nhash = hash(nodeid);
    struct cds_lfht_iter iter;
    rcu_read_lock();
    /* If the lookup fails, then the node has already been removed. */
    cds_lfht_lookup(ns->ht, nhash, compare, nodeid, &iter);
    if(!iter.node || cds_lfht_del(ns->ht, iter.node) != 0) {
        rcu_read_unlock();
        return UA_STATUSCODE_BADNODEIDUNKNOWN;
    }
    struct nodeEntry *entry = (struct nodeEntry*) ((uintptr_t)iter.node - offsetof(struct nodeEntry, htn));
    call_rcu(&entry->rcu_head, markDead);
    rcu_read_unlock();
    return UA_STATUSCODE_GOOD;
}
const UA_Node * UA_NodeStore_get(const UA_NodeStore *ns, const UA_NodeId *nodeid) {
    hash_t nhash = hash(nodeid);
    struct cds_lfht_iter iter;
    rcu_read_lock();
    cds_lfht_lookup(ns->ht, nhash, compare, nodeid, &iter);
    struct nodeEntry *found_entry = (struct nodeEntry *)cds_lfht_iter_get_node(&iter);
    if(!found_entry) {
        rcu_read_unlock();
        return UA_NULL;
    }
    /* This is done within a read-lock. The node will not be marked dead within a read-lock. */
    uatomic_inc(&found_entry->refcount);
    rcu_read_unlock();
    return &found_entry->node;
}
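/* A pairing sketch (illustration only, function name made up): every non-NULL
   result of UA_NodeStore_get pins the entry via its refcount and must be
   handed back with UA_NodeStore_release once the caller is done with it. */
#if 0
static UA_NodeClass example_getNodeClass(const UA_NodeStore *ns, const UA_NodeId *id) {
    const UA_Node *node = UA_NodeStore_get(ns, id);
    if(!node)
        return UA_NODECLASS_UNSPECIFIED;
    UA_NodeClass nc = node->nodeClass; /* safe to read: the refcount keeps the entry alive */
    UA_NodeStore_release(node);
    return nc;
}
#endif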
void UA_NodeStore_iterate(const UA_NodeStore *ns, UA_NodeStore_nodeVisitor visitor) {
    struct cds_lfht *ht = ns->ht;
    struct cds_lfht_iter iter;
    rcu_read_lock();
    cds_lfht_first(ht, &iter);
    while(iter.node != UA_NULL) {
        struct nodeEntry *found_entry = (struct nodeEntry *)cds_lfht_iter_get_node(&iter);
        uatomic_inc(&found_entry->refcount);
        const UA_Node *node = &found_entry->node;
        rcu_read_unlock();
        visitor(node);
        UA_NodeStore_release((const UA_Node *)node);
        rcu_read_lock();
        cds_lfht_next(ht, &iter);
    }
    rcu_read_unlock();
}
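/* A visitor sketch (illustration only, names made up): the callback runs
   outside the rcu read-side critical section but with the refcount held, so
   the node is valid for the duration of the call and must not be stored
   beyond it. */
#if 0
static size_t exampleNodeCount;

static void example_countVisitor(const UA_Node *node) {
    exampleNodeCount++; /* node is only valid for the duration of this call */
}

static void example_countNodes(const UA_NodeStore *ns) {
    exampleNodeCount = 0;
    UA_NodeStore_iterate(ns, example_countVisitor);
}
#endif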