/* ua_nodestore_concurrent.c — concurrent nodestore backed by a userspace-RCU
 * (liburcu) lock-free hash table. Entries are reference-counted; removal is
 * deferred via call_rcu until no reader can still reach the entry. */
  1. #include "ua_nodestore.h"
  2. #include "ua_util.h"
  3. #include <urcu.h>
  4. #include <urcu/compiler.h> // for caa_container_of
  5. #include <urcu/uatomic.h>
  6. #include <urcu/rculfhash.h>
#define ALIVE_BIT (1 << 15) /* Alive bit in the refcount */

/* An entry in the hash table. The entry owns the node: the node's concrete
   struct (which may be bigger than UA_Node) is allocated in-place at the end
   of the entry. The refcount packs an alive-bit (bit 15) with a 15-bit reader
   counter; the entry is freed only when both are zero. */
struct nodeEntry {
struct cds_lfht_node htn; /* contains next-ptr for urcu-hashmap; must stay reachable via caa_container_of */
struct rcu_head rcu_head; /* For call-rcu */
UA_UInt16 refcount; /* Counts the amount of readers on it [alive-bit, 15 counter-bits] */
const UA_Node node; /* Might be cast from any _bigger_ UA_Node* type. Allocate enough memory! */
};

/* The nodestore is a thin wrapper around the urcu lock-free hash table. */
struct UA_NodeStore {
struct cds_lfht *ht; /* Hash table */
};
  17. #include "ua_nodestore_hash.inc"
  18. static inline void node_deleteMembers(const UA_Node *node) {
  19. switch(node->nodeClass) {
  20. case UA_NODECLASS_OBJECT:
  21. UA_ObjectNode_deleteMembers((UA_ObjectNode *)node);
  22. break;
  23. case UA_NODECLASS_VARIABLE:
  24. UA_VariableNode_deleteMembers((UA_VariableNode *)node);
  25. break;
  26. case UA_NODECLASS_METHOD:
  27. UA_MethodNode_deleteMembers((UA_MethodNode *)node);
  28. break;
  29. case UA_NODECLASS_OBJECTTYPE:
  30. UA_ObjectTypeNode_deleteMembers((UA_ObjectTypeNode *)node);
  31. break;
  32. case UA_NODECLASS_VARIABLETYPE:
  33. UA_VariableTypeNode_deleteMembers((UA_VariableTypeNode *)node);
  34. break;
  35. case UA_NODECLASS_REFERENCETYPE:
  36. UA_ReferenceTypeNode_deleteMembers((UA_ReferenceTypeNode *)node);
  37. break;
  38. case UA_NODECLASS_DATATYPE:
  39. UA_DataTypeNode_deleteMembers((UA_DataTypeNode *)node);
  40. break;
  41. case UA_NODECLASS_VIEW:
  42. UA_ViewNode_deleteMembers((UA_ViewNode *)node);
  43. break;
  44. default:
  45. UA_assert(UA_FALSE);
  46. break;
  47. }
  48. }
  49. /* We are in a rcu_read lock. So the node will not be freed under our feet. */
  50. static int compare(struct cds_lfht_node *htn, const void *orig) {
  51. const UA_NodeId *origid = (const UA_NodeId *)orig;
  52. const UA_NodeId *newid = &((struct nodeEntry *)htn)->node.nodeId; /* The htn is first in the entry structure. */
  53. return UA_NodeId_equal(newid, origid);
  54. }
  55. /* The entry was removed from the hashtable. No more readers can get it. Since
  56. all readers using the node for a longer time (outside the rcu critical
  57. section) increased the refcount, we only need to wait for the refcount
  58. to reach zero. */
  59. static void markDead(struct rcu_head *head) {
  60. struct nodeEntry *entry = caa_container_of(head, struct nodeEntry, rcu_head);
  61. uatomic_and(&entry->refcount, ~ALIVE_BIT); // set the alive bit to zero
  62. if(uatomic_read(&entry->refcount) > 0)
  63. return;
  64. node_deleteMembers(&entry->node);
  65. UA_free(entry);
  66. return;
  67. }
/* Free the entry if it is dead and nobody uses it anymore */
void UA_NodeStore_release(const UA_Node *managed) {
/* Recover the entry from the embedded node pointer. */
struct nodeEntry *entry = caa_container_of(managed, struct nodeEntry, node); // pointer to the first entry
/* While the entry is still in the hashtable the ALIVE_BIT keeps the count
   above zero, so live entries are never freed here. Only after markDead
   cleared the alive bit can the count drop to zero. */
if(uatomic_sub_return(&entry->refcount, 1) > 0)
return;
/* Last reference on a dead entry: reclaim the node members and the entry. */
node_deleteMembers(managed);
UA_free(entry);
return;
}
  77. UA_NodeStore * UA_NodeStore_new() {
  78. UA_NodeStore *ns;
  79. if(!(ns = UA_alloc(sizeof(UA_NodeStore))))
  80. return UA_NULL;
  81. /* 32 is the minimum size for the hashtable. */
  82. ns->ht = cds_lfht_new(32, 32, 0, CDS_LFHT_AUTO_RESIZE, NULL);
  83. if(!ns->ht) {
  84. UA_free(ns);
  85. return UA_NULL;
  86. }
  87. return ns;
  88. }
/* Tear down the nodestore: unlink every entry, schedule its reclamation via
   call_rcu (readers may still hold references), then destroy the table. */
void UA_NodeStore_delete(UA_NodeStore *ns) {
struct cds_lfht *ht = ns->ht;
struct cds_lfht_iter iter;
struct cds_lfht_node *found_htn;
rcu_read_lock();
cds_lfht_first(ht, &iter);
while(iter.node != UA_NULL) {
found_htn = cds_lfht_iter_get_node(&iter);
/* cds_lfht_del returns 0 on success; another thread may have removed the
   node concurrently, in which case it already scheduled markDead. */
if(!cds_lfht_del(ht, found_htn)) {
struct nodeEntry *entry = caa_container_of(found_htn, struct nodeEntry, htn);
call_rcu(&entry->rcu_head, markDead);
}
cds_lfht_next(ht, &iter);
}
rcu_read_unlock();
/* Must be called outside a read-side critical section. */
cds_lfht_destroy(ht, UA_NULL);
UA_free(ns);
}
  107. UA_StatusCode UA_NodeStore_insert(UA_NodeStore *ns, const UA_Node **node, UA_Boolean getManaged) {
  108. UA_UInt32 nodesize;
  109. /* Copy the node into the entry. Then reset the original node. It shall no longer be used. */
  110. switch((*node)->nodeClass) {
  111. case UA_NODECLASS_OBJECT:
  112. nodesize = sizeof(UA_ObjectNode);
  113. break;
  114. case UA_NODECLASS_VARIABLE:
  115. nodesize = sizeof(UA_VariableNode);
  116. break;
  117. case UA_NODECLASS_METHOD:
  118. nodesize = sizeof(UA_MethodNode);
  119. break;
  120. case UA_NODECLASS_OBJECTTYPE:
  121. nodesize = sizeof(UA_ObjectTypeNode);
  122. break;
  123. case UA_NODECLASS_VARIABLETYPE:
  124. nodesize = sizeof(UA_VariableTypeNode);
  125. break;
  126. case UA_NODECLASS_REFERENCETYPE:
  127. nodesize = sizeof(UA_ReferenceTypeNode);
  128. break;
  129. case UA_NODECLASS_DATATYPE:
  130. nodesize = sizeof(UA_DataTypeNode);
  131. break;
  132. case UA_NODECLASS_VIEW:
  133. nodesize = sizeof(UA_ViewNode);
  134. break;
  135. default:
  136. return UA_STATUSCODE_BADINTERNALERROR;
  137. }
  138. struct nodeEntry *entry;
  139. if(!(entry = UA_alloc(sizeof(struct nodeEntry) - sizeof(UA_Node) + nodesize)))
  140. return UA_STATUSCODE_BADOUTOFMEMORY;
  141. memcpy((void*)&entry->node, *node, nodesize);
  142. cds_lfht_node_init(&entry->htn);
  143. entry->refcount = ALIVE_BIT;
  144. if(getManaged) // increase the counter before adding the node
  145. entry->refcount++;
  146. struct cds_lfht_node *result;
  147. if(!UA_NodeId_isNull(&(*node)->nodeId)) {
  148. hash_t h = hash(&(*node)->nodeId);
  149. rcu_read_lock();
  150. result = cds_lfht_add_unique(ns->ht, h, compare, &entry->node.nodeId, &entry->htn);
  151. rcu_read_unlock();
  152. /* If the nodeid exists already */
  153. if(result != &entry->htn) {
  154. UA_free(entry);
  155. return UA_STATUSCODE_BADNODEIDEXISTS;
  156. }
  157. } else {
  158. /* create a unique nodeid */
  159. ((UA_Node *)&entry->node)->nodeId.identifierType = UA_NODEIDTYPE_NUMERIC;
  160. ((UA_Node *)&entry->node)->nodeId.namespaceIndex = 1; // namespace 1 is always in the local nodestore
  161. unsigned long identifier;
  162. long before, after;
  163. rcu_read_lock();
  164. cds_lfht_count_nodes(ns->ht, &before, &identifier, &after); // current amount of nodes stored
  165. rcu_read_unlock();
  166. identifier++;
  167. ((UA_Node *)&entry->node)->nodeId.identifier.numeric = identifier;
  168. while(UA_TRUE) {
  169. hash_t nhash = hash(&entry->node.nodeId);
  170. rcu_read_lock();
  171. result = cds_lfht_add_unique(ns->ht, nhash, compare, &entry->node.nodeId, &entry->htn);
  172. rcu_read_unlock();
  173. if(result == &entry->htn)
  174. break;
  175. ((UA_Node *)&entry->node)->nodeId.identifier.numeric += (identifier * 2654435761);
  176. }
  177. }
  178. UA_free((UA_Node *)*node); /* The old node is replaced by a managed node. */
  179. if(getManaged)
  180. *node = &entry->node;
  181. else
  182. *node = UA_NULL;
  183. return UA_STATUSCODE_GOOD;
  184. }
  185. UA_StatusCode UA_NodeStore_replace(UA_NodeStore *ns, const UA_Node *oldNode,
  186. const UA_Node **node, UA_Boolean getManaged) {
  187. UA_UInt32 nodesize;
  188. /* Copy the node into the entry. Then reset the original node. It shall no longer be used. */
  189. switch((*node)->nodeClass) {
  190. case UA_NODECLASS_OBJECT:
  191. nodesize = sizeof(UA_ObjectNode);
  192. break;
  193. case UA_NODECLASS_VARIABLE:
  194. nodesize = sizeof(UA_VariableNode);
  195. break;
  196. case UA_NODECLASS_METHOD:
  197. nodesize = sizeof(UA_MethodNode);
  198. break;
  199. case UA_NODECLASS_OBJECTTYPE:
  200. nodesize = sizeof(UA_ObjectTypeNode);
  201. break;
  202. case UA_NODECLASS_VARIABLETYPE:
  203. nodesize = sizeof(UA_VariableTypeNode);
  204. break;
  205. case UA_NODECLASS_REFERENCETYPE:
  206. nodesize = sizeof(UA_ReferenceTypeNode);
  207. break;
  208. case UA_NODECLASS_DATATYPE:
  209. nodesize = sizeof(UA_DataTypeNode);
  210. break;
  211. case UA_NODECLASS_VIEW:
  212. nodesize = sizeof(UA_ViewNode);
  213. break;
  214. default:
  215. return UA_STATUSCODE_BADINTERNALERROR;
  216. }
  217. struct nodeEntry *entry;
  218. if(!(entry = UA_alloc(sizeof(struct nodeEntry) - sizeof(UA_Node) + nodesize)))
  219. return UA_STATUSCODE_BADOUTOFMEMORY;
  220. memcpy((void*)&entry->node, *node, nodesize);
  221. cds_lfht_node_init(&entry->htn);
  222. entry->refcount = ALIVE_BIT;
  223. if(getManaged) // increase the counter before adding the node
  224. entry->refcount++;
  225. hash_t h = hash(&(*node)->nodeId);
  226. struct cds_lfht_iter iter;
  227. rcu_read_lock();
  228. cds_lfht_lookup(ns->ht, h, compare, &(*node)->nodeId, &iter);
  229. struct cds_lfht_node *result = cds_lfht_iter_get_node(&iter);
  230. /* No node found that can be replaced */
  231. if(!result) {
  232. rcu_read_unlock();
  233. UA_free(entry);
  234. return UA_STATUSCODE_BADNODEIDUNKNOWN;
  235. }
  236. struct nodeEntry *found_entry = (struct nodeEntry *)cds_lfht_iter_get_node(&iter);
  237. /* The node we found is obsolete*/
  238. if(&found_entry->node != oldNode) {
  239. rcu_read_unlock();
  240. UA_free(entry);
  241. return UA_STATUSCODE_BADINTERNALERROR;
  242. }
  243. /* The old node is replaced by a managed node. */
  244. if(cds_lfht_replace(ns->ht, &iter, h, compare, &(*node)->nodeId, &entry->htn) == 0) {
  245. struct nodeEntry *entry = caa_container_of(result, struct nodeEntry, htn);
  246. /* If an entry got replaced, mark it as dead. */
  247. call_rcu(&entry->rcu_head, markDead);
  248. rcu_read_unlock();
  249. UA_free((UA_Node *)*node);
  250. if(getManaged)
  251. *node = &entry->node;
  252. else
  253. *node = UA_NULL;
  254. return UA_STATUSCODE_GOOD;
  255. }
  256. /* Replacing failed. Maybe the node got replaced just before this thread tried to.*/
  257. rcu_read_unlock();
  258. UA_free(entry);
  259. return UA_STATUSCODE_BADINTERNALERROR;
  260. }
  261. UA_StatusCode UA_NodeStore_remove(UA_NodeStore *ns, const UA_NodeId *nodeid) {
  262. hash_t nhash = hash(nodeid);
  263. struct cds_lfht_iter iter;
  264. rcu_read_lock();
  265. cds_lfht_lookup(ns->ht, nhash, compare, &nodeid, &iter);
  266. struct cds_lfht_node *found_htn = cds_lfht_iter_get_node(&iter);
  267. /* If this fails, then the node has already been removed. */
  268. if(!found_htn || cds_lfht_del(ns->ht, found_htn) != 0) {
  269. rcu_read_unlock();
  270. return UA_STATUSCODE_BADNODEIDUNKNOWN;
  271. }
  272. struct nodeEntry *entry = caa_container_of(found_htn, struct nodeEntry, htn);
  273. call_rcu(&entry->rcu_head, markDead);
  274. rcu_read_unlock();
  275. return UA_STATUSCODE_GOOD;
  276. }
  277. const UA_Node * UA_NodeStore_get(const UA_NodeStore *ns, const UA_NodeId *nodeid) {
  278. hash_t nhash = hash(nodeid);
  279. struct cds_lfht_iter iter;
  280. rcu_read_lock();
  281. cds_lfht_lookup(ns->ht, nhash, compare, nodeid, &iter);
  282. struct nodeEntry *found_entry = (struct nodeEntry *)cds_lfht_iter_get_node(&iter);
  283. if(!found_entry) {
  284. rcu_read_unlock();
  285. return UA_NULL;
  286. }
  287. /* This is done within a read-lock. The node will not be marked dead within a read-lock. */
  288. uatomic_inc(&found_entry->refcount);
  289. rcu_read_unlock();
  290. return &found_entry->node;
  291. }
/* Call the visitor on every node in the store. The read-lock is dropped while
   the visitor runs so it may perform blocking work. The visited node is
   pinned via its refcount for the duration of the visit.
   NOTE(review): iter.node itself is NOT pinned across the unlocked window —
   if the node is removed and reclaimed concurrently, the subsequent
   cds_lfht_next may touch freed memory. Confirm whether concurrent removal
   during iteration is possible in this deployment. */
void UA_NodeStore_iterate(const UA_NodeStore *ns, UA_NodeStore_nodeVisitor visitor) {
struct cds_lfht *ht = ns->ht;
struct cds_lfht_iter iter;
rcu_read_lock();
cds_lfht_first(ht, &iter);
while(iter.node != UA_NULL) {
struct nodeEntry *found_entry = (struct nodeEntry *)cds_lfht_iter_get_node(&iter);
/* Pin the entry before leaving the critical section. */
uatomic_inc(&found_entry->refcount);
const UA_Node *node = &found_entry->node;
rcu_read_unlock();
visitor(node);
/* Drop our pin; frees the entry if it was removed meanwhile. */
UA_NodeStore_release((UA_Node *)node);
rcu_read_lock();
cds_lfht_next(ht, &iter);
}
rcu_read_unlock();
}